"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text begins.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
"""
Locate and return a role function from its language-dependent name, along
with a list of system messages. If the role is not found in the current
language, check English. Return a 2-tuple: role function (``None`` if the
named role cannot be found) and a list of system messages.
"""
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
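# Example (sketch): once the standard roles below are registered, a lookup
# by language-dependent name resolves the canonical name and caches the
# result in ``_roles``. ``reporter`` is only consulted when messages are
# generated, so ``None`` suffices on this happy path (an assumption made
# for illustration):
#
#     from docutils.parsers.rst.languages import en
#     strong_fn, msgs = role('strong', en, 1, None)
#     assert strong_fn is _role_registry['strong'] and not msgs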
def register_canonical_role(name, role_fn):
"""
Register an interpreted text role by its canonical name.
:Parameters:
- `name`: The canonical name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_role_registry[name] = role_fn
def register_local_role(name, role_fn):
"""
Register an interpreted text role by its local or language-dependent name.
:Parameters:
- `name`: The local or language-dependent name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_roles[name] = role_fn
def set_implicit_options(role_fn):
"""
Add customization options to role functions, unless explicitly set or
disabled.
"""
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
"""For roles which simply wrap a given `node_class` around the text."""
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role)
class GenericRole:
"""
Generic interpreted text role, where the interpreted text is simply
wrapped with the provided node class.
"""
def __init__(self, role_name, node_class):
self.name = role_name
self.node_class = node_class
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
set_classes(options)
return [self.node_class(rawtext, utils.unescape(text), **options)], []
class CustomRole:
"""
Wrapper for custom interpreted text roles.
"""
def __init__(self, role_name, base_role, options={}, content=[]):
self.name = role_name
self.base_role = base_role
self.options = None
if hasattr(base_role, 'options'):
self.options = base_role.options
self.content = None
if hasattr(base_role, 'content'):
self.content = base_role.content
self.supplied_options = options
self.supplied_content = content
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
opts = self.supplied_options.copy()
opts.update(options)
cont = list(self.supplied_content)
if cont and content:
cont += '\n'
cont.extend(content)
return self.base_role(role, rawtext, text, lineno, inliner,
options=opts, content=cont)
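# Example (sketch): the "role" directive builds these wrappers. Constructed
# by hand (names chosen for illustration), a wrapper delegates to its base
# role with the supplied options merged in:
#
#     my_literal = CustomRole('my-literal', _role_registry['literal'],
#                             options={'class': ['my-literal']})
#     # Calling my_literal(...) behaves like the "literal" role, with
#     # 'my-literal' added to the generated node's classes.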
def generic_custom_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
""""""
# Once nested inline markup is implemented, this and other methods should
# recursively call inliner.nested_parse().
set_classes(options)
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pepnum = int(text)
if pepnum < 0 or pepnum > 9999:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'PEP number must be a number from 0 to 9999; "%s" is invalid.'
% text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.pep_reference; so this is correct:
ref = (inliner.document.settings.pep_base_url
+ inliner.document.settings.pep_file_url_template % pepnum)
set_classes(options)
return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
**options)], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
rfcnum = int(text)
if rfcnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'RFC number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
set_classes(options)
node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
**options)
return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
if 'format' not in options:
msg = inliner.reporter.error(
'No format (Writer name) is associated with this role: "%s".\n'
'The "raw" role cannot be used directly.\n'
'Instead, use the "role" directive to create a new role with '
'an associated format.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    # take the text between the backquotes verbatim, keeping backslashes
    text = rawtext.split('`')[1]
node = nodes.math(rawtext, text)
return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
msg = inliner.reporter.error(
'Interpreted text role "%s" not implemented.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
unimplemented_role)
def set_classes(options):
"""
Auxiliary function to set options['classes'] and delete
options['class'].
"""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
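# Example (sketch): ``set_classes`` renames the "class" option in place,
# matching the ``classes`` attribute expected by node constructors:
#
#     opts = {'class': ['special']}
#     set_classes(opts)
#     assert opts == {'classes': ['special']}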
| {
"repo_name": "cuongthai/cuongthai-s-blog",
"path": "docutils/parsers/rst/roles.py",
"copies": "2",
"size": "13783",
"license": "bsd-3-clause",
"hash": -5576980266694448000,
"line_mean": 36.6078431373,
"line_max": 79,
"alpha_frac": 0.634187042,
"autogenerated": false,
"ratio": 4.162790697674419,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5796977739674418,
"avg_score": null,
"num_lines": null
} |
"""
This module defines standard interpreted text role functions, a registry for
interpreted text roles, and an API for adding to and retrieving from the
registry.
The interface for interpreted role functions is as follows::
def role_fn(name, rawtext, text, lineno, inliner,
options={}, content=[]):
code...
# Set function attributes for customization:
role_fn.options = ...
role_fn.content = ...
Parameters:
- ``name`` is the local name of the interpreted text role, the role name
actually used in the document.
- ``rawtext`` is a string containing the entire interpreted text construct.
Return it as a ``problematic`` node linked to a system message if there is a
problem.
- ``text`` is the interpreted text content, with backslash escapes converted
to nulls (``\x00``).
- ``lineno`` is the line number where the interpreted text begins.
- ``inliner`` is the Inliner object that called the role function.
It defines the following useful attributes: ``reporter``,
``problematic``, ``memo``, ``parent``, ``document``.
- ``options``: A dictionary of directive options for customization, to be
interpreted by the role function. Used for additional attributes for the
generated elements and other functionality.
- ``content``: A list of strings, the directive content for customization
("role" directive). To be interpreted by the role function.
Function attributes for customization, interpreted by the "role" directive:
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in the `directives` module.
All role functions implicitly support the "class" option, unless disabled
with an explicit ``{'class': None}``.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Note that unlike directives, the "arguments" function attribute is not
supported for role customization. Directive arguments are handled by the
"role" directive itself.
Interpreted role functions return a tuple of two values:
- A list of nodes which will be inserted into the document tree at the
point where the interpreted role was encountered (can be an empty
list).
- A list of system messages, which will be inserted into the document tree
immediately after the end of the current inline block (can also be empty).
"""
__docformat__ = 'reStructuredText'
from docutils import nodes, utils
from docutils.parsers.rst import directives
from docutils.parsers.rst.languages import en as _fallback_language_module
from docutils.utils.code_analyzer import Lexer, LexerError
DEFAULT_INTERPRETED_ROLE = 'title-reference'
"""
The canonical name of the default interpreted role. This role is used
when no role is specified for a piece of interpreted text.
"""
_role_registry = {}
"""Mapping of canonical role names to role functions. Language-dependent role
names are defined in the ``language`` subpackage."""
_roles = {}
"""Mapping of local or language-dependent interpreted text role names to role
functions."""
def role(role_name, language_module, lineno, reporter):
"""
Locate and return a role function from its language-dependent name, along
with a list of system messages. If the role is not found in the current
language, check English. Return a 2-tuple: role function (``None`` if the
named role cannot be found) and a list of system messages.
"""
normname = role_name.lower()
messages = []
msg_text = []
if normname in _roles:
return _roles[normname], messages
if role_name:
canonicalname = None
try:
canonicalname = language_module.roles[normname]
except AttributeError, error:
msg_text.append('Problem retrieving role entry from language '
'module %r: %s.' % (language_module, error))
except KeyError:
msg_text.append('No role entry for "%s" in module "%s".'
% (role_name, language_module.__name__))
else:
canonicalname = DEFAULT_INTERPRETED_ROLE
# If we didn't find it, try English as a fallback.
if not canonicalname:
try:
canonicalname = _fallback_language_module.roles[normname]
msg_text.append('Using English fallback for role "%s".'
% role_name)
except KeyError:
msg_text.append('Trying "%s" as canonical role name.'
% role_name)
# The canonical name should be an English name, but just in case:
canonicalname = normname
# Collect any messages that we generated.
if msg_text:
message = reporter.info('\n'.join(msg_text), line=lineno)
messages.append(message)
# Look the role up in the registry, and return it.
if canonicalname in _role_registry:
role_fn = _role_registry[canonicalname]
register_local_role(normname, role_fn)
return role_fn, messages
else:
return None, messages # Error message will be generated by caller.
def register_canonical_role(name, role_fn):
"""
Register an interpreted text role by its canonical name.
:Parameters:
- `name`: The canonical name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_role_registry[name] = role_fn
def register_local_role(name, role_fn):
"""
Register an interpreted text role by its local or language-dependent name.
:Parameters:
- `name`: The local or language-dependent name of the interpreted role.
- `role_fn`: The role function. See the module docstring.
"""
set_implicit_options(role_fn)
_roles[name] = role_fn
def set_implicit_options(role_fn):
"""
Add customization options to role functions, unless explicitly set or
disabled.
"""
if not hasattr(role_fn, 'options') or role_fn.options is None:
role_fn.options = {'class': directives.class_option}
elif 'class' not in role_fn.options:
role_fn.options['class'] = directives.class_option
def register_generic_role(canonical_name, node_class):
"""For roles which simply wrap a given `node_class` around the text."""
role = GenericRole(canonical_name, node_class)
register_canonical_role(canonical_name, role)
class GenericRole:
"""
Generic interpreted text role, where the interpreted text is simply
wrapped with the provided node class.
"""
def __init__(self, role_name, node_class):
self.name = role_name
self.node_class = node_class
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
set_classes(options)
return [self.node_class(rawtext, utils.unescape(text), **options)], []
class CustomRole:
"""
Wrapper for custom interpreted text roles.
"""
def __init__(self, role_name, base_role, options={}, content=[]):
self.name = role_name
self.base_role = base_role
self.options = None
if hasattr(base_role, 'options'):
self.options = base_role.options
self.content = None
if hasattr(base_role, 'content'):
self.content = base_role.content
self.supplied_options = options
self.supplied_content = content
def __call__(self, role, rawtext, text, lineno, inliner,
options={}, content=[]):
opts = self.supplied_options.copy()
opts.update(options)
cont = list(self.supplied_content)
if cont and content:
cont += '\n'
cont.extend(content)
return self.base_role(role, rawtext, text, lineno, inliner,
options=opts, content=cont)
def generic_custom_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
""""""
# Once nested inline markup is implemented, this and other methods should
# recursively call inliner.nested_parse().
set_classes(options)
return [nodes.inline(rawtext, utils.unescape(text), **options)], []
generic_custom_role.options = {'class': directives.class_option}
######################################################################
# Define and register the standard roles:
######################################################################
register_generic_role('abbreviation', nodes.abbreviation)
register_generic_role('acronym', nodes.acronym)
register_generic_role('emphasis', nodes.emphasis)
register_generic_role('literal', nodes.literal)
register_generic_role('strong', nodes.strong)
register_generic_role('subscript', nodes.subscript)
register_generic_role('superscript', nodes.superscript)
register_generic_role('title-reference', nodes.title_reference)
def pep_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
pepnum = int(text)
if pepnum < 0 or pepnum > 9999:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'PEP number must be a number from 0 to 9999; "%s" is invalid.'
% text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.pep_reference; so this is correct:
ref = (inliner.document.settings.pep_base_url
+ inliner.document.settings.pep_file_url_template % pepnum)
set_classes(options)
return [nodes.reference(rawtext, 'PEP ' + utils.unescape(text), refuri=ref,
**options)], []
register_canonical_role('pep-reference', pep_reference_role)
def rfc_reference_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
try:
rfcnum = int(text)
if rfcnum <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'RFC number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
set_classes(options)
node = nodes.reference(rawtext, 'RFC ' + utils.unescape(text), refuri=ref,
**options)
return [node], []
register_canonical_role('rfc-reference', rfc_reference_role)
def raw_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
if not inliner.document.settings.raw_enabled:
msg = inliner.reporter.warning('raw (and derived) roles disabled')
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
if 'format' not in options:
msg = inliner.reporter.error(
'No format (Writer name) is associated with this role: "%s".\n'
'The "raw" role cannot be used directly.\n'
'Instead, use the "role" directive to create a new role with '
'an associated format.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
set_classes(options)
node = nodes.raw(rawtext, utils.unescape(text, 1), **options)
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
return [node], []
raw_role.options = {'format': directives.unchanged}
register_canonical_role('raw', raw_role)
def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
set_classes(options)
language = options.get('language', '')
classes = ['code']
if language:
classes.append(language)
if 'classes' in options:
classes.extend(options['classes'])
try:
tokens = Lexer(utils.unescape(text, 1), language,
inliner.document.settings.syntax_highlight)
except LexerError, error:
msg = inliner.reporter.warning(error)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.literal(rawtext, '', classes=classes)
# analyze content and add nodes for every token
for classes, value in tokens:
# print (classes, value)
if classes:
node += nodes.inline(value, value, classes=classes)
else:
# insert as Text to decrease the verbosity of the output
node += nodes.Text(value, value)
return [node], []
code_role.options = {'class': directives.class_option,
'language': directives.unchanged}
register_canonical_role('code', code_role)
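# Example (sketch): the "code" role tokenizes its text with the optional
# Pygments-based ``Lexer``. With no language and syntax highlighting set to
# "none" (settings assumed for illustration), the text comes back as one
# untagged token, which ``code_role`` inserts as a plain Text child of the
# ``literal`` node:
#
#     tokens = Lexer(u'print 1', '', 'none')
#     assert list(tokens) == [([], u'print 1')]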
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
    # take the text between the backquotes verbatim, keeping backslashes
    text = rawtext.split('`')[1]
node = nodes.math(rawtext, text)
return [node], []
register_canonical_role('math', math_role)
######################################################################
# Register roles that are currently unimplemented.
######################################################################
def unimplemented_role(role, rawtext, text, lineno, inliner, attributes={}):
msg = inliner.reporter.error(
'Interpreted text role "%s" not implemented.' % role, line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
register_canonical_role('index', unimplemented_role)
register_canonical_role('named-reference', unimplemented_role)
register_canonical_role('anonymous-reference', unimplemented_role)
register_canonical_role('uri-reference', unimplemented_role)
register_canonical_role('footnote-reference', unimplemented_role)
register_canonical_role('citation-reference', unimplemented_role)
register_canonical_role('substitution-reference', unimplemented_role)
register_canonical_role('target', unimplemented_role)
# This should remain unimplemented, for testing purposes:
register_canonical_role('restructuredtext-unimplemented-role',
unimplemented_role)
def set_classes(options):
"""
Auxiliary function to set options['classes'] and delete
options['class'].
"""
if 'class' in options:
assert 'classes' not in options
options['classes'] = options['class']
del options['class']
| {
"repo_name": "neumerance/deploy",
"path": ".venv/lib/python2.7/site-packages/docutils/parsers/rst/roles.py",
"copies": "4",
"size": "14712",
"license": "apache-2.0",
"hash": 5285176982817548000,
"line_mean": 36.3401015228,
"line_max": 79,
"alpha_frac": 0.6494018488,
"autogenerated": false,
"ratio": 4.098050139275766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6747451988075766,
"avg_score": null,
"num_lines": null
} |
"""$Id: root.py 717 2007-01-04 18:04:57Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 717 $"
__date__ = "$Date: 2007-01-04 18:04:57 +0000 (Thu, 04 Jan 2007) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
rss11_namespace='http://purl.org/net/rss1.1#'
purl1_namespace='http://purl.org/rss/1.0/'
soap_namespace='http://feeds.archive.org/validator/'
pie_namespace='http://purl.org/atom/ns#'
atom_namespace='http://www.w3.org/2005/Atom'
opensearch_namespace='http://a9.com/-/spec/opensearch/1.1/'
xrds_namespace='xri://$xrds'
#
# Main document.
# Supports rss, rdf, pie, and ffkar
#
class root(validatorBase):
def __init__(self, parent, base):
validatorBase.__init__(self)
self.parent = parent
self.dispatcher = parent
self.name = "root"
self.xmlBase = base
self.xmlLang = None
def startElementNS(self, name, qname, attrs):
if name=='rss':
if qname:
from logging import InvalidNamespace
self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
self.dispatcher.defaultNamespaces.append(qname)
if name=='feed' or name=='entry':
if qname==pie_namespace:
from logging import ObsoleteNamespace
self.log(ObsoleteNamespace({"element":"feed"}))
self.dispatcher.defaultNamespaces.append(pie_namespace)
from logging import TYPE_ATOM
self.setFeedType(TYPE_ATOM)
elif not qname:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":"root", "element":name}))
else:
if name=='feed':
from logging import TYPE_ATOM
self.setFeedType(TYPE_ATOM)
else:
from logging import TYPE_ATOM_ENTRY
self.setFeedType(TYPE_ATOM_ENTRY)
self.dispatcher.defaultNamespaces.append(atom_namespace)
if qname<>atom_namespace:
from logging import InvalidNamespace
self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
self.dispatcher.defaultNamespaces.append(qname)
if name=='Channel':
if not qname:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":"root", "element":name}))
elif qname != rss11_namespace :
from logging import InvalidNamespace
self.log(InvalidNamespace({"parent":"root", "element":name, "namespace":qname}))
else:
self.dispatcher.defaultNamespaces.append(qname)
from logging import TYPE_RSS1
self.setFeedType(TYPE_RSS1)
if name=='OpenSearchDescription':
if not qname:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":"root", "element":name}))
qname = opensearch_namespace
elif qname != opensearch_namespace:
from logging import InvalidNamespace
self.log(InvalidNamespace({"element":name, "namespace":qname}))
self.dispatcher.defaultNamespaces.append(qname)
qname = opensearch_namespace
if name=='XRDS':
from logging import TYPE_XRD
self.setFeedType(TYPE_XRD)
if not qname:
from logging import MissingNamespace
self.log(MissingNamespace({"parent":"root", "element":name}))
qname = xrds_namespace
elif qname != xrds_namespace:
from logging import InvalidNamespace
self.log(InvalidNamespace({"element":name, "namespace":qname}))
self.dispatcher.defaultNamespaces.append(qname)
qname = xrds_namespace
validatorBase.startElementNS(self, name, qname, attrs)
def unknown_starttag(self, name, qname, attrs):
from logging import ObsoleteNamespace,InvalidNamespace,UndefinedElement
if qname in ['http://example.com/newformat#','http://purl.org/atom/ns#']:
self.log(ObsoleteNamespace({"element":name, "namespace":qname}))
elif name=='feed':
self.log(InvalidNamespace({"element":name, "namespace":qname}))
else:
self.log(UndefinedElement({"parent":"root", "element":name}))
from validators import any
return any(self, name, qname, attrs)
def do_rss(self):
from rss import rss
return rss()
def do_feed(self):
from feed import feed
if pie_namespace in self.dispatcher.defaultNamespaces:
from validators import eater
return eater()
return feed()
def do_entry(self):
from entry import entry
return entry()
def do_opml(self):
from opml import opml
return opml()
def do_outlineDocument(self):
from logging import ObsoleteVersion
self.log(ObsoleteVersion({"element":"outlineDocument"}))
from opml import opml
return opml()
def do_opensearch_OpenSearchDescription(self):
import opensearch
self.dispatcher.defaultNamespaces.append(opensearch_namespace)
from logging import TYPE_OPENSEARCH
self.setFeedType(TYPE_OPENSEARCH)
return opensearch.OpenSearchDescription()
def do_xrds_XRDS(self):
from xrd import xrds
return xrds()
def do_rdf_RDF(self):
from rdf import rdf
self.dispatcher.defaultNamespaces.append(purl1_namespace)
return rdf()
def do_Channel(self):
from channel import rss10Channel
return rss10Channel()
def do_soap_Envelope(self):
return root(self, self.xmlBase)
def do_soap_Body(self):
self.dispatcher.defaultNamespaces.append(soap_namespace)
return root(self, self.xmlBase)
def do_request(self):
return root(self, self.xmlBase)
def do_xhtml_html(self):
from logging import UndefinedElement
self.log(UndefinedElement({"parent":"root", "element":"xhtml:html"}))
from validators import eater
return eater()
| {
"repo_name": "AlphaCluster/NewsBlur",
"path": "vendor/feedvalidator/root.py",
"copies": "16",
"size": "5750",
"license": "mit",
"hash": 5844375525889711000,
"line_mean": 32.4302325581,
"line_max": 94,
"alpha_frac": 0.6753043478,
"autogenerated": false,
"ratio": 3.8564721663313213,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016523600044666294,
"num_lines": 172
} |
# $Id: rpc.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Remote Procedure Call."""
from __future__ import absolute_import
import struct
from . import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
"""Remote Procedure Call.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of RPC.
TODO.
"""
__hdr__ = (
('xid', 'I', 0),
('dir', 'I', CALL)
)
class Auth(dpkt.Packet):
__hdr__ = (('flavor', 'I', AUTH_NONE), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:4 + n]
def __len__(self):
return 8 + len(self.data)
def __bytes__(self):
return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
bytes(self.data)
class Call(dpkt.Packet):
__hdr__ = (
('rpcvers', 'I', 2),
('prog', 'I', 0),
('vers', 'I', 0),
('proc', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cred = RPC.Auth(self.data)
self.verf = RPC.Auth(self.data[len(self.cred):])
self.data = self.data[len(self.cred) + len(self.verf):]
def __len__(self):
            return len(bytes(self))  # XXX
def __bytes__(self):
return dpkt.Packet.__bytes__(self) + \
bytes(getattr(self, 'cred', RPC.Auth())) + \
bytes(getattr(self, 'verf', RPC.Auth())) + \
bytes(self.data)
class Reply(dpkt.Packet):
__hdr__ = (('stat', 'I', MSG_ACCEPTED), )
class Accept(dpkt.Packet):
__hdr__ = (('stat', 'I', SUCCESS), )
def unpack(self, buf):
self.verf = RPC.Auth(buf)
buf = buf[len(self.verf):]
self.stat = struct.unpack('>I', buf[:4])[0]
if self.stat == SUCCESS:
self.data = buf[4:]
elif self.stat == PROG_MISMATCH:
self.low, self.high = struct.unpack('>II', buf[4:12])
self.data = buf[12:]
def __len__(self):
if self.stat == PROG_MISMATCH: n = 8
else: n = 0
return len(self.verf) + 4 + n + len(self.data)
def __bytes__(self):
if self.stat == PROG_MISMATCH:
return bytes(self.verf) + struct.pack('>III', self.stat,
self.low, self.high) + self.data
return bytes(self.verf) + dpkt.Packet.__bytes__(self)
class Reject(dpkt.Packet):
__hdr__ = (('stat', 'I', AUTH_ERROR), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == RPC_MISMATCH:
self.low, self.high = struct.unpack('>II', self.data[:8])
self.data = self.data[8:]
elif self.stat == AUTH_ERROR:
self.why = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
def __len__(self):
if self.stat == RPC_MISMATCH: n = 8
elif self.stat == AUTH_ERROR: n = 4
else: n = 0
return 4 + n + len(self.data)
def __bytes__(self):
if self.stat == RPC_MISMATCH:
return struct.pack('>III', self.stat, self.low, self.high) + self.data
elif self.stat == AUTH_ERROR:
return struct.pack('>II', self.stat, self.why) + self.data
return dpkt.Packet.__bytes__(self)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == MSG_ACCEPTED:
self.data = self.accept = self.Accept(self.data)
            elif self.stat == MSG_DENIED:
self.data = self.reject = self.Reject(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.dir == CALL:
self.data = self.call = self.Call(self.data)
elif self.dir == REPLY:
self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
l = []
while buf:
if buf.startswith(b'\x00\x00\x00\x01'):
p = cls(buf[4:])
l.append(p)
buf = p.data
elif buf.startswith(b'\x00\x00\x00\x00'):
break
else:
raise dpkt.UnpackError('invalid XDR list')
return l
def pack_xdrlist(*args):
return b'\x00\x00\x00\x01'.join(map(bytes, args)) + b'\x00\x00\x00\x00'
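# Round-trip sketch (values are arbitrary, chosen for illustration): build a
# minimal CALL message with empty AUTH_NONE credential and verifier, then
# reparse it with the classes above.
#
#     import struct
#     buf = (struct.pack('>IIIIII', 1, CALL, 2, 100003, 3, 0)
#            + struct.pack('>II', AUTH_NONE, 0) * 2)
#     r = RPC(buf)
#     assert r.dir == CALL and r.call.prog == 100003
#     assert len(r.call.cred) == 8 and r.call.data == b''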
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/rpc.py",
"copies": "3",
"size": "5082",
"license": "apache-2.0",
"hash": -8877801505236011000,
"line_mean": 28.5465116279,
"line_max": 90,
"alpha_frac": 0.4795356159,
"autogenerated": false,
"ratio": 3.483207676490747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014716746885399606,
"num_lines": 172
} |
# $Id: rpc.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Remote Procedure Call."""
import struct
import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
__hdr__ = (
('xid', 'I', 0),
('dir', 'I', CALL)
)
class Auth(dpkt.Packet):
__hdr__ = (('flavor', 'I', AUTH_NONE), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:4 + n]
def __len__(self):
return 8 + len(self.data)
def __str__(self):
return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
str(self.data)
class Call(dpkt.Packet):
__hdr__ = (
('rpcvers', 'I', 2),
('prog', 'I', 0),
('vers', 'I', 0),
('proc', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cred = RPC.Auth(self.data)
self.verf = RPC.Auth(self.data[len(self.cred):])
self.data = self.data[len(self.cred) + len(self.verf):]
def __len__(self):
return len(str(self)) # XXX
def __str__(self):
return dpkt.Packet.__str__(self) + \
str(getattr(self, 'cred', RPC.Auth())) + \
str(getattr(self, 'verf', RPC.Auth())) + \
str(self.data)
class Reply(dpkt.Packet):
__hdr__ = (('stat', 'I', MSG_ACCEPTED), )
class Accept(dpkt.Packet):
__hdr__ = (('stat', 'I', SUCCESS), )
def unpack(self, buf):
self.verf = RPC.Auth(buf)
buf = buf[len(self.verf):]
self.stat = struct.unpack('>I', buf[:4])[0]
if self.stat == SUCCESS:
self.data = buf[4:]
elif self.stat == PROG_MISMATCH:
self.low, self.high = struct.unpack('>II', buf[4:12])
self.data = buf[12:]
def __len__(self):
if self.stat == PROG_MISMATCH: n = 8
else: n = 0
return len(self.verf) + 4 + n + len(self.data)
def __str__(self):
if self.stat == PROG_MISMATCH:
return str(self.verf) + struct.pack('>III', self.stat,
self.low, self.high) + self.data
return str(self.verf) + dpkt.Packet.__str__(self)
class Reject(dpkt.Packet):
__hdr__ = (('stat', 'I', AUTH_ERROR), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == RPC_MISMATCH:
self.low, self.high = struct.unpack('>II', self.data[:8])
self.data = self.data[8:]
elif self.stat == AUTH_ERROR:
self.why = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
def __len__(self):
if self.stat == RPC_MISMATCH: n = 8
elif self.stat == AUTH_ERROR: n = 4
else: n = 0
return 4 + n + len(self.data)
def __str__(self):
if self.stat == RPC_MISMATCH:
return struct.pack('>III', self.stat, self.low, self.high) + self.data
elif self.stat == AUTH_ERROR:
return struct.pack('>II', self.stat, self.why) + self.data
return dpkt.Packet.__str__(self)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == MSG_ACCEPTED:
self.data = self.accept = self.Accept(self.data)
            elif self.stat == MSG_DENIED:
self.data = self.reject = self.Reject(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.dir == CALL:
self.data = self.call = self.Call(self.data)
elif self.dir == REPLY:
self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
l = []
while buf:
if buf.startswith('\x00\x00\x00\x01'):
p = cls(buf[4:])
l.append(p)
buf = p.data
elif buf.startswith('\x00\x00\x00\x00'):
break
else:
raise dpkt.UnpackError, 'invalid XDR list'
return l
def pack_xdrlist(*args):
return '\x00\x00\x00\x01'.join(map(str, args)) + '\x00\x00\x00\x00'
| {
"repo_name": "lkash/test",
"path": "dpkt/rpc.py",
"copies": "6",
"size": "4854",
"license": "bsd-3-clause",
"hash": -630027343890657700,
"line_mean": 29.149068323,
"line_max": 90,
"alpha_frac": 0.4721878863,
"autogenerated": false,
"ratio": 3.44499645138396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0015024353501409532,
"num_lines": 161
} |
# $Id: rpc.py 23 2006-11-08 15:45:33Z dugsong $
"""Remote Procedure Call."""
import struct
import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
__hdr__ = (
('xid', 'I', 0),
('dir', 'I', CALL)
)
class Auth(dpkt.Packet):
__hdr__ = (('flavor', 'I', AUTH_NONE), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:4+n]
def __len__(self):
return 8 + len(self.data)
def __str__(self):
return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
str(self.data)
class Call(dpkt.Packet):
__hdr__ = (
('rpcvers', 'I', 2),
('prog', 'I', 0),
('vers', 'I', 0),
('proc', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cred = RPC.Auth(self.data)
self.verf = RPC.Auth(self.data[len(self.cred):])
self.data = self.data[len(self.cred) + len(self.verf):]
def __len__(self):
return len(str(self)) # XXX
def __str__(self):
return dpkt.Packet.__str__(self) + \
str(getattr(self, 'cred', RPC.Auth())) + \
str(getattr(self, 'verf', RPC.Auth())) + \
str(self.data)
class Reply(dpkt.Packet):
__hdr__ = (('stat', 'I', MSG_ACCEPTED), )
class Accept(dpkt.Packet):
__hdr__ = (('stat', 'I', SUCCESS), )
def unpack(self, buf):
self.verf = RPC.Auth(buf)
buf = buf[len(self.verf):]
self.stat = struct.unpack('>I', buf[:4])[0]
if self.stat == SUCCESS:
self.data = buf[4:]
elif self.stat == PROG_MISMATCH:
self.low, self.high = struct.unpack('>II', buf[4:12])
self.data = buf[12:]
def __len__(self):
if self.stat == PROG_MISMATCH: n = 8
else: n = 0
return len(self.verf) + 4 + n + len(self.data)
def __str__(self):
if self.stat == PROG_MISMATCH:
return str(self.verf) + struct.pack('>III', self.stat,
self.low, self.high) + self.data
return str(self.verf) + dpkt.Packet.__str__(self)
class Reject(dpkt.Packet):
__hdr__ = (('stat', 'I', AUTH_ERROR), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == RPC_MISMATCH:
self.low, self.high = struct.unpack('>II', self.data[:8])
self.data = self.data[8:]
elif self.stat == AUTH_ERROR:
self.why = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
def __len__(self):
if self.stat == RPC_MISMATCH: n = 8
                elif self.stat == AUTH_ERROR: n = 4
else: n = 0
return 4 + n + len(self.data)
def __str__(self):
if self.stat == RPC_MISMATCH:
return struct.pack('>III', self.stat, self.low,
self.high) + self.data
elif self.stat == AUTH_ERROR:
return struct.pack('>II', self.stat, self.why) + self.data
return dpkt.Packet.__str__(self)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == MSG_ACCEPTED:
self.data = self.accept = self.Accept(self.data)
            elif self.stat == MSG_DENIED:
self.data = self.reject = self.Reject(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.dir == CALL:
self.data = self.call = self.Call(self.data)
elif self.dir == REPLY:
self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
l = []
while buf:
if buf.startswith('\x00\x00\x00\x01'):
p = cls(buf[4:])
l.append(p)
buf = p.data
elif buf.startswith('\x00\x00\x00\x00'):
break
else:
raise dpkt.UnpackError, 'invalid XDR list'
return l
def pack_xdrlist(*args):
return '\x00\x00\x00\x01'.join(map(str, args)) + '\x00\x00\x00\x00'
| {
"repo_name": "guke001/QMarkdowner",
"path": "dpkt/rpc.py",
"copies": "15",
"size": "4858",
"license": "mit",
"hash": 6604804491012872000,
"line_mean": 32.2739726027,
"line_max": 78,
"alpha_frac": 0.469740634,
"autogenerated": false,
"ratio": 3.4675231977159173,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: rpc.py 271 2006-01-11 16:03:33Z dugsong $
"""Remote Procedure Call."""
import struct
import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
__hdr__ = (
('xid', 'I', 0),
('dir', 'I', CALL)
)
class Auth(dpkt.Packet):
__hdr__ = (('flavor', 'I', AUTH_NONE), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:4+n]
def __len__(self):
return 8 + len(self.data)
def __str__(self):
return self.pack_hdr() + struct.pack('>I', len(self.data)) + \
str(self.data)
class Call(dpkt.Packet):
__hdr__ = (
('rpcvers', 'I', 2),
('prog', 'I', 0),
('vers', 'I', 0),
('proc', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cred = RPC.Auth(self.data)
self.verf = RPC.Auth(self.data[len(self.cred):])
self.data = self.data[len(self.cred) + len(self.verf):]
def __len__(self):
return len(str(self)) # XXX
def __str__(self):
return dpkt.Packet.__str__(self) + \
str(getattr(self, 'cred', RPC.Auth())) + \
str(getattr(self, 'verf', RPC.Auth())) + \
str(self.data)
class Reply(dpkt.Packet):
__hdr__ = (('stat', 'I', MSG_ACCEPTED), )
class Accept(dpkt.Packet):
__hdr__ = (('stat', 'I', SUCCESS), )
def unpack(self, buf):
self.verf = RPC.Auth(buf)
buf = buf[len(self.verf):]
self.stat = struct.unpack('>I', buf[:4])[0]
if self.stat == SUCCESS:
self.data = buf[4:]
elif self.stat == PROG_MISMATCH:
self.low, self.high = struct.unpack('>II', buf[4:12])
self.data = buf[12:]
def __len__(self):
if self.stat == PROG_MISMATCH: n = 8
else: n = 0
return len(self.verf) + 4 + n + len(self.data)
def __str__(self):
if self.stat == PROG_MISMATCH:
return str(self.verf) + struct.pack('>III', self.stat,
self.low, self.high) + self.data
return str(self.verf) + dpkt.Packet.__str__(self)
class Reject(dpkt.Packet):
__hdr__ = (('stat', 'I', AUTH_ERROR), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == RPC_MISMATCH:
self.low, self.high = struct.unpack('>II', self.data[:8])
self.data = self.data[8:]
elif self.stat == AUTH_ERROR:
self.why = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
def __len__(self):
if self.stat == RPC_MISMATCH: n = 8
                elif self.stat == AUTH_ERROR: n = 4
else: n = 0
return 4 + n + len(self.data)
def __str__(self):
if self.stat == RPC_MISMATCH:
return struct.pack('>III', self.stat, self.low,
self.high) + self.data
elif self.stat == AUTH_ERROR:
return struct.pack('>II', self.stat, self.why) + self.data
return dpkt.Packet.__str__(self)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == MSG_ACCEPTED:
self.data = self.accept = self.Accept(self.data)
            elif self.stat == MSG_DENIED:
self.data = self.reject = self.Reject(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.dir == CALL:
self.data = self.call = self.Call(self.data)
elif self.dir == REPLY:
self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
l = []
while buf:
if buf.startswith('\x00\x00\x00\x01'):
p = cls(buf[4:])
l.append(p)
buf = p.data
elif buf.startswith('\x00\x00\x00\x00'):
break
else:
raise dpkt.UnpackError, 'invalid XDR list'
return l
def pack_xdrlist(*args):
return '\x00\x00\x00\x01'.join(map(str, args)) + '\x00\x00\x00\x00'
| {
"repo_name": "MercenaryLogic/StompingGround",
"path": "stompingground/dpkt/rpc.py",
"copies": "1",
"size": "4859",
"license": "mit",
"hash": -2740422560684032000,
"line_mean": 32.2808219178,
"line_max": 78,
"alpha_frac": 0.4698497633,
"autogenerated": false,
"ratio": 3.465763195435093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9382056404407622,
"avg_score": 0.010711310865494305,
"num_lines": 146
} |
"""$Id: rss.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from logging import *
from validators import noduplicates
#
# Rss element. The only valid child element is "channel"
#
class rss(validatorBase):
def do_channel(self):
from channel import rss20Channel
return rss20Channel(), noduplicates()
def do_access_restriction(self):
from extension import access_restriction
return access_restriction(), noduplicates()
def getExpectedAttrNames(self):
return [(None, u'version')]
def prevalidate(self):
self.setFeedType(TYPE_RSS2) # could be anything in the 0.9x family, don't really care
self.version = "2.0"
if (None,'version') not in self.attrs.getNames():
self.log(MissingAttribute({"parent":self.parent.name, "element":self.name, "attr":"version"}))
elif [e for e in self.dispatcher.loggedEvents if e.__class__==ValidDoctype]:
self.version = self.attrs[(None,'version')]
if self.attrs[(None,'version')]<>'0.91':
self.log(InvalidDoctype({"parent":self.parent.name, "element":self.name, "attr":"version"}))
else:
self.version = self.attrs[(None,'version')]
def validate(self):
if not "channel" in self.children:
self.log(MissingElement({"parent":self.name, "element":"channel"}))
| {
"repo_name": "stone5495/NewsBlur",
"path": "vendor/feedvalidator/rss.py",
"copies": "16",
"size": "1563",
"license": "mit",
"hash": -2125336112809196800,
"line_mean": 37.1219512195,
"line_max": 100,
"alpha_frac": 0.6769033909,
"autogenerated": false,
"ratio": 3.383116883116883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02051701985880553,
"num_lines": 41
} |
# $Id: rtp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Real-Time Transport Protocol."""
from __future__ import absolute_import
from .dpkt import Packet
from .decorators import deprecated
# version 1100 0000 0000 0000 ! 0xC000 14
# p 0010 0000 0000 0000 ! 0x2000 13
# x 0001 0000 0000 0000 ! 0x1000 12
# cc 0000 1111 0000 0000 ! 0x0F00 8
# m 0000 0000 1000 0000 ! 0x0080 7
# pt 0000 0000 0111 1111 ! 0x007F 0
#
_VERSION_MASK = 0xC000
_P_MASK = 0x2000
_X_MASK = 0x1000
_CC_MASK = 0x0F00
_M_MASK = 0x0080
_PT_MASK = 0x007F
_VERSION_SHIFT = 14
_P_SHIFT = 13
_X_SHIFT = 12
_CC_SHIFT = 8
_M_SHIFT = 7
_PT_SHIFT = 0
VERSION = 2
class RTP(Packet):
"""Real-Time Transport Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of RTP.
TODO.
"""
__hdr__ = (
('_type', 'H', 0x8000),
('seq', 'H', 0),
('ts', 'I', 0),
('ssrc', 'I', 0),
)
csrc = b''
@property
def version(self): return (self._type & _VERSION_MASK) >> _VERSION_SHIFT
@version.setter
def version(self, ver):
self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
@property
def p(self): return (self._type & _P_MASK) >> _P_SHIFT
@p.setter
def p(self, p): self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
@property
def x(self): return (self._type & _X_MASK) >> _X_SHIFT
@x.setter
def x(self, x): self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
@property
def cc(self): return (self._type & _CC_MASK) >> _CC_SHIFT
@cc.setter
def cc(self, cc): self._type = (cc << _CC_SHIFT) | (self._type & ~_CC_MASK)
@property
def m(self): return (self._type & _M_MASK) >> _M_SHIFT
@m.setter
def m(self, m): self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
@property
def pt(self): return (self._type & _PT_MASK) >> _PT_SHIFT
@pt.setter
    def pt(self, pt): self._type = (pt << _PT_SHIFT) | (self._type & ~_PT_MASK)
def __len__(self):
return self.__hdr_len__ + len(self.csrc) + len(self.data)
def __bytes__(self):
return self.pack_hdr() + self.csrc + bytes(self.data)
def unpack(self, buf):
super(RTP, self).unpack(buf)
self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
self.data = buf[self.__hdr_len__ + self.cc * 4:]
def test_rtp():
rtp = RTP(b"\x80\x08\x4d\x01\x00\x01\x00\xe0\x34\x3f\xfa\x34\x53\x53\x53\x56\x53\x5d\x56\x57\xd5\xd6\xd1\xde\xdf\xd3\xd9\xda\xdf\xdc\xdf\xd8\xdd\xd4\xdd\xd9\xd1\xd6\xdc\xda\xde\xdd\xc7\xc1\xdf\xdf\xda\xdb\xdd\xdd\xc4\xd9\x55\x57\xd4\x50\x44\x44\x5b\x44\x4f\x4c\x47\x40\x4c\x47\x59\x5b\x58\x5d\x56\x56\x53\x56\xd5\xd5\x54\x55\xd6\xd6\xd4\xd1\xd1\xd0\xd1\xd5\xdd\xd6\x55\xd4\xd6\xd1\xd4\xd6\xd7\xd7\xd5\xd4\xd0\xd7\xd1\xd4\xd2\xdc\xd6\xdc\xdf\xdc\xdd\xd2\xde\xdc\xd0\xdd\xdc\xd0\xd6\xd6\xd6\x55\x54\x55\x57\x57\x56\x50\x50\x5c\x5c\x52\x5d\x5d\x5f\x5e\x5d\x5e\x52\x50\x52\x56\x54\x57\x55\x55\xd4\xd7\x55\xd5\x55\x55\x55\x55\x55\x54\x57\x54\x55\x55\xd5\xd5\xd7\xd6\xd7\xd1\xd1\xd3\xd2\xd3\xd2\xd2\xd3\xd3")
assert (rtp.version == 2)
assert (rtp.p == 0)
assert (rtp.x == 0)
assert (rtp.cc == 0)
assert (rtp.m == 0)
assert (rtp.pt == 8)
assert (rtp.seq == 19713)
assert (rtp.ts == 65760)
assert (rtp.ssrc == 0x343ffa34)
assert (len(rtp) == 172)
assert (bytes(rtp) == b"\x80\x08\x4d\x01\x00\x01\x00\xe0\x34\x3f\xfa\x34\x53\x53\x53\x56\x53\x5d\x56\x57\xd5\xd6\xd1\xde\xdf\xd3\xd9\xda\xdf\xdc\xdf\xd8\xdd\xd4\xdd\xd9\xd1\xd6\xdc\xda\xde\xdd\xc7\xc1\xdf\xdf\xda\xdb\xdd\xdd\xc4\xd9\x55\x57\xd4\x50\x44\x44\x5b\x44\x4f\x4c\x47\x40\x4c\x47\x59\x5b\x58\x5d\x56\x56\x53\x56\xd5\xd5\x54\x55\xd6\xd6\xd4\xd1\xd1\xd0\xd1\xd5\xdd\xd6\x55\xd4\xd6\xd1\xd4\xd6\xd7\xd7\xd5\xd4\xd0\xd7\xd1\xd4\xd2\xdc\xd6\xdc\xdf\xdc\xdd\xd2\xde\xdc\xd0\xdd\xdc\xd0\xd6\xd6\xd6\x55\x54\x55\x57\x57\x56\x50\x50\x5c\x5c\x52\x5d\x5d\x5f\x5e\x5d\x5e\x52\x50\x52\x56\x54\x57\x55\x55\xd4\xd7\x55\xd5\x55\x55\x55\x55\x55\x54\x57\x54\x55\x55\xd5\xd5\xd7\xd6\xd7\xd1\xd1\xd3\xd2\xd3\xd2\xd2\xd3\xd3")
# the following tests RTP header setters
rtp = RTP()
rtp.m = 1
rtp.pt = 3
rtp.seq = 1234
rtp.ts = 5678
rtp.ssrc = 0xabcdef01
assert (rtp.m == 1)
assert (rtp.pt == 3)
assert (rtp.seq == 1234)
assert (rtp.ts == 5678)
assert (rtp.ssrc == 0xabcdef01)
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/rtp.py",
"copies": "3",
"size": "4445",
"license": "bsd-3-clause",
"hash": 1335018711523291600,
"line_mean": 34.8467741935,
"line_max": 718,
"alpha_frac": 0.6105736783,
"autogenerated": false,
"ratio": 2.0868544600938965,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9194794862275302,
"avg_score": 0.0005266552237189198,
"num_lines": 124
} |
# $Id: rtp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Real-Time Transport Protocol"""
from dpkt import Packet
from decorators import deprecated
# version 1100 0000 0000 0000 ! 0xC000 14
# p 0010 0000 0000 0000 ! 0x2000 13
# x 0001 0000 0000 0000 ! 0x1000 12
# cc 0000 1111 0000 0000 ! 0x0F00 8
# m 0000 0000 1000 0000 ! 0x0080 7
# pt 0000 0000 0111 1111 ! 0x007F 0
#
_VERSION_MASK = 0xC000
_P_MASK = 0x2000
_X_MASK = 0x1000
_CC_MASK = 0x0F00
_M_MASK = 0x0080
_PT_MASK = 0x007F
_VERSION_SHIFT = 14
_P_SHIFT = 13
_X_SHIFT = 12
_CC_SHIFT = 8
_M_SHIFT = 7
_PT_SHIFT = 0
VERSION = 2
class RTP(Packet):
__hdr__ = (
('_type', 'H', 0x8000),
('seq', 'H', 0),
('ts', 'I', 0),
('ssrc', 'I', 0),
)
csrc = ''
@property
def version(self): return (self._type & _VERSION_MASK) >> _VERSION_SHIFT
@version.setter
def version(self, ver):
self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
@property
def p(self): return (self._type & _P_MASK) >> _P_SHIFT
@p.setter
def p(self, p): self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
@property
def x(self): return (self._type & _X_MASK) >> _X_SHIFT
@x.setter
def x(self, x): self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
@property
def cc(self): return (self._type & _CC_MASK) >> _CC_SHIFT
@cc.setter
def cc(self, cc): self._type = (cc << _CC_SHIFT) | (self._type & ~_CC_MASK)
@property
def m(self): return (self._type & _M_MASK) >> _M_SHIFT
@m.setter
def m(self, m): self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
@property
def pt(self): return (self._type & _PT_MASK) >> _PT_SHIFT
@pt.setter
    def pt(self, pt): self._type = (pt << _PT_SHIFT) | (self._type & ~_PT_MASK)
# Deprecated methods, will be removed in the future
# =================================================
@deprecated('version')
def _get_version(self): return self.version
@deprecated('version')
def _set_version(self, ver): self.version = ver
@deprecated('p')
def _get_p(self): return self.p
@deprecated('p')
def _set_p(self, p): self.p = p
@deprecated('x')
def _get_x(self): return self.x
@deprecated('x')
def _set_x(self, x): self.x = x
@deprecated('cc')
def _get_cc(self): return self.cc
@deprecated('cc')
def _set_cc(self, cc): self.cc = cc
@deprecated('m')
def _get_m(self): return self.m
@deprecated('m')
def _set_m(self, m): self.m = m
@deprecated('pt')
def _get_pt(self): return self.pt
@deprecated('pt')
def _set_pt(self, pt): self.pt = pt
# =================================================
def __len__(self):
return self.__hdr_len__ + len(self.csrc) + len(self.data)
def __str__(self):
return self.pack_hdr() + self.csrc + str(self.data)
def unpack(self, buf):
super(RTP, self).unpack(buf)
self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
self.data = buf[self.__hdr_len__ + self.cc * 4:]
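# Example (sketch): the properties above pack into the single 16-bit
# ``_type`` word according to the mask table at the top of this file:
#
#     r = RTP()          # _type defaults to 0x8000, i.e. version 2
#     r.pt = 8           # PCMA payload type
#     assert r.version == VERSION and r._type == 0x8008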
| {
"repo_name": "hexcap/dpkt",
"path": "dpkt/rtp.py",
"copies": "6",
"size": "3148",
"license": "bsd-3-clause",
"hash": 5765775470230767000,
"line_mean": 23.7874015748,
"line_max": 79,
"alpha_frac": 0.5371664549,
"autogenerated": false,
"ratio": 2.8880733944954127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6425239849395412,
"avg_score": null,
"num_lines": null
} |
# $Id: rtp.py 23 2006-11-08 15:45:33Z dugsong $
"""Real-Time Transport Protocol"""
from dpkt import Packet
# version 1100 0000 0000 0000 ! 0xC000 14
# p 0010 0000 0000 0000 ! 0x2000 13
# x 0001 0000 0000 0000 ! 0x1000 12
# cc 0000 1111 0000 0000 ! 0x0F00 8
# m 0000 0000 1000 0000 ! 0x0080 7
# pt 0000 0000 0111 1111 ! 0x007F 0
#
_VERSION_MASK = 0xC000
_P_MASK = 0x2000
_X_MASK = 0x1000
_CC_MASK = 0x0F00
_M_MASK = 0x0080
_PT_MASK = 0x007F
_VERSION_SHIFT=14
_P_SHIFT = 13
_X_SHIFT = 12
_CC_SHIFT = 8
_M_SHIFT = 7
_PT_SHIFT = 0
VERSION = 2
class RTP(Packet):
__hdr__ = (
('_type', 'H', 0x8000),
('seq', 'H', 0),
('ts', 'I', 0),
('ssrc', 'I', 0),
)
csrc = ''
    def _get_version(self): return (self._type & _VERSION_MASK) >> _VERSION_SHIFT
    def _set_version(self, ver):
        self._type = (ver << _VERSION_SHIFT) | (self._type & ~_VERSION_MASK)
    def _get_p(self): return (self._type & _P_MASK) >> _P_SHIFT
    def _set_p(self, p): self._type = (p << _P_SHIFT) | (self._type & ~_P_MASK)
    def _get_x(self): return (self._type & _X_MASK) >> _X_SHIFT
    def _set_x(self, x): self._type = (x << _X_SHIFT) | (self._type & ~_X_MASK)
    def _get_cc(self): return (self._type & _CC_MASK) >> _CC_SHIFT
    def _set_cc(self, cc): self._type = (cc << _CC_SHIFT) | (self._type & ~_CC_MASK)
    def _get_m(self): return (self._type & _M_MASK) >> _M_SHIFT
    def _set_m(self, m): self._type = (m << _M_SHIFT) | (self._type & ~_M_MASK)
    def _get_pt(self): return (self._type & _PT_MASK) >> _PT_SHIFT
    def _set_pt(self, pt): self._type = (pt << _PT_SHIFT) | (self._type & ~_PT_MASK)
version = property(_get_version, _set_version)
p = property(_get_p, _set_p)
x = property(_get_x, _set_x)
cc = property(_get_cc, _set_cc)
m = property(_get_m, _set_m)
pt = property(_get_pt, _set_pt)
def __len__(self):
return self.__hdr_len__ + len(self.csrc) + len(self.data)
def __str__(self):
return self.pack_hdr() + self.csrc + str(self.data)
def unpack(self, buf):
super(RTP, self).unpack(buf)
self.csrc = buf[self.__hdr_len__:self.__hdr_len__ + self.cc * 4]
self.data = buf[self.__hdr_len__ + self.cc * 4:]
| {
"repo_name": "ashrith/dpkt",
"path": "dpkt/rtp.py",
"copies": "15",
"size": "2306",
"license": "bsd-3-clause",
"hash": -1694694482503197000,
"line_mean": 31.9428571429,
"line_max": 79,
"alpha_frac": 0.540329575,
"autogenerated": false,
"ratio": 2.5593784683684793,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: rvp.py,v 1.5 2004/11/09 11:29:31 dvd Exp $
# embedding sample for RVP, a part of RNV, http://davidashen.net/rnv.html
# code kept simple to show the technique, not to provide a general purpose
# module.
#
# details of the protocol are in a long comment near the start of rvp.c
#
import sys, os, string, re, xml.parsers.expat
global rvpin,rvpout,pat,errors,parser,text,ismixed,prevline,prevcol
# raised by resp if it gets something the module does not understand
class ProtocolError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# run RVP with grammar specified on the command line
def launch():
global rvpin,rvpout
inp,out=os.popen2('rvp '+string.join(sys.argv[1:],' '))
rvpin,rvpout=inp.fileno(),out.fileno() # os.read|os.write will be used
# terminate string with zero, encode in utf-8 and then send to RVP
def send(s):
os.write(rvpin,s.encode('UTF-8')+'\0')
# receive a zero-terminated response from RVP, zero byte is dropped
def recv():
s=''
while 1:
s=s+os.read(rvpout,16) # 16 is good for ok responses, errors should be rare
if(s[-1]=='\0'): break # last character in a response is always '\0'
return s[:-1]
# return current pattern value, and print error message on stderr, if any
def resp():
global errors,prevline,prevcol
r=string.split(recv(),' ',3)
if(r[0]=='ok'): return r[1]
if(r[0]=='error'):
errors=1
    if(r[3]!=''): # if the error string is empty, the error
                  # occurred in an already-erroneous state; don't report
line,col=parser.ErrorLineNumber,parser.ErrorColumnNumber
if(line!=prevline or col!=prevcol): # one report per file position
sys.stderr.write(str(line)+","+str(col)+": "+r[3])
prevline,prevcol=line,col
return r[1]
if(r[0]=='er'):
errors=1
return r[1]
raise ProtocolError,"unexpected response '"+r[0]+"'"
def start_tag_open(cur,name):
send('start-tag-open '+cur+' '+name)
return resp()
def attribute(cur,name,val):
send('attribute '+cur+' '+name+' '+val)
return resp()
def start_tag_close(cur,name):
send('start-tag-close '+cur+' '+name)
return resp()
def end_tag(cur,name):
send('end-tag '+cur+' '+name)
return resp()
def textonly(cur,text):
send('text '+cur+' '+text)
return resp()
# in mixed content, whitespace is simply discarded, and any
# non-whitespace is equal; but this optimization gives only
# 5% increase in speed at most in practical cases
def mixed(cur,text):
if(re.search('[^\t\n ]',text)):
send('mixed '+cur+' .')
return resp()
else: return cur
def start(g):
send('start '+g)
return resp()
def quit():
send('quit')
return resp()
# Expat handlers
# Expat may deliver character data in several chunks rather than a
# single text node; CharacterDataHandler therefore concatenates the
# chunks into text, and flush_text passes the accumulated text to the
# validator
def flush_text():
global ismixed,pat,text
if(ismixed):
pat=mixed(pat,text)
else:
pat=textonly(pat,text)
text=''
def start_element(name,attrs):
global ismixed,pat
ismixed=1
flush_text()
pat=start_tag_open(pat,name)
ismixed=0
for n,v in attrs.items(): pat=attribute(pat,n,v)
pat=start_tag_close(pat,name)
def end_element(name):
global ismixed,pat
flush_text()
pat=end_tag(pat,name)
ismixed=1
def characters(data):
global text
text=text+data
# Main
errors=0
launch()
pat=start('0') # that is, start of the first grammar;
# multiple grammars can be passed to rvp
parser=xml.parsers.expat.ParserCreate('UTF-8',':') # last colon in the name
# separates local name from namespace URI
parser.StartElementHandler=start_element
parser.EndElementHandler=end_element
parser.CharacterDataHandler=characters
text=''
prevline,prevcol=-1,-1
parser.ParseFile(sys.stdin)
quit() # this stops RVP; many files can be validated with a single RVP
# running, concurrently or sequentially
sys.exit(errors)
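# Usage sketch (illustrative, not part of the original sample): with
# the 'rvp' binary from RNV on PATH, validate a document against a
# RELAX NG grammar:
#
#   python rvp.py grammar.rnc < document.xml
#
# The process exit status is non-zero iff validation errors were seen.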
| {
"repo_name": "dtolpin/RNV",
"path": "tools/rvp.py",
"copies": "1",
"size": "3966",
"license": "bsd-3-clause",
"hash": -7261839087932771000,
"line_mean": 25.6174496644,
"line_max": 79,
"alpha_frac": 0.6835602622,
"autogenerated": false,
"ratio": 3.165203511572227,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4348763773772226,
"avg_score": null,
"num_lines": null
} |
# $Id: rx.py 23 2006-11-08 15:45:33Z jonojono $
"""Rx Protocol."""
import dpkt
# Types
DATA = 0x01
ACK = 0x02
BUSY = 0x03
ABORT = 0x04
ACKALL = 0x05
CHALLENGE = 0x06
RESPONSE = 0x07
DEBUG = 0x08
# Flags
CLIENT_INITIATED = 0x01
REQUEST_ACK = 0x02
LAST_PACKET = 0x04
MORE_PACKETS = 0x08
SLOW_START_OK = 0x20
JUMBO_PACKET = 0x20
# Security
SEC_NONE = 0x00
SEC_BCRYPT = 0x01
SEC_RXKAD = 0x02
SEC_RXKAD_ENC = 0x03
class Rx(dpkt.Packet):
__hdr__ = (
('epoch', 'I', 0),
('cid', 'I', 0),
('call', 'I', 1),
('seq', 'I', 0),
('serial', 'I', 1),
('type', 'B', 0),
('flags', 'B', CLIENT_INITIATED),
('status', 'B', 0),
('security', 'B', 0),
('sum', 'H', 0),
('service', 'H', 0)
)
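if __name__ == '__main__':
    # Illustrative round-trip check (not part of the original module):
    # the header fields above pack to 28 bytes and parse back cleanly.
    pkt = Rx(type=DATA, flags=CLIENT_INITIATED | REQUEST_ACK)
    buf = str(pkt)
    assert len(buf) == 28
    parsed = Rx(buf)
    assert parsed.type == DATA and parsed.flags & REQUEST_ACK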
| {
"repo_name": "somethingnew2-0/CS642-HW2",
"path": "dpkt/rx.py",
"copies": "32",
"size": "1030",
"license": "mit",
"hash": -7849405836270798000,
"line_mean": 22.4090909091,
"line_max": 47,
"alpha_frac": 0.386407767,
"autogenerated": false,
"ratio": 3.03834808259587,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# re, shlex and sys are used below; import them explicitly rather than
# relying on the star imports further down.
import re
import shlex
import sys
import sadm_sandbox
import sadm_cmd
from sadm_constants import *
from sadm_util import *
from sadm_error import *
from sadm_dispatch import *
from sadm_config import *
from sadm_vcs import update_program_if_needed
from sadm_sandbox import get_by_name_pattern
# From buildscripts...
from textui.ansi import *
from textui.colors import *
from textui.getch import *
import sandbox
_PROMPT_OFFSET = 54
INTERACTIVE_MODE = 0
AUTOABORT_MODE = 1
AUTOCONFIRM_MODE = 2
MENU = ''
for cmd in [c for c in sadm_cmd.commands() if 'dev' in c.tags]:
syntax = cmd.syntax.ljust(25)
width = len(syntax)
params = syntax[len(cmd.verb):]
params = params.replace(' do ', ' ' + CMD_COLOR + 'do' + PARAM_COLOR + ' ')
syntax = CMD_COLOR + cmd.abbrev + NORMTXT + cmd.verb[len(cmd.abbrev):] + PARAM_COLOR + params
MENU += syntax + DELIM_COLOR + '- ' + NORMTXT + cmd.descrip + '\n'
class Prompt:
def __init__(self):
self._mode = None
self.has_shown_menu = False
def get_mode(self):
return self._mode
def set_mode(self, value):
self._mode = value
def can_interact(self):
return (self._mode is None) or (self._mode == INTERACTIVE_MODE)
# Create a global instance to be used by the program.
prompter = Prompt()
# Write a question to stdout. Wait for the user to answer it.
# If defaultValue is set, show the value that will be used
# if the user just presses Enter.
def prompt(q, defaultValue = None, color=PARAM_COLOR, mask=None):
mode = prompter.get_mode()
if mode is None:
prompter.set_mode(INTERACTIVE_MODE)
elif mode == AUTOCONFIRM_MODE:
return defaultValue
elif mode == AUTOABORT_MODE:
print('mode = %s' % str(mode))
raise Exception('Interactive prompting disallowed; aborting.')
while q.startswith('\n'):
print('')
q = q[1:]
txt = q
width = len(txt)
if not (defaultValue is None):
defaultValue = str(defaultValue)
txt = txt + " [" + color + str(defaultValue) + NORMTXT + "]"
width += 3 + len(defaultValue)
if width > _PROMPT_OFFSET:
i = txt[0:_PROMPT_OFFSET].rindex(' ')
print(txt[0:i])
txt = INDENT + txt[i + 1:].lstrip()
width = width - (i + 1) + len(INDENT)
if width < _PROMPT_OFFSET:
txt = txt + ' '*(_PROMPT_OFFSET-width)
writec(txt + ": " + color)
try:
if CYGWIN:
sys.stdout.flush()
if mask:
answer = readMasked()
else:
answer = sys.stdin.readline().strip()
if not answer:
answer = defaultValue
return answer
finally:
writec(NORMTXT)
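# Illustrative uses of prompt() (not part of the original module):
#
#   name = prompt('Sandbox name', defaultValue='trunk')
#   pw = prompt('Password', mask='*')   # keystrokes echo as '*'
#
# An empty answer yields defaultValue; in AUTOCONFIRM_MODE the default
# is returned without any interaction, and AUTOABORT_MODE raises.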
def readMasked():
value = ''
while True:
c = getch()
if (c == '\n') or (c == '\r') or (c == '\x1B'):
print('')
break
elif (c == '\x08'):
if len(value) > 0:
value = value[:-1]
sys.stdout.write('\b \b')
else:
value += c
sys.stdout.write('*')
return value
def _is_yes(answer):
return bool(answer) and 'ty1'.find(answer.lower()[0]) != -1
def prompt_bool(q, defaultValue):
mode = prompter.get_mode()
if mode == AUTOABORT_MODE:
return False
if mode == AUTOCONFIRM_MODE:
return True
answer = _is_yes(prompt(q, defaultValue))
return answer
_MENU_HDR = '\n' + cwrap(APP_TITLE, TITLE_COLOR) + '\n' + cwrap('-'*78, DELIM_COLOR) + '\n'
# Anchored at the end so that e.g. 'query' is not treated as quit
# (re.match already anchors the start).
_QUIT_PATTERN = re.compile(r'(?:q|quit|exit)$', re.IGNORECASE)
def _use_menu(symbols):
while True:
printc(_MENU_HDR)
printc(MENU)
try:
cmdline = prompt('Command ?', color=CMD_COLOR)
except KeyboardInterrupt:
printc('\n')
break
printc('')
try:
if not cmdline or _QUIT_PATTERN.match(cmdline):
break
args = shlex.split(cmdline)
err = dispatch(symbols, args)
except KeyboardInterrupt:
printc('')
except Exception:
write_error()
sys.exit(0)
# Display an interactive menu.
def interact(symbols):
if not prompter.can_interact():
return
prompter.set_mode(INTERACTIVE_MODE)
try:
update_program_if_needed(silent=True)
except SystemExit:
sys.exit(0)
except:
write_error()
prompter.has_shown_menu = True
_use_menu(symbols)
def _validate_sandbox_choice(sb, selector, verb):
if not (selector is None):
if not selector(sb):
if verb:
print('%s is not eligible for %s right now.' % (sb.get_name(), verb))
sb = None
return sb
# Prompt the user to choose a sandbox, and validate their choice. Return tuple
# where first value is name of sandbox, and second is pid for that ctest+sandbox
# combo, if applicable.
def _prompt_for_sandbox(verb, sandboxes=None, selector=None, allow_multi_matches=False, msg=None, display_only_matches=False):
if not prompter.can_interact():
return
prompter.set_mode(INTERACTIVE_MODE)
if sandboxes is None:
sandboxes = sandbox.list(config.sandbox_container_folder)
if not sandboxes:
print('No sandboxes defined.')
return
# If we're trying to allow selection of just a subset -- either ones
# that have active ctest instances, or ones that don't -- then check
# whether that is even valid.
subset = not (selector is None)
if subset:
if count(sandboxes, selector) == 0:
print('No sandboxes support %s right now.' % verb)
return
list_sandboxes(sandboxes, selector, choose=True, display_only_matches=display_only_matches)
print('')
if not msg:
msg = 'Sandbox?'
if allow_multi_matches:
msg += ' (wildcards match name; * or "all"=all)'
which = prompt(msg)
if not which:
return
if which=='all':
which='*'
selected = []
if which[0].isdigit():
which = int(which)
if which < 1 or which > len(sandboxes):
            pass  # out-of-range number; 'selected' stays empty and is reported below
else:
selected.append(sandboxes[which - 1])
else:
selected = get_by_name_pattern(sandboxes, which, allow_multi_matches)
if not selected:
print('No match for sandbox "%s".' % str(which))
selected = [sb for sb in selected if _validate_sandbox_choice(sb, selector, verb)]
    if allow_multi_matches:
        return selected
    if not selected:
        return None  # nothing matched, or nothing survived validation
    return selected[0]
_FUNC_TYPE = type(lambda x:x)
def choose_sandbox(verb, name=None, selector=None, enforce=True, allow_multi_matches=False, msg=None, display_only_matches=False):
'''
Figure out which sandbox(es) user wants to operate on.
'''
sb = None
if name=='all':
name='*'
elif name == 'last':
mostRecentlyStarted = sadm_sandbox.list_recent_starts(1)
if not mostRecentlyStarted:
return []
name = mostRecentlyStarted[0][0]
# Depending on how we're called, we might get either a name as the first
# arg and a selector as the second, a name with no selector, or a selector
# as the first arg and no name. In the latter case, swap args so we
# interpret correctly.
if (not (name is None)) and (type(name) == _FUNC_TYPE) and (selector is None):
selector = name
name = '*'
sandboxes = sandbox.list(config.sandbox_container_folder)
if not name:
sb = _prompt_for_sandbox(verb, sandboxes, selector, allow_multi_matches, msg, display_only_matches)
else:
sb = sadm_sandbox.get_by_name_pattern(sandboxes, name, allow_multi_matches)
if sb and enforce and (not (selector is None)):
sb = [s for s in sb if _validate_sandbox_choice(s, selector, verb)]
if sb and not allow_multi_matches:
sb = sb[0]
return sb
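# Illustrative call (not part of the original module): select every
# unlocked sandbox for a 'build' operation without prompting.
#
#   sbs = choose_sandbox('build', name='*', allow_multi_matches=True,
#                        selector=lambda s: not s.is_locked())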
# Display a list of all our sandboxes.
def list_sandboxes(sandboxes=None, selector=None, choose=False, display_only_matches=False):
# If we're not in interactive mode, there's no point in numbering
# the sandboxes so they can be selected.
if not prompter.can_interact():
choose = False
    if choose:
        sep = DELIM_COLOR + ' - ' + NORMTXT
    else:
        sep = ''
if not sandboxes:
sandboxes = sandbox.list(config.sandbox_container_folder)
elif type(sandboxes) != type([]):
sandboxes = choose_sandbox(None, sandboxes, allow_multi_matches=True)
if not sandboxes:
print('No sandboxes defined.')
else:
n = 1
for sb in sandboxes:
id = ''
if choose:
id = str(n)
if selector and not selector(sb):
if display_only_matches:
n += 1
continue
id = ' '.rjust(len(id))
row = ''
if id:
if n < 10:
row += ' '
row += PARAM_COLOR + id
if sep:
row += sep
row += PARAM_COLOR + sb.get_name().ljust(35)
row += DELIM_COLOR +' - ' + NORMTXT
if hasattr(sb, 'schedule') and (not (sb.schedule is None)):
row += str(sb.schedule)
else:
if sb.get_sandboxtype().get_should_schedule() and not config.schedule_continuous_manually:
row += 'auto-scheduled'
else:
row += 'unscheduled'
if sb.get_sandboxtype().get_should_publish():
row += '; publishing enabled'
if sb.is_locked():
row += ' (pid=%s)' % str(sb.get_lock_obj().pid)
printc(row)
n += 1
| {
"repo_name": "perfectsearch/sandman",
"path": "code/sadm/lib/sadm_prompt.py",
"copies": "1",
"size": "9959",
"license": "mit",
"hash": -900025863090103600,
"line_mean": 32.6452702703,
"line_max": 130,
"alpha_frac": 0.5796766744,
"autogenerated": false,
"ratio": 3.703607288955002,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9747376551891602,
"avg_score": 0.00718148229267985,
"num_lines": 296
} |
# os and re are used below (os.name, re.compile); import them
# explicitly rather than relying on the star imports.
import os
import re
import tarfile
from sadm_constants import *
from sadm_error import *
# From buildscripts...
from ioutil import *
import buildinfo
import sandbox
import sandboxtype
import component
if os.name == 'nt':
_PROCLIST_CMD = 'tasklist /V'
else:
_PROCLIST_CMD = 'ps -ef'
_PID_IN_CMD_LOG_PAT = re.compile(r'\s+start\s+(.*?)\s*=\s*pid\s*(\d+)')
if os.name == 'nt':
_PID_PAT = re.compile(r'^ctest.exe\s+(\d+).*')
else:
_PID_PAT = re.compile(r'^[_a-zA-Z0-9]+\s+(\d+)\s+.*ctest\s+-S\s+steer.cmake')
_CTEST_PROPERTY_PAT = re.compile(r'^\s*set\s*\(\s*(CTEST[^ \t]+)\s*(.*?)\s*\)', re.MULTILINE | re.IGNORECASE)
_REPO_FROM_FULLPATH_PAT = re.compile('(.*)/(trunk|branches)/.*')
def _get_archive_url(repo, branch, proj):
return join_path(repo, get_branch_path_in_svn(branch), proj)
def _is_linked_to_archive(buildRoot):
return os.path.isdir(os.path.join(buildRoot, ARCHIVE_FOLDER, '.bzr'))
def _load_best_template(aux_folder, baseNames, suffix = ''):
if '/\\'.find(aux_folder[-1]) == -1:
aux_folder += '/'
txt = ''
if type(baseNames) == type(''):
baseNames = [baseNames]
preferred = baseNames[0]
i = 0
for baseName in baseNames:
checkedIn = join_path(aux_folder, baseName)
#print('probing for %s' % checkedIn)
useCheckedIn = os.path.exists(checkedIn)
if not useCheckedIn:
checkedIn += CHECKEDIN_TEMPLATE_SUFFIX
#print('probing for %s' % checkedIn)
useCheckedIn = os.path.exists(checkedIn)
if useCheckedIn:
#print('Using checked-in %s as template for %s.' % (checkedIn[len(aux_folder):], preferred))
txt = read_file(checkedIn)
break
else:
#print('no checked in template')
if i == len(baseNames) - 1:
#print('loading %s' % preferred + suffix)
txt = load_template(preferred + suffix)
if txt is None:
raise Exception('No template found for %s.' % str(baseNames))
i += 1
return txt, useCheckedIn
# Write initial settings for an eclipse workspace corresponding to this sandbox.
# This is relevant even to cmake-driven projects, because eclipse may be used as
# a C++ IDE.
def configure_for_eclipse(sb, force=False):
path = join_path(sb.get_root(), ECLIPSE_METADATA_FOLDER)
if force or (not os.path.isdir(path)):
tar = tarfile.open(join_path(TEMPLATES_FOLDER, ECLIPSE_METADATA_ARCHIVE))
# Can't use tar.extractall() -- method didn't exist in python 2.4
for tarinfo in tar:
            # 'sb.get_root()' is assumed as the destination; the
            # original referenced an undefined 'self.getPath()'.
            tar.extract(tarinfo, sb.get_root())
# If this workspace will interact with ant in any way, set ant
# properties for code root and build root, so that running ant
# in the IDE and running from build scripts give identical results.
fpath = join_path(path, '.plugins/org.eclipse.core.runtime/.settings/org.eclipse.ant.core.prefs')
txt = read_file(fpath)
lines = txt.strip().split('\n')
lines = [l for l in lines if l.strip() and (not ANT_ROOTPROP_PAT.match(l))]
    lines.append('property.code.root=%s' % sb.get_code_root())
    lines.append('property.built.root=%s' % sb.get_built_root())
txt = '\n'.join(lines)
f = open(fpath, 'wt')
f.write(txt)
f.close()
def get_last_start_date(sb):
    '''
    Return the time when this sandbox was last started.
    '''
    # list_recent_starts() is the module-level function defined below;
    # the original reached it through an undefined 'Sandbox' name and
    # an undefined 'self'.
    recent = list_recent_starts()
    for entry in recent:
        #print(entry)
        if entry[0] == sb.get_name():
            return entry[2]
    return None
def list_recent_starts(max=50):
# Return the names, pids, and start times of the last few sandboxes that
# were started, most recent first.
recent = get_tail(CMD_LOG, 50, lambda x: x.find(' start') > -1 and x.find('= pid') > -1)
x = []
for line in reversed(recent):
m = _PID_IN_CMD_LOG_PAT.search(line)
if m:
when = line[0:line.find(' start')].replace(' ', ' ')
name = m.group(1)
# Disqualify old sadm sandboxes...
if '/' not in name:
x.append((name, m.group(2), when))
if len(x) >= max:
break
return x
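# Illustrative shape of the result (values made up):
#   [('mysandbox', '12345', '2011-03-01 09:30:12'), ...]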
def get_by_name_pattern(sandboxes, namePat, allow_multi_matches):
selected = []
namePat = namePat.strip().lower()
if allow_multi_matches:
namePat = namePat.replace('.', '\\.').replace('*', '.*').replace('?', '.')
regex = re.compile('^' + namePat + '$', re.IGNORECASE)
for sb in sandboxes:
if allow_multi_matches:
match = bool(regex.match(sb.get_name()))
else:
match = bool(sb.get_name().lower() == namePat)
if match:
selected.append(sb)
if not allow_multi_matches:
break
return selected
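# Illustrative behaviour: with allow_multi_matches=True the pattern is
# a shell-style wildcard, so
#   get_by_name_pattern(sandboxes, 'feature-*', True)
# returns every sandbox whose name starts with 'feature-'; with
# allow_multi_matches=False the name must match exactly (ignoring
# case) and at most one sandbox is returned.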
DEBUG_OR_RELEASE_PAT = re.compile(r'^set\s*\(\s*CMAKE_BUILD_TYPE\s+(Debug|Release)')
# These imports have to come at end of file to avoid circular import errors
from sadm_util import *
from sadm_schedule import Schedule
from sadm_config import *
| {
"repo_name": "perfectsearch/sandman",
"path": "code/sadm/lib/sadm_sandbox.py",
"copies": "1",
"size": "5293",
"license": "mit",
"hash": -8586137300789196000,
"line_mean": 36.8071428571,
"line_max": 109,
"alpha_frac": 0.5996599282,
"autogenerated": false,
"ratio": 3.3394321766561514,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44390921048561516,
"avg_score": null,
"num_lines": null
} |
"""Slider widgets for selecting a value from a range."""
from pygame import K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN, K_KP_MINUS, K_MINUS
from pygame import K_LEFT, K_UP, K_PAGEUP, K_PAGEDOWN, K_HOME, K_END, Rect
from Range import Range
from ocempgui.draw import Draw
from Constants import *
from StyleInformation import StyleInformation
import base
class Scale (Range):
"""Scale (minimum, maximum, step=1.0) -> Scale
An abstract scaling widget class for numerical value selections.
The Scale is an abstract widget class, which enhances the Range by
different events and an activation method. Concrete implementations
of it are the HScale, a horizontal scale widget, and the VScale,
    a vertical scale widget.
Inheriting widgets have to implement the _get_value_from_coords()
method, which calculates the value of the Scale using a pair of
absolute screen coordinates relative to a given area. Example
implementations can be found in the HScale and VScale widget
classes.
Default action (invoked by activate()):
Give the Scale the input focus.
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_MOUSEDOWN - Invoked, when a mouse button is pressed on the
Scale.
    SIG_MOUSEUP - Invoked, when a mouse button is released on the
Scale.
SIG_MOUSEMOVE - Invoked, when the mouse moves over the Scale.
"""
def __init__ (self, minimum, maximum, step=1.0):
Range.__init__ (self, minimum, maximum, step)
# Internal click and mouse enter detection.
self.__click = False
self._signals[SIG_MOUSEDOWN] = []
self._signals[SIG_MOUSEMOVE] = []
self._signals[SIG_MOUSEUP] = []
self._signals[SIG_KEYDOWN] = None # Dummy for keyboard activation.
def activate (self):
"""S.activate () -> None
Activates the Scale default action.
Activates the Scale default action. This usually means giving
the Scale the input focus.
"""
if not self.sensitive:
return
self.focus = True
def _get_value_from_coords (self, area, coords):
"""S._get_value_from_coords (...) -> float
Calculates the float value of the Scale.
Calculates the float value of the Scale from the passed
absolute coordinates tuple relative to the area.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def notify (self, event):
"""S.notify (...) -> None
Notifies the Scale about an event.
"""
if not self.sensitive:
return
if event.signal in SIGNALS_MOUSE:
eventarea = self.rect_to_client ()
if event.signal == SIG_MOUSEDOWN:
if eventarea.collidepoint (event.data.pos):
self.focus = True
if event.data.button == 1:
# Guarantee a correct look, if the signal
# handlers run a long time
self.state = STATE_ACTIVE
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
if event.data.button == 1: # Only react upon left clicks.
self.__click = True
val = self._get_value_from_coords (eventarea,
event.data.pos)
if val != self.value:
self.value = val
# Mouse wheel.
elif event.data.button == 4:
val = self.value - 2 * self.step
if val > self.minimum:
self.value = val
else:
self.value = self.minimum
elif event.data.button == 5:
val = self.value + 2 * self.step
if val < self.maximum:
self.value = val
else:
self.value = self.maximum
event.handled = True
elif event.signal == SIG_MOUSEUP:
if eventarea.collidepoint (event.data.pos):
if event.data.button == 1:
if self.state == STATE_ACTIVE:
self.state = STATE_ENTERED
else:
self.state = STATE_NORMAL
self.__click = False
self.run_signal_handlers (SIG_MOUSEUP, event.data)
event.handled = True
elif (event.data.button == 1) and self.__click:
self.state = STATE_NORMAL
self.__click = False
elif event.signal == SIG_MOUSEMOVE:
if eventarea.collidepoint (event.data.pos):
self.focus = True
self.run_signal_handlers (SIG_MOUSEMOVE, event.data)
if self.__click and self.focus:
val = self._get_value_from_coords (eventarea,
event.data.pos)
if val != self.value:
self.value = val
self.entered = True
event.handled = True
else:
self.entered = False
elif (event.signal == SIG_KEYDOWN) and self.focus:
if event.data.key in (K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN):
self.increase ()
event.handled = True
elif event.data.key in (K_KP_MINUS, K_MINUS, K_LEFT, K_UP):
self.decrease ()
event.handled = True
elif event.data.key == K_PAGEUP:
val = self.value - 10 * self.step
if val > self.minimum:
self.value = val
else:
self.value = self.minimum
event.handled = True
elif event.data.key == K_PAGEDOWN:
val = self.value + 10 * self.step
if val < self.maximum:
self.value = val
else:
self.value = self.maximum
event.handled = True
elif event.data.key == K_END:
self.value = self.maximum
event.handled = True
elif event.data.key == K_HOME:
self.value = self.minimum
event.handled = True
Range.notify (self, event)
class HScale (Scale):
"""HScale (minimum, maximum, step=1.0) -> HScale
A horizontal scaling widget for selecting numerical values.
The HScale widget is a scaling widget with a horizontal orientation
and allows the user to select and adjust a value from a range moving
a slider.
Default action (invoked by activate()):
See the Scale class.
Mnemonic action (invoked by activate_mnemonic()):
See the Scale class.
"""
def __init__ (self, minimum, maximum, step=1.0):
Scale.__init__ (self, minimum, maximum, step)
self.minsize = 120, 20 # Default size.
def _get_value_from_coords (self, area, coords):
"""H._get_value_from_coords (...) -> float
Calculates the float value of the HScale.
Calculates the float value of the HScale from the passed
absolute coordinates tuple relative to the area.
"""
# We need this for a proper calculation.
slider = StyleInformation.get ("HSCALE_SLIDER_SIZE")
# The slide range, in which the slider can move.
slide = self.width - slider[0]
# Calculate the absolute current position
n = coords[0] - area.left - slider[0] / 2.0
        # Step size, determined by the width and the value range of
        # the Scale.
step = (self.maximum - self.minimum) / float (slide)
# Calculate it.
val = self.minimum + step * n
if val > self.maximum:
val = self.maximum
elif val < self.minimum:
val = self.minimum
return val
def _get_coords_from_value (self):
"""H._get_coords_from_value () -> float
Calculates the coordinates from the current value of the HScale.
Calculates the relative coordinates on the HScale from the
current value.
"""
# We need this for a proper calculation.
slider = StyleInformation.get ("HSCALE_SLIDER_SIZE")
width = self.width
# The slide range in which the slider can move.
slide = width - slider[0]
        # Step size, determined by the width and the value range of
        # the Scale.
step = (self.maximum - self.minimum) / float (slide)
# Calculate the value
val = (self.value - self.minimum) / step + slider[0] / 2.0
return val
def draw_bg (self):
"""H.draw_bg () -> Surface
Draws the background surface of the HScale and returns it.
Creates the visible surface of the HScale and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_scale (self,
ORIENTATION_HORIZONTAL)
def draw (self):
"""H.draw () -> None
Draws the HScale surface and its slider.
"""
Scale.draw (self)
cls = self.__class__
style = base.GlobalStyle
active = StyleInformation.get ("ACTIVE_BORDER")
border = style.get_border_size (cls, self.style,
StyleInformation.get ("SCALE_BORDER"))
border_active = style.get_border_size (cls, self.style, active)
slider = StyleInformation.get ("HSCALE_SLIDER_SIZE")
# Creates the slider surface.
sf_slider = style.engine.draw_slider (slider[0], slider[1],
self.state, cls, self.style)
rect_slider = sf_slider.get_rect ()
# Dashed border.
if self.focus:
b = border + border_active
r = Rect (b, b, rect_slider.width - 2 * b,
rect_slider.height - 2 * b)
style.engine.draw_border \
(sf_slider, self.state, cls, self.style, active, r,
StyleInformation.get ("ACTIVE_BORDER_SPACE"))
size = self.width - 2 * border, self.height / 3 - 2 * border
# Fill the scale line.
sf_fill = Draw.draw_rect (size[0], size[1],
StyleInformation.get ("SCALE_COLOR"))
self.image.blit (sf_fill, (border, self.height / 3 + border))
# Blit slider at the correct position.
rect_slider.centerx = self._get_coords_from_value ()
rect_slider.centery = self.image.get_rect ().centery
self.image.blit (sf_slider, rect_slider)
# Fill until the slider start.
if rect_slider.x > 0:
sf_fill = Draw.draw_rect (rect_slider.x - border, size[1],
StyleInformation.get ("PROGRESS_COLOR"))
self.image.blit (sf_fill, (border, self.height / 3 + border))
class VScale (Scale):
"""VScale (minimum, maximum, step=1.0) -> VScale
A vertical scaling widget for selecting numerical values.
The VScale widget is a scaling widget with a vertical orientation
and allows the user to select and adjust a value from a range moving
a slider.
Default action (invoked by activate()):
See the Scale class.
Mnemonic action (invoked by activate_mnemonic()):
See the Scale class.
"""
def __init__ (self, minimum, maximum, step=1.0):
Scale.__init__ (self, minimum, maximum, step)
self.minsize = 20, 120 # Default size.
def _get_value_from_coords (self, area, coords):
"""V._get_value_from_coords (...) -> float
Calculates the float value of the VScale.
Calculates the float value of the VScale from the passed
absolute coordinates tuple relative to the area.
"""
# We need this for a proper calculation.
slider = StyleInformation.get ("VSCALE_SLIDER_SIZE")
# The slide range, in which the slider can move.
slide = self.height - slider[1]
# Calculate the absolute current position
n = coords[1] - area.top - slider[1] / 2.0
        # Step size, determined by the height and the value range of
        # the Scale.
step = (self.maximum - self.minimum) / float (slide)
# Calculate it.
val = self.minimum + step * n
if val > self.maximum:
val = self.maximum
elif val < self.minimum:
val = self.minimum
return val
def _get_coords_from_value (self):
"""V.get_coords_from_value () -> float
Calculates the coordinates from the current value of the VScale.
Calculates the relative coordinates on the VScale from the
current value.
"""
# We need this for a proper calculation.
slider = StyleInformation.get ("VSCALE_SLIDER_SIZE")
height = self.height
# The slide range in which the slider can move.
slide = height - slider[1]
        # Step size, determined by the height and the value range of
        # the Scale.
step = (self.maximum - self.minimum) / float (slide)
# Calculate the value
val = (self.value - self.minimum) / step + slider[1] / 2.0
return val
def draw_bg (self):
"""V.draw_bg () -> Surface
Draws the VScale background surface and returns it.
Creates the visible surface of the VScale and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_scale (self, ORIENTATION_VERTICAL)
def draw (self):
"""V.draw () -> None
Draws the VScale surface and its slider.
"""
Scale.draw (self)
cls = self.__class__
style = base.GlobalStyle
border = style.get_border_size (cls, self.style,
StyleInformation.get ("SCALE_BORDER"))
active = StyleInformation.get ("ACTIVE_BORDER")
border_active = style.get_border_size (cls, self.style, active)
slider = StyleInformation.get ("VSCALE_SLIDER_SIZE")
# Creates the slider surface.
sf_slider = style.engine.draw_slider (slider[0], slider[1], self.state,
cls, self.style)
rect_slider = sf_slider.get_rect ()
# Dashed border.
if self.focus:
b = border + border_active
r = Rect (b, b, rect_slider.width - 2 * b,
rect_slider.height - 2 * b)
style.engine.draw_border \
(sf_slider, self.state, cls, self.style, active, r,
StyleInformation.get ("ACTIVE_BORDER_SPACE"))
size = self.width / 3 - 2 * border, self.height - 2 * border
# Fill the scale line.
sf_fill = Draw.draw_rect (size[0], size[1],
StyleInformation.get ("SCALE_COLOR"))
self.image.blit (sf_fill, (self.width / 3 + border, border))
# Blit slider at the correct position.
rect_slider.centerx = self.image.get_rect ().centerx
rect_slider.centery = self._get_coords_from_value ()
self.image.blit (sf_slider, rect_slider)
# Fill until the slider start.
if rect_slider.y > 0:
sf_fill = Draw.draw_rect (size[0], rect_slider.y - border,
StyleInformation.get ("PROGRESS_COLOR"))
self.image.blit (sf_fill, (self.width / 3 + border, border))
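# Usage sketch (illustrative, not shipped with ocempgui); it uses only
# the Range API exercised above (value, increase, decrease):
#
#   scale = HScale (0.0, 100.0, step=0.5)
#   scale.value = 25.0
#   scale.increase ()   # -> 25.5
#   scale.decrease ()   # -> 25.0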
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Scale.py",
"copies": "1",
"size": "17399",
"license": "bsd-2-clause",
"hash": 7664209984137610000,
"line_mean": 37.3237885463,
"line_max": 79,
"alpha_frac": 0.5627334904,
"autogenerated": false,
"ratio": 4.401467240070832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5464200730470832,
"avg_score": null,
"num_lines": null
} |
"""
Function to scan the sub-directory structure in a given directory.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from os.path import join, isdir, normpath
import os
import logging
logger = logging.getLogger("ScanDirectories")
#logger.setLevel(logging.INFO)
# Scan the sub-directory structure in a given directory
#
# Exceptions are left to the calling program.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# FileFunc a function to be called for each selected file name
# as FileFunc( file ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectoriesEx(srcdir, DirFunc, FileFunc=None, recursive=True):
"""
Scan all sub-directories in a given source directory.
Exceptions are thrown back to the calling program.
"""
if not srcdir.endswith(os.path.sep): srcdir += os.path.sep
directoryList = os.listdir(srcdir)
for directoryComponent in directoryList:
path = srcdir+directoryComponent
if isdir(path):
DirFunc(path)
if recursive:
logger.debug("Adding Directory %s " % (path))
ScanDirectoriesEx(path, DirFunc, FileFunc, recursive)
elif FileFunc:
FileFunc(path)
return
# Scan the sub-directory structure in a given directory
#
# This is just like 'ScanDirectoriesEx' above, except that an error
# is reported if an I/O exception occurs.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectories(srcdir, DirFunc, listFiles=False, recursive=True):
try:
ScanDirectoriesEx(srcdir, DirFunc, listFiles, recursive)
except (IOError, os.error), why:
logger.debug("Can't scan %s: %s" % (`srcdir`, str(why)))
print "Can't scan %s: %s" % (`srcdir`, str(why))
return
# Collect directories/sub-directories found under the source directory
#
# srcdir directory to search, maybe including sub-directories
# baseDir a base directory that is removed from all results returned.
# listFiles is True if files are to be included in the listing returned
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
# appendSep is True if path separator character is to be appended to directory names
#
# Returns a list of directory contents
#
def CollectDirectoryContents(srcDir, baseDir="",
listDirs=True, listFiles=False, recursive=True, appendSep=False):
"""
Return a list of directory contents found under the source directory.
"""
logger.debug("CollectDirectories: %s, %s, %s"%(srcDir,baseDir,str(os.path.sep)))
dirsuffix = ""
if appendSep: dirsuffix = os.path.sep
collection = []
if (baseDir != "") and (not baseDir.endswith(os.path.sep)):
baseDir = baseDir+os.path.sep
def CollectDir(path):
logger.debug("- CollectDir base: %s, path: %s"%(baseDir, path))
if listDirs: collection.append(path.replace(baseDir,"",1)+dirsuffix)
def CollectFile(path):
logger.debug("- CollectFile base: %s, path: %s"%(baseDir, path))
if listFiles: collection.append(path.replace(baseDir,"",1))
ScanDirectoriesEx(srcDir, CollectDir, CollectFile, recursive)
return collection
if __name__ == "__main__":
directoryCollection = CollectDirectoryContents(".", baseDir=".",
listFiles=True, listDirs=False, appendSep=True)
print "\n".join(directoryCollection)
# End.
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/ScanDirectories.py",
"copies": "1",
"size": "4443",
"license": "mit",
"hash": 7672233155959463000,
"line_mean": 39.7614678899,
"line_max": 84,
"alpha_frac": 0.683772226,
"autogenerated": false,
"ratio": 3.9458259325044405,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.512959815850444,
"avg_score": null,
"num_lines": null
} |
# $Id: ScanDirectories.py 1047 2009-01-15 14:48:58Z graham $
#
"""
Function to scan the sub-directory structure in a given directory.
Note that files are not included in the results returned.
"""
from os.path import join, isdir, normpath
import os, logging
#from functional import partial
logger = logging.getLogger("ScanDirectories")
# Scan the sub-directory structure in a given directory
#
# Exceptions are left to the calling program.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectoriesEx(srcdir, DirFunc,listFiles=False, recursive=True):
"""
Scan all sub-directories in a given source directory.
Exceptions are thrown back to the calling program.
"""
directoryList = os.listdir(srcdir)
for directoryComponent in directoryList:
path = srcdir+"/"+directoryComponent
        if isdir(path):
            DirFunc(path)
            if recursive:
                logger.debug("Adding Directory %s " % (path))
                ScanDirectoriesEx(path, DirFunc, listFiles, recursive)
        elif listFiles:
            # report plain files through the same callback when requested
            DirFunc(path)
return
# Scan the sub-directory structure in a given directory
#
# This is just like 'ScanDirectoriesEx' above, except that an error
# is reported if an I/O exception occurs.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectories(srcdir, DirFunc, listFiles=False, recursive=True):
try:
ScanDirectoriesEx(srcdir, DirFunc, listFiles, recursive)
except (IOError, os.error), why:
logger.debug("Can't scan %s: %s" % (`srcdir`, str(why)))
print "Can't scan %s: %s" % (`srcdir`, str(why))
return
# Collect directories/sub-directories found under the source directory
#
# srcdir directory to search, maybe including sub-directories
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
# Returns a list of directory contents
#
def CollectDirectoryContents(srcDir, baseDir, listFiles=False, recursive=True):
"""
Return a list of directory contents found under the source directory.
"""
#logger.debug("CollectDirectories: %s, %s, %s"%(srcDir,baseDir,str(os.path.sep)))
collection = []
if (baseDir != "") and (not baseDir.endswith(os.path.sep)):
baseDir = baseDir+os.path.sep
def Collect(path):
collection.append(path.replace(baseDir,"",1))
ScanDirectoriesEx(srcDir, Collect, listFiles, recursive)
return collection
if __name__ == "__main__":
directoryCollection = CollectDirectories(".")
#PrintCollection()
# Collect user accessible and writable directories/sub-directories found under the source directory
#
# srcdir directory to search, maybe including sub-directories
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
# Returns a list of directories
#
def CollectWritableDirectories(srcDir, baseDir,listFiles=False, recursive=True):
"""
Return a list of user accessible and writable directories found under the source directory.
"""
#logger.debug("CollectDirectories: %s, %s, %s"%(srcDir,baseDir,str(os.path.sep)))
collection = []
if (baseDir != "") and (not baseDir.endswith(os.path.sep)):
baseDir = baseDir+os.path.sep
def CollectDirs(path):
if IsDirectoryWritable(path):
logger.debug("Adding Path to tree = " + repr(path))
collection.append(path.replace(baseDir,"",1))
ScanDirectoriesEx(srcDir, CollectDirs,listFiles, recursive)
return collection
def IsDirectoryWritable(dirPath):
if os.environ.has_key("REMOTE_USER"):
uname = os.environ['REMOTE_USER']
logger.debug("Remote user = " + repr(uname))
accesspath = "/usr/local/sbin/testuseraccess.sh" + " " + uname + " " + dirPath
logger.debug("accesspath = " + repr(accesspath))
accessStatus = os.system(accesspath)
logger.debug("accessStatus = " + repr(accessStatus))
        return accessStatus == 0
    else:
        return True
| {
"repo_name": "bhavanaananda/DataStage",
"path": "src/AdminUI/MiscLib/ScanDirectories.py",
"copies": "6",
"size": "4908",
"license": "mit",
"hash": -6267346238698686000,
"line_mean": 36.4732824427,
"line_max": 99,
"alpha_frac": 0.6756316218,
"autogenerated": false,
"ratio": 4.09,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.026078902152812538,
"num_lines": 131
} |
# $Id: ScanFiles.py 1047 2009-01-15 14:48:58Z graham $
#
"""
Functions to scan all files with names matching a given pattern in
a directory or directory tree.
Note that directories are not included in the results returned.
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from os.path import join, isdir, normpath
import os
# Scan files matching pattern in a directory tree
#
# Exceptions are left to the calling program.
#
# srcdir directory to search, maybe including subdirectories
# pattern a compiled regex pattern, for filename selection
# FileFunc a function to be called for each selected filename
# as FileFunc( dir, name ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanFilesEx(srcdir, pattern, FileFunc, recursive=True):
"""
Scan all files in a directory or directory tree matching a given pattern.
Exceptions are thrown back to the calling program.
"""
names = os.listdir(srcdir)
for name in names:
srcname = join(srcdir, name)
if isdir(srcname):
if recursive:
                ScanFilesEx(srcname, pattern, FileFunc, recursive)
elif pattern.match(name):
FileFunc(srcdir, name)
# Scan files matching pattern in a directory tree
#
# This is just like 'ScanFilesEx' above, except that an error
# is reported if an I/O exception occurs.
#
# srcdir directory to search, maybe including subdirectories
# pattern a compiled regex pattern, for filename selection
# FileFunc a function to be called for each selected filename
# as FileFunc( dir, name ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanFiles(srcdir, pattern, FileFunc, recursive=True):
try:
ScanFilesEx(srcdir, pattern, FileFunc, recursive)
except (IOError, os.error), why:
print "Can't scan %s: %s" % (`srcdir`, str(why))
# Collect files matching pattern in a directory tree
#
# srcdir directory to search, maybe including subdirectories
# pattern a compiled regex pattern, for filename selection
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
# Returns a list of pairs of the form (directory,filename)
#
def CollectFiles(srcdir, pattern, recursive=True):
"""
Return a list of (dir,name) pairs for matching files in a directory tree.
"""
global collection
collection = []
ScanFilesEx(srcdir, pattern, Collect, recursive)
return collection
def Collect(fdir,fnam):
global collection
collection.append( (fdir,fnam) )
# Helper functions to read the contents of a file into a string
def joinDirName(fdir,fnam):
"""
Return a normalized path name obtained by combining a named directory
with a file name. The first argument is presumed to name a directory,
even when its trailing directory indicator is omitted.
"""
return normpath(join(fdir,fnam))
def readDirNameFile(fdir,fnam):
"""
Read a file from a specified directory, and return its content as a string.
"""
return readFile(joinDirName(fdir,fnam))
def readFile(nam):
"""
Read a file and return its content as a string.
"""
f = open(nam,"r")
s = f.read()
f.close()
return s
# Test case
if __name__ == "__main__":
import re
pattern = re.compile( r'^.+\.py$' )
c = CollectFiles(".", pattern)
for (d,f) in c:
print d+"\\"+f
# print "*******************************************"
# def ProcFile(path,name):
# print "path: ", path, ", name: ", name
# pattern=re.compile(r'.*')
# ScanFilesEx(".",pattern,ProcFile)
# $Log: ScanFiles.py,v $
| {
"repo_name": "wf4ever/ro-manager",
"path": "src/MiscUtils/ScanFiles.py",
"copies": "1",
"size": "4177",
"license": "mit",
"hash": -479690974244343100,
"line_mean": 32.416,
"line_max": 79,
"alpha_frac": 0.6698587503,
"autogenerated": false,
"ratio": 3.8928238583411,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0068672887841086635,
"num_lines": 125
} |
""" $Id: ScarabConnection.py,v 1.3 2000/03/13 22:32:35 kmacleod Exp $
Scarab Connection class
The Scarab Connection class provides the housekeeping for setting up
connections, setting parameters, and creating the initial root proxy
for a server.
"""
from urllib import splittype, splithost, splitport
import types
import sys
from traceback import format_exception
import string
from string import find
# ScarabProxy is instantiated in root_proxy() below but was never
# imported in the original.
from ScarabProxy import ScarabProxy
ScarabConnectionError = "ScarabConnection.ScarabConnectionError"
ScarabConnectionRemoteError = "ScarabConnection.ScarabConnectionRemoteError"
class ScarabConnection:
def __init__(self, options):
self.globals = {}
self.is_valid = 1
self.debug = 0
if options != None:
if type(options) == types.InstanceType:
options = options.__dict__
for key in options.keys():
setattr(self, key, options[key])
def root_proxy(self):
return ScarabProxy(self, 'root')
def call(self, object, method, args):
"""Forwards an object method and arguments to a remote
server to be executed. Exceptions in the remote
server are propagated to the local caller."""
if self.protocol == "soap":
if len(args) != 1:
raise ScarabConnectionError, \
"More than one argument to a SOAP method"
# FIXME sanity checking on SOAP top-level
self.pickler.encode_call(method, args[0])
else:
self.pickler.dump({ 'object' : object,
'method' : method,
'args' : args })
try:
self.pickler.flush()
except AttributeError:
self.stream.flush()
result = self.unpickler.load()
if self.protocol == "soap":
# FIXME check for invalid data
method = result['SOAP:Envelope']['SOAP:Body'].keys()[0]
if method == "SOAP:Fault":
result = { 'error' : result['SOAP:Envelope']['SOAP:Body']['SOAP:Fault']['message'] }
else:
result = { 'result' : result['SOAP:Envelope']['SOAP:Body'][method] }
t = type(result)
if t != types.DictType:
raise ScarabConnectionError, \
"expected dictionary result, got %s" % `t.__name__`
if result.has_key('error'):
raise ScarabConnectionRemoteError, result['error']
return result['result'][0]
def accept(self):
self.log_info("checking connection protocol", 2)
if not hasattr(self, 'unpickler'):
stream = ScarabBufferedFile(self.stream)
# allow exceptions to bubble up
# FIXME should mark connection as invalid
# FIXME could block on stream having less than 100 bytes
head = stream.head(4)
if head[0:4] == ("\x89" + "CBF"):
import LDOBinary
self.unpickler = LDOBinary.LDOBinaryUnmarshaler(stream)
self.pickler = LDOBinary.LDOBinaryMarshaler(stream)
self.protocol = 'ldobinary'
elif head[0:2] == '<?':
head = stream.head(100)
if string.find(head, 'urn:schemas-xmlsoap-org:soap.v1') != -1:
import SOAP
self.unpickler = SOAP.SOAPUnmarshaler(stream)
self.pickler = SOAP.SOAPMarshaler(stream)
self.protocol = 'soap'
else:
import LDOXML
self.unpickler = LDOXML.LDOXMLUnmarshaler(stream)
self.pickler = LDOXML.LDOXMLMarshaler(stream)
self.protocol = "ldoxml"
elif head[0:1] != ' ' and head[0:1] != "\t" and head[0:1] != '<':
import cPickle
self.unpickler = cPickle.Unpickler(stream)
self.pickler = cPickle.Pickler(stream)
self.protocol = "pickle"
else:
self.log_info("accept: unrecognized serialization", 2)
"""FIXME LDO-XML or other XML format"""
raise ScarabConnectionError
self.log_info("accepted " + self.protocol + " connection")
error = None
try:
request = self.unpickler.load()
if self.protocol == "soap":
# FIXME check for invalid data
method = request['SOAP:Envelope']['SOAP:Body'].keys()[0]
request = { 'object' : 'root',
'method' : method,
'args' : [ request['SOAP:Envelope']['SOAP:Body'][method] ] }
if self.debug >= 2:
self.log_info("request: " + str(request), 3)
except EOFError:
self.log_info("EOF on connection", 2)
self.is_valid = 0
# FIXME close pickler (which'll close stream?)
# FIXME notify conn manager
return
except:
exc_type, value, traceback = sys.exc_info()
# since we load picklers dynamically, we need to check their
# errors in a dynamic way
if str(exc_type) == "cPickle.UnpicklingError":
self.log_info("EOF on connection", 2)
self.is_valid = 0
# FIXME close pickler (which'll close stream?)
# FIXME notify conn manager
return
error = "error unmarshaling, closing connection: " \
+ string.join(format_exception(exc_type, value, traceback))
self.is_valid = 0
# we'll still try to send a failure response, ya never know
else:
t = type(request)
if t != types.DictType:
error = "expected dictionary for request, got %s" % `t.__name__`
else:
if (not(request.has_key('object')
and request.has_key('method')
and request.has_key('args'))):
error = "missing object id, method name, or args in request"
else:
object = request['object']
method_name = request['method']
if not self.globals.has_key(object):
error = "no such object registered: %s" % `object`
else:
try:
method = getattr(self.globals[object], method_name)
except:
error = "no such method %s for object %s" \
% (method_name, `object`,)
if error != None:
self.log_info(error)
result = { 'error' : error }
else:
call_str = object + "." + method_name
self.log_info("calling `" + call_str + "'")
try:
result = apply(method, tuple(request['args']))
except:
exc_type, value, traceback = sys.exc_info()
exc_str = string.join(format_exception(exc_type, value, traceback))
self.log_info("exception raised in `" + call_str + "': " + exc_str)
result = { 'error' : exc_str }
else:
self.log_info("`" + call_str + "' returned successfully", 2)
result = { 'result' : [ result ] }
try:
if self.debug >= 2:
self.log_info("response: " + str(result), 2)
if self.protocol == "soap":
if result.has_key('error'):
self.pickler.encode_fault(100, result['error'], 1)
else:
self.pickler.encode_response(method_name,
result['result'][0])
else:
self.pickler.dump(result)
try:
self.pickler.flush()
except AttributeError:
self.stream.flush()
except:
exc_type, value, traceback = sys.exc_info()
self.log_info("error sending response"
+ string.join(format_exception(exc_type,
value,
traceback)))
if self.is_valid:
"""FIXME ignore, log, or reraise?"""
# else: ignore
# FIXME if not self.is_valid: pickler.close
def process_string(self, message):
import StringIO
self.stream = StringIO.StringIO(message)
self.accept()
        del self.stream
        del self.pickler
        del self.unpickler
def run_loop(self):
# FIXME this is socket specific, see FIXMEs in Scarab.py
while self.is_valid:
self.log_info("awaiting connection", 2)
socket, addr = self.socket.accept()
self.caller = str(addr)
stream = socket.makefile('r+')
server = ScarabConnection(self)
if self.debug >= 4:
server.stream = ScarabDebugFile(stream, self)
else:
server.stream = stream
while server.is_valid:
server.accept()
def log_info(self, message, level = 1):
if self.debug >= level:
if hasattr(self, 'caller'):
print self.caller + ": " + message
else:
print message
def connect_to(url, options = {}):
conn = ScarabConnection(options)
# FIXME this should take the lower-level protocol and produce
# a connection that can do either multiple or one-shot
# connections. For right now, we assume it's a TCP
# connection, hence a socket stream
type, path = splittype(url)
host, dummy_path = splithost(path)
host, port = splitport(host)
if port == None:
raise "no port defined and no defaults yet"
port = int(port)
from socket import *
conn.socket = socket(AF_INET, SOCK_STREAM)
    conn.socket.connect((host, port))
conn.stream = conn.socket.makefile('r+')
# FIXME we assume one pickler instance, too, for that stream
conn.pickler = conn.pickler_class(conn.stream)
conn.unpickler = conn.unpickler_class(conn.stream)
return conn
def connect_to_root_at(url, options):
    # connect_to() and root_proxy() are assumed here; the original
    # called an undefined 'server' function and a nonexistent
    # 'conn.root' attribute.
    conn = connect_to(url, options)
    return conn.root_proxy()
def server_on(url = '', options = {}):
conn = ScarabConnection(options)
# url defaults to '', but may be passed as None by user
if url == None:
url = ''
conn.lower = url
# FIXME see FIXME in Scarab.server()
host, dummy_path = splithost(url)
if host == None:
host = ''
port = None
else:
host, port = splitport(host)
if port == None:
port = 0
from socket import *
conn.socket = socket(AF_INET, SOCK_STREAM)
    conn.socket.bind((host, port))
conn.socket.listen(1)
ip_addr, ip_port = conn.socket.getsockname()
if ip_addr == '0.0.0.0':
ip_host, dummy, dummy = gethostbyaddr(gethostname())
else:
ip_host, aliases, ip_addrs = gethostbyaddr(ip_addr)
conn.url = 'ANY:tcp://' + ip_host + ':' + str(ip_port) + '/'
conn.log_info("server opened on " + conn.url)
return conn
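# Usage sketch (illustrative; it only chains the functions defined in
# this module, and assumes the client's options supply pickler_class
# and unpickler_class; MyService is a hypothetical object):
#
#   # server side
#   conn = server_on('', {'debug': 1})
#   conn.globals['root'] = MyService()
#   conn.run_loop()
#
#   # client side
#   conn = connect_to('tcp://host:4321/', options)
#   proxy = conn.root_proxy()
#   result = proxy.some_method(arg)   # forwarded via conn.call()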
class ScarabBufferedFile:
def __init__(self, stream):
self.stream = stream
self.str_read = stream.read
self.str_readline = stream.readline
self.write = stream.write
self.flush = stream.flush
self._head = ""
self._head_bytes = 0
def head(self, num_bytes):
if self._head_bytes > 0:
num_bytes = num_bytes - self._head_bytes
if num_bytes <= 0:
return self._head
bytes_to_go = num_bytes
bytes = ''
while bytes_to_go > 0:
partial_bytes = self.str_read(bytes_to_go)
bytes_to_go = bytes_to_go - len(partial_bytes)
bytes = bytes + partial_bytes
self._head = self._head + bytes
self._head_bytes = self._head_bytes + num_bytes
return bytes
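    # Illustrative behaviour (not part of the original class): head()
    # peeks without consuming, and later reads replay the peeked bytes.
    #
    #   magic = f.head(4)    # sniff a serialization signature
    #   data = f.read(10)    # returns those 4 bytes plus 6 more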
def read(self, size = -1):
if self._head_bytes > 0:
if size < 0:
# Return rest of file
bytes = self._head
bytes = bytes + self.str_read()
self._head = ''
self._head_bytes = 0
elif size <= self._head_bytes:
# read part of self._head_bytes
bytes = self._head[0:size]
self._head = self._head[size:]
self._head_bytes = self._head_bytes - size
else:
# read all of self._head_bytes and then some
bytes = self._head
size = size - self._head_bytes
bytes = bytes + self.str_read(size)
self._head = ''
self._head_bytes = 0
else:
# we have no head bytes, read completely from file
return self.str_read(size)
return bytes
def readline(self, size = -1):
if self._head_bytes > 0:
if size < 0:
nl_index = find(self._head, "\n") + 1
if nl_index == 0:
# Return head + a real readline
bytes = self._head + self.str_readline()
self._head = ''
self._head_bytes = 0
else:
# return only portion of head
bytes = self._head[0:nl_index]
self._head = self._head[nl_index:]
self._head_bytes = self._head_bytes - nl_index
elif size <= self._head_bytes:
nl_index = find(self._head, "\n", 0, size) + 1
if nl_index == 0:
# Return only a `size' portion of head
bytes = self._head[0:size]
self._head = self._head[size:]
self._head_bytes = self._head_bytes - size
else:
# return only an `nl_index' bytes of head
bytes = self._head[0:nl_index]
self._head = self._head[nl_index:]
self._head_bytes = self._head_bytes - nl_index
else:
nl_index = find(self._head, "\n") + 1
if nl_index == 0:
# Return all of head and then some
bytes = self._head + self.str_readline(size
- self._head_bytes)
self._head = ''
self._head_bytes = 0
else:
# return only an `nl_index' bytes of head
bytes = self._head[0:nl_index]
self._head = self._head[nl_index:]
self._head_bytes = self._head_bytes - nl_index
return bytes
else:
# we have no head bytes, read completely from file
if size == -1:
# work-around for a bug in StreamIO, not accepting -1
# as the same as no argument
return self.str_readline()
else:
return self.str_readline(size)
class ScarabDebugFile:
def __init__(self, stream, log):
self.stream = stream
self.str_read = stream.read
self.str_readline = stream.readline
self.str_write = stream.write
self.str_flush = stream.flush
self.log_info = log.log_info
self.is_binary = 0
def read(self, size = -1):
if size == -1:
            bytes = self.str_read()
else:
bytes = self.str_read(size)
self._log('read: ', bytes)
return (bytes)
def readline(self, size = -1):
if size == -1:
bytes = self.str_readline()
else:
bytes = self.str_readline(size)
self._log('readline: ', bytes)
return (bytes)
def write(self, bytes):
self._log('write: ', bytes)
self.str_write(bytes)
def flush(self):
self.log_info("flush(str)")
self.str_flush()
def _log(self, mode, strng):
ii = 0
while ii < len(strng):
char = strng[ii]
if ((char != "\n" and char != "\f")
and (ord(char) < 32 or ord(char) > 127)):
self.is_binary = 1
break
ii = ii + 1
if self.is_binary:
ii = 0
while ii < len(strng):
jj = 0
line_str = ""
while jj < 8 and ii < len(strng):
line_str = line_str + hex(ord(strng[ii])) + " "
jj = jj + 1
ii = ii + 1
self.log_info(mode + line_str)
else:
self.log_info(mode + strng)
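# Logging sketch: ScarabDebugFile wraps any stream; the logger is
# hypothetical and only needs a log_info() method. Binary data is
# hex-dumped by _log(), printable text is logged verbatim.
#
#     from StringIO import StringIO
#     class EchoLog:
#         def log_info(self, msg): print msg
#     dbg = ScarabDebugFile(StringIO("hello\n"), EchoLog())
#     dbg.readline()         # logs "readline: hello\n"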
| {
"repo_name": "mworks/mworks",
"path": "supporting_libs/scarab/Scarab-0.1.00d19/python/ScarabConnection.py",
"copies": "1",
"size": "16374",
"license": "mit",
"hash": -7688913665942233000,
"line_mean": 34.5184381779,
"line_max": 100,
"alpha_frac": 0.5171002809,
"autogenerated": false,
"ratio": 4.062019350037212,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5079119630937212,
"avg_score": null,
"num_lines": null
} |
""" $Id: ScarabProxy.py,v 1.1 2000/03/04 19:38:38 kmacleod Exp $
ScarabProxy implements surrogates for remote objects
"""
class ScarabProxy:
    def __init__(self, connection, object_name):
        # Assign via __dict__ so the no-op __setattr__ below does not
        # silently discard these attributes.
        self.__dict__['_connection_'] = connection
        self.__dict__['_object_'] = object_name
def __repr__(self):
# Prohibit call to __getattr__
return '<Agent instance at %s>' % hex(id(self))[2:]
__str__ = __repr__
def __getinitargs__(self):
return ()
def __getstate__(self):
"""Create a representation of the Proxy object that
can be revived later, in another process, or on
another client"""
return None
def __setstate__(self, state):
"""Revive a Proxy that was created earlier, in another
process, or on another client"""
def __getattr__(self, attr):
"""Normally, this might get an attribute from the
remote object. In this case though, we only support
calls to the remote object, so we'll cache a Method
object in the __dict__ for future use, and also return
the Method object for this time."""
forwarder = ScarabProxy.Method(self._connection_, self._object_, attr)
self.__dict__[attr] = forwarder
return forwarder
    def __setattr__(self, attr, value):
        """Set an attribute in the remote object.
        Not implemented yet; attribute assignments are silently ignored."""
class Method:
"""Represents a method on the remote object.
Overloads the function call method to look like a function."""
def __init__(self, connection, object, method):
"""
connection the connection instance the
method will use
object the identifier of the remote object
method the method to call on the remote object"""
self._connection_ = connection
self._object_ = object
self._method_ = method
def __call__(self, *args):
"""This method enables calling instances as if
they were functions. It will perform a remote procedure
call on the server object the agent is connected to."""
result = self._connection_.call(self._object_, self._method_, args)
return result
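# Forwarding sketch (hypothetical connection): any object exposing
# call(object_name, method, args) can stand in for a real connection.
#
#     class EchoConnection:
#         def call(self, obj, method, args):
#             return (obj, method, args)
#     proxy = ScarabProxy(EchoConnection(), 'root')
#     proxy.greet('world')   # -> ('root', 'greet', ('world',))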
| {
"repo_name": "mworks/mworks",
"path": "supporting_libs/scarab/Scarab-0.1.00d19/python/ScarabProxy.py",
"copies": "1",
"size": "1937",
"license": "mit",
"hash": 6837486774085125000,
"line_mean": 27.4852941176,
"line_max": 72,
"alpha_frac": 0.6861125452,
"autogenerated": false,
"ratio": 3.587037037037037,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9655221498379924,
"avg_score": 0.023585616771422607,
"num_lines": 68
} |
""" $Id: Scarab.py,v 1.3 2000/03/13 22:32:35 kmacleod Exp $
Scarab.py implements the basic user API for using Scarab as either a
client or a server. It can create connections and proxies for remote
servers and run a server loop for implementing a server.
Scarab URLs are two-part URLs that contain an upper-level protocol
scheme (ldoxml, ldo (ldobinary), xmlrpc, soap, wddx, pickle) and a
lower-level protocol scheme (http, mailto, tcp [default]) like this:
xmlrpc:http://casbah.org/listener
connect_to() accepts a Scarab URL and returns a connection object.
connect_to_root_at() accepts a Scarab URL and returns the root proxy
object of the server.
server_on() accepts a Scarab URL or an ordinary URL. If given a
Scarab URL then it only supports the given upper-level protocol. If
given an ordinary URL then it attempts to determine the upper-level
protocol on each connection.
AVAILABLE PROTOCOLS
Serialization:
soap: Simple Object Access Protocol
ldoxml: The reference protocol for Scarab
ldo, ldobinary: A binary implementation of the reference protocol
pickle: Python's native pickle format, can marshal all types of
Python data but can only be used when Python is on both
ends of the link
Transport:
http: via CGI
tcp: straight-sockets
`tcp' is also the default protocol and can be left off of Scarab URLs.
The following two are synonymous:
ldo:tcp://anysite.com:4321/
ldo://anysite.com:4321/
There are no default ports for tcp connections; a port must always be
specified.
"""
import urlparse
from urllib import splittype, splithost, splitport
import ScarabConnection
uppers = ['pickle', 'ldo', 'ldobinary', 'ldoxml', 'soap']
def connect_to(scarab_url, options = None):
    """ Create a connection object for the server at
    'scarab_url' """
    # Use None instead of a mutable default: this function mutates options.
    if options is None:
        options = {}
    options['upper'], lower = splittype(scarab_url)
if options['upper'] == 'ANY':
options['upper'] = None
options['pickler_class'], options['unpickler_class'], options['protocol'] = _picklers(options['upper'])
return ScarabConnection.connect_to(lower, options)
def server_on(scarab_url = None, options = None):
    """ Create a server connection for 'scarab_url' """
    # Use None instead of a mutable default: this function mutates options.
    if options is None:
        options = {}
    if scarab_url is None:
        options['upper'] = None
        lower = None
else:
options['upper'], lower = splittype(scarab_url)
options['pickler_class'], options['unpickler_class'], options['protocol'] = _picklers(options['upper'])
return ScarabConnection.server_on(lower, options)
def _picklers(protocol = None):
""" _picklers accepts a protocol scheme and returns a picker and
unpickler Class objects for that scheme. If the scheme is not
given or isn't an upper level protocol scheme, ldobinary is used
by default. """
    if protocol is None or protocol not in uppers:
        protocol = 'ldobinary'
if protocol == 'pickle':
import cPickle
pickler = cPickle.Pickler
unpickler = cPickle.Unpickler
elif protocol == 'ldoxml':
import LDOXML
pickler = LDOXML.LDOXMLMarshaler
unpickler = LDOXML.LDOXMLUnmarshaler
elif protocol == 'soap':
import SOAP
pickler = SOAP.SOAPMarshaler
unpickler = SOAP.SOAPUnmarshaler
elif (protocol == 'ldobinary'
or protocol == 'ldo'):
import LDOBinary
pickler = LDOBinary.LDOBinaryMarshaler
unpickler = LDOBinary.LDOBinaryUnmarshaler
return pickler, unpickler, protocol
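# Fallback sketch: unknown or missing schemes degrade to LDOBinary.
#
#     _picklers('pickle')   # -> (cPickle.Pickler, cPickle.Unpickler, 'pickle')
#     _picklers('bogus')    # -> (LDOBinaryMarshaler, LDOBinaryUnmarshaler,
#                           #     'ldobinary')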
| {
"repo_name": "mworks/mworks",
"path": "supporting_libs/scarab/Scarab-0.1.00d19/python/Scarab.py",
"copies": "1",
"size": "3477",
"license": "mit",
"hash": 5280108905469691000,
"line_mean": 30.0446428571,
"line_max": 107,
"alpha_frac": 0.7049180328,
"autogenerated": false,
"ratio": 3.6256517205422316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978553851078143,
"avg_score": 0.00900624851216034,
"num_lines": 112
} |
# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Skinny Client Control Protocol."""
from __future__ import absolute_import
from . import dpkt
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK = 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 0),
)
class CallInfo(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('calling_party_name', '40s', ''),
('calling_party', '24s', ''),
('called_party_name', '40s', ''),
('called_party', '24s', ''),
('line_instance', 'I', 0),
('call_id', 'I', 0),
('call_type', 'I', 0),
('orig_called_party_name', '40s', ''),
('orig_called_party', '24s', '')
)
class CallState(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('call_state', 'I', 12), # 12: Proceed, 15: Connected
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class ClearPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class CloseReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class DisplayPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('msg_timeout', 'I', 0),
('display_msg', '32s', ''),
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class DisplayText(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('display_msg', '36s', ''),
)
class KeypadButton(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('button', 'I', 0),
)
class OpenReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('echo_cancel_type', 'I', 4),
('g723_bitrate', 'I', 0),
)
class OpenReceiveChannelAck(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('channel_status', 'I', 0),
('ip', '4s', ''),
('port', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SelectStartKeys(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_id', 'I', 1),
('call_id', 'I', 0),
('softkey_set', 'I', 8),
('softkey_map', 'I', 0xffffffff)
)
class SetLamp(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('stimulus', 'I', 9), # 9: Line
('stimulus_instance', 'I', 1),
('lamp_mode', 'I', 1),
)
class SetSpeakerMode(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('speaker', 'I', 2), # 2: SpeakerOff
)
class StartMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('remote_ip', '4s', ''),
('remote_port', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('precedence', 'I', 0),
('silence_suppression', 'I', 0),
('max_frames_per_pkt', 'I', 1),
('g723_bitrate', 'I', 0),
)
class StartTone(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('tone', 'I', 0x24), # 0x24: AlertingTone
)
class StopMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SCCP(dpkt.Packet):
"""Cisco Skinny Client Control Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SCCP.
TODO.
"""
__byte_order__ = '<'
__hdr__ = (
('len', 'I', 0),
('rsvd', 'I', 0),
('msgid', 'I', 0),
('msg', '0s', ''),
)
_msgsw = {
KEYPAD_BUTTON: KeypadButton,
OPEN_RECEIVE_CHANNEL_ACK: OpenReceiveChannelAck,
START_TONE: StartTone,
SET_LAMP: SetLamp,
START_MEDIA_TRANSMIT: StartMediaTransmission,
STOP_MEDIA_TRANSMIT: StopMediaTransmission,
CALL_INFO: CallInfo,
DISPLAY_TEXT: DisplayText,
OPEN_RECEIVE_CHANNEL: OpenReceiveChannel,
CLOSE_RECEIVE_CHANNEL: CloseReceiveChannel,
CALL_STATE: CallState,
DISPLAY_PROMPT_STATUS: DisplayPromptStatus,
CLEAR_PROMPT_STATUS: ClearPromptStatus,
ACTIVATE_CALL_PLANE: ActivateCallPlane,
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.len - 4
if n > len(self.data):
raise dpkt.NeedData('not enough data')
self.msg, self.data = self.data[:n], self.data[n:]
try:
p = self._msgsw[self.msgid](self.msg)
setattr(self, p.__class__.__name__.lower(), p)
except (KeyError, dpkt.UnpackError):
pass
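# A parsing sketch (not part of the original module): build a raw
# KEYPAD_BUTTON message by hand and let SCCP decode the sub-message.
# The helper name is hypothetical.
def _example_keypad_roundtrip():
    import struct
    payload = struct.pack('<I', 5)                        # button 5
    raw = struct.pack('<III', 4 + len(payload), 0, KEYPAD_BUTTON) + payload
    pkt = SCCP(raw)
    assert pkt.msgid == KEYPAD_BUTTON
    assert pkt.keypadbutton.button == 5                   # decoded via _msgsw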
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/sccp.py",
"copies": "1",
"size": "5492",
"license": "bsd-3-clause",
"hash": 542809333792048960,
"line_mean": 23.5178571429,
"line_max": 62,
"alpha_frac": 0.5145666424,
"autogenerated": false,
"ratio": 3.020902090209021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9031004446894735,
"avg_score": 0.0008928571428571429,
"num_lines": 224
} |
# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Skinny Client Control Protocol."""
import dpkt
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK = 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 0),
)
class CallInfo(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('calling_party_name', '40s', ''),
('calling_party', '24s', ''),
('called_party_name', '40s', ''),
('called_party', '24s', ''),
('line_instance', 'I', 0),
('call_id', 'I', 0),
('call_type', 'I', 0),
('orig_called_party_name', '40s', ''),
('orig_called_party', '24s', '')
)
class CallState(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('call_state', 'I', 12), # 12: Proceed, 15: Connected
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class ClearPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class CloseReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class DisplayPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('msg_timeout', 'I', 0),
('display_msg', '32s', ''),
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class DisplayText(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('display_msg', '36s', ''),
)
class KeypadButton(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('button', 'I', 0),
)
class OpenReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('echo_cancel_type', 'I', 4),
('g723_bitrate', 'I', 0),
)
class OpenReceiveChannelAck(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('channel_status', 'I', 0),
('ip', '4s', ''),
('port', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SelectStartKeys(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_id', 'I', 1),
('call_id', 'I', 0),
('softkey_set', 'I', 8),
('softkey_map', 'I', 0xffffffffL)
)
class SetLamp(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('stimulus', 'I', 9), # 9: Line
('stimulus_instance', 'I', 1),
('lamp_mode', 'I', 1),
)
class SetSpeakerMode(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('speaker', 'I', 2), # 2: SpeakerOff
)
class StartMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('remote_ip', '4s', ''),
('remote_port', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('precedence', 'I', 0),
('silence_suppression', 'I', 0),
('max_frames_per_pkt', 'I', 1),
('g723_bitrate', 'I', 0),
)
class StartTone(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('tone', 'I', 0x24), # 0x24: AlertingTone
)
class StopMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SCCP(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('len', 'I', 0),
('rsvd', 'I', 0),
('msgid', 'I', 0),
('msg', '0s', ''),
)
_msgsw = {
KEYPAD_BUTTON: KeypadButton,
OPEN_RECEIVE_CHANNEL_ACK: OpenReceiveChannelAck,
START_TONE: StartTone,
SET_LAMP: SetLamp,
START_MEDIA_TRANSMIT: StartMediaTransmission,
STOP_MEDIA_TRANSMIT: StopMediaTransmission,
CALL_INFO: CallInfo,
DISPLAY_TEXT: DisplayText,
OPEN_RECEIVE_CHANNEL: OpenReceiveChannel,
CLOSE_RECEIVE_CHANNEL: CloseReceiveChannel,
CALL_STATE: CallState,
DISPLAY_PROMPT_STATUS: DisplayPromptStatus,
CLEAR_PROMPT_STATUS: ClearPromptStatus,
ACTIVATE_CALL_PLANE: ActivateCallPlane,
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.len - 4
if n > len(self.data):
raise dpkt.NeedData('not enough data')
self.msg, self.data = self.data[:n], self.data[n:]
try:
p = self._msgsw[self.msgid](self.msg)
setattr(self, p.__class__.__name__.lower(), p)
except (KeyError, dpkt.UnpackError):
pass
| {
"repo_name": "yangbh/dpkt",
"path": "dpkt/sccp.py",
"copies": "6",
"size": "5278",
"license": "bsd-3-clause",
"hash": -4282370109017054000,
"line_mean": 23.6635514019,
"line_max": 62,
"alpha_frac": 0.5115574081,
"autogenerated": false,
"ratio": 2.9819209039548022,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6493478312054802,
"avg_score": null,
"num_lines": null
} |
# $Id: sccp.py 23 2006-11-08 15:45:33Z dugsong $
"""Cisco Skinny Client Control Protocol."""
import dpkt
KEYPAD_BUTTON = 0x00000003
OFF_HOOK = 0x00000006
ON_HOOK = 0x00000007
OPEN_RECEIVE_CHANNEL_ACK= 0x00000022
START_TONE = 0x00000082
STOP_TONE = 0x00000083
SET_LAMP = 0x00000086
SET_SPEAKER_MODE = 0x00000088
START_MEDIA_TRANSMIT = 0x0000008A
STOP_MEDIA_TRANSMIT = 0x0000008B
CALL_INFO = 0x0000008F
DEFINE_TIME_DATE = 0x00000094
DISPLAY_TEXT = 0x00000099
OPEN_RECEIVE_CHANNEL = 0x00000105
CLOSE_RECEIVE_CHANNEL = 0x00000106
SELECT_SOFTKEYS = 0x00000110
CALL_STATE = 0x00000111
DISPLAY_PROMPT_STATUS = 0x00000112
CLEAR_PROMPT_STATUS = 0x00000113
ACTIVATE_CALL_PLANE = 0x00000116
class ActivateCallPlane(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 0),
)
class CallInfo(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('calling_party_name', '40s', ''),
('calling_party', '24s', ''),
('called_party_name', '40s', ''),
('called_party', '24s', ''),
('line_instance', 'I', 0),
('call_id', 'I', 0),
('call_type', 'I', 0),
('orig_called_party_name', '40s', ''),
('orig_called_party', '24s', '')
)
class CallState(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('call_state', 'I', 12), # 12: Proceed, 15: Connected
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class ClearPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class CloseReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class DisplayPromptStatus(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('msg_timeout', 'I', 0),
('display_msg', '32s', ''),
('line_instance', 'I', 1),
('call_id', 'I', 0)
)
class DisplayText(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('display_msg', '36s', ''),
)
class KeypadButton(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('button', 'I', 0),
)
class OpenReceiveChannel(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('echo_cancel_type', 'I', 4),
('g723_bitrate', 'I', 0),
)
class OpenReceiveChannelAck(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('channel_status', 'I', 0),
('ip', '4s', ''),
('port', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SelectStartKeys(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('line_id', 'I', 1),
('call_id', 'I', 0),
('softkey_set', 'I', 8),
('softkey_map', 'I', 0xffffffffL)
)
class SetLamp(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('stimulus', 'I', 9), # 9: Line
('stimulus_instance', 'I', 1),
('lamp_mode', 'I', 1),
)
class SetSpeakerMode(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('speaker', 'I', 2), # 2: SpeakerOff
)
class StartMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
('remote_ip', '4s', ''),
('remote_port', 'I', 0),
('ms_packet', 'I', 0),
('payload_capability', 'I', 4), # 4: G.711 u-law 64k
('precedence', 'I', 0),
('silence_suppression', 'I', 0),
('max_frames_per_pkt', 'I', 1),
('g723_bitrate', 'I', 0),
)
class StartTone(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('tone', 'I', 0x24), # 0x24: AlertingTone
)
class StopMediaTransmission(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('conference_id', 'I', 0),
('passthruparty_id', 'I', 0),
)
class SCCP(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('len', 'I', 0),
('rsvd', 'I', 0),
('msgid', 'I', 0),
('msg', '0s', ''),
)
_msgsw = {
KEYPAD_BUTTON:KeypadButton,
OPEN_RECEIVE_CHANNEL_ACK:OpenReceiveChannelAck,
START_TONE:StartTone,
SET_LAMP:SetLamp,
START_MEDIA_TRANSMIT:StartMediaTransmission,
STOP_MEDIA_TRANSMIT:StopMediaTransmission,
CALL_INFO:CallInfo,
DISPLAY_TEXT:DisplayText,
OPEN_RECEIVE_CHANNEL:OpenReceiveChannel,
CLOSE_RECEIVE_CHANNEL:CloseReceiveChannel,
CALL_STATE:CallState,
DISPLAY_PROMPT_STATUS:DisplayPromptStatus,
CLEAR_PROMPT_STATUS:ClearPromptStatus,
ACTIVATE_CALL_PLANE:ActivateCallPlane,
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = self.len - 4
if n > len(self.data):
raise dpkt.NeedData('not enough data')
self.msg, self.data = self.data[:n], self.data[n:]
try:
p = self._msgsw[self.msgid](self.msg)
setattr(self, p.__class__.__name__.lower(), p)
except (KeyError, dpkt.UnpackError):
pass
| {
"repo_name": "guke001/QMarkdowner",
"path": "dpkt/sccp.py",
"copies": "15",
"size": "5315",
"license": "mit",
"hash": -6013544520054758000,
"line_mean": 26.1173469388,
"line_max": 61,
"alpha_frac": 0.5061147695,
"autogenerated": false,
"ratio": 2.9792600896860986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A widget, which allows scrolling using buttons and a slider."""
from pygame import K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN, K_KP_MINUS, K_MINUS
from pygame import K_LEFT, K_UP, K_PAGEUP, K_PAGEDOWN, K_HOME, K_END, Rect
from Range import Range
from Constants import *
from StyleInformation import StyleInformation
import base
# Timer value for the button press delay.
_TIMER = 25
class ScrollBar (Range):
"""ScrollBar () -> ScrollBar
An abstract widget class, which is suitable for scrolling.
    The ScrollBar widget works much the same as a Scale widget, except
    that it supports buttons for adjusting the value and that its
minimum value always is 0. It is suitable for widgets which need
scrolling ability and a scrolling logic.
Inheriting widgets have to implement the _get_value_from_coords()
and _get_coords_from_value() methods, which calculate the value of
the ScrollBar using a pair of coordinates and vice versa. Example
implementations can be found in the HScrollBar and VScrollBar widget
classes. They also need to implement the _get_button_coords()
    method, which has to return a tuple with both button coordinates
    [(x, y, width, height)].
Default action (invoked by activate()):
Give the ScrollBar the input focus.
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_MOUSEDOWN - Invoked, when a mouse button is pressed on the
ScrollBar.
    SIG_MOUSEUP - Invoked, when a mouse button is released on the
ScrollBar.
SIG_MOUSEMOVE - Invoked, when the mouse moves over the ScrollBar.
Attributes:
button_dec - Indicates, if the decrease button is pressed.
button_inc - Indicates, if the increase button is pressed.
"""
def __init__ (self):
Range.__init__ (self, 0, 1, 1)
# Signals.
self._signals[SIG_MOUSEDOWN] = []
self._signals[SIG_MOUSEMOVE] = []
self._signals[SIG_MOUSEUP] = []
self._signals[SIG_KEYDOWN] = None # Dummy for keyboard activation.
self._signals[SIG_TICK] = None # Dummy for automatic scrolling.
# Internal state handlers for the events. Those need to be known by
# the inheritors.
self._buttondec = False
self._buttoninc = False
self._timer = _TIMER
self._click = False
def activate (self):
"""S.activate () -> None
Activates the ScrollBar default action.
Activates the ScrollBar default action. This usually means giving
the ScrollBar the input focus.
"""
if not self.sensitive:
return
self.focus = True
def _get_button_coords (self, area):
"""S._get_button_coords (...) -> tuple
Gets a tuple with the coordinates of the in- and decrease buttons.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def _get_coords_from_value (self):
"""S._get_coords_from_value () -> float
Calculates the slider coordinates for the ScrollBar.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def _get_value_from_coords (self, area, coords):
"""S._get_value_from_coords (...) -> float
Calculates the slider coordinates for the ScrollBar.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def _get_slider_size (self):
"""S._get_slider_size (...) -> int
Calculates the size of the slider knob.
This method has to be implemented by inherited widgets.
"""
raise NotImplementedError
def _check_collision (self, pos, rect):
"""S._check_collirion (...) -> bool
Checks the collision of the given position with the passed rect.
"""
# Rect: (x, y, width, height), pos: (x, y).
return (pos[0] >= rect[0]) and (pos[0] <= (rect[2] + rect[0])) and \
(pos[1] >= rect[1]) and (pos[1] <= (rect[3] + rect[1]))
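    # For example (sketch of the rect test above):
    #   _check_collision((15, 12), (10, 10, 20, 20)) -> True, point inside.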
def set_minimum (self, minimum):
"""S.set_minimum (...) -> Exception
This method does not have any use.
"""
pass
def notify (self, event):
"""S.notify (...) -> None
Notifies the ScrollBar about an event.
"""
if not self.sensitive:
return
if event.signal in SIGNALS_MOUSE:
eventarea = self.rect_to_client ()
collision = eventarea.collidepoint (event.data.pos)
if event.signal == SIG_MOUSEDOWN and collision:
self.focus = True
# Act only on left clicks or scrollwheel events.
if event.data.button == 1:
self.state = STATE_ACTIVE
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
if event.data.button == 1:
buttons = self._get_button_coords (eventarea)
if self._check_collision (event.data.pos, buttons[0]):
self._buttondec = True
self._buttoninc = False
self._click = False
self.decrease ()
elif self._check_collision (event.data.pos, buttons[1]):
self._buttoninc = True
self._buttondec = False
self._click = False
self.increase ()
else:
self._click = True
self._buttondec = False
self._buttoninc = False
val = self._get_value_from_coords (eventarea,
event.data.pos)
if val != self.value:
self.value = val
# Mouse wheel.
elif event.data.button == 4:
self.decrease ()
elif event.data.button == 5:
self.increase ()
event.handled = True
elif event.signal == SIG_MOUSEMOVE:
dirty = False
if collision:
self.focus = True
if self.state == STATE_NORMAL:
self.state = STATE_ENTERED
self.run_signal_handlers (SIG_MOUSEMOVE, event.data)
buttons = self._get_button_coords (eventarea)
if not self._check_collision (event.data.pos, buttons[0]) \
and self._buttondec:
self._buttondec = False
dirty = True
if not self._check_collision (event.data.pos, buttons[1]) \
and self._buttoninc:
self._buttoninc = False
dirty = True
if self._click:
val = self._get_value_from_coords (eventarea,
event.data.pos)
if val != self.value:
self.value = val
dirty = False
self.dirty = dirty
event.handled = True
elif self.state == STATE_ENTERED:
self.state = STATE_NORMAL
elif event.signal == SIG_MOUSEUP:
if self._click or self._buttoninc or self._buttondec:
self._buttondec = False
self._buttoninc = False
self._click = False
if collision:
if event.data.button == 1:
if self.state == STATE_ACTIVE:
self.state = STATE_ENTERED
self.run_signal_handlers (SIG_MOUSEUP, event.data)
event.handled = True
else:
self.state = STATE_NORMAL
# Reset timer
self._timer = _TIMER
# The user holds the mouse clicked over one button.
elif (self._buttondec or self._buttoninc) and \
(event.signal == SIG_TICK):
# Wait half a second before starting to in/decrease.
if self._timer > 0:
self._timer -= 1
else:
if self._buttondec:
self.decrease ()
elif self._buttoninc:
self.increase ()
# Keyboard activation.
elif (event.signal == SIG_KEYDOWN) and self.focus:
if event.data.key in (K_KP_PLUS, K_PLUS, K_RIGHT, K_DOWN):
self.increase ()
event.handled = True
elif event.data.key in (K_KP_MINUS, K_MINUS, K_LEFT, K_UP):
self.decrease ()
event.handled = True
elif event.data.key == K_PAGEUP:
val = self.value - 10 * self.step
if val > self.minimum:
self.value = val
else:
self.value = self.minimum
event.handled = True
elif event.data.key == K_PAGEDOWN:
val = self.value + 10 * self.step
if val < self.maximum:
self.value = val
else:
self.value = self.maximum
event.handled = True
elif event.data.key == K_END:
self.value = self.maximum
event.handled = True
elif event.data.key == K_HOME:
self.value = self.minimum
event.handled = True
Range.notify (self, event)
button_dec = property (lambda self: self._buttondec,
doc = """Indicates, whether the decrease
button is pressed.""")
button_inc = property (lambda self: self._buttoninc,
doc = """Indicates, whether the increase
button is pressed.""")
class HScrollBar (ScrollBar):
"""HScrollBar (width, scroll) -> HScrollBar
A horizontal ScrollBar widget.
A ScrollBar widget with a horizontal orientation. By default, its
height is the sum of the button height (HSCROLLBAR_BUTTON_SIZE) and
the border drawn around it (2 * SCROLLBAR_BORDER) and has the passed
width. The scrolling area is the passed scroll value minus the width
of the ScrollBar.
    Thus, if the area to scroll is 200 pixels wide and the ScrollBar is
    about 100 pixels long, the ScrollBar's value range will go from 0
    to 100 (maximum = scroll - width). If the ScrollBar is longer than
    the area to scroll (scroll < width), then the value range will be 0.
    Note: The minimum size of the scrollbar is at least twice its
    size[1] parameter. This means that it can display both
    scrolling buttons next to each other. This will override the passed
    width value in the constructor, if necessary.
"""
def __init__ (self, width, scroll):
ScrollBar.__init__ (self)
# Minimum size for the two scrolling buttons next to each other
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER")) * 2
height = StyleInformation.get ("HSCROLLBAR_BUTTON_SIZE")[1] + border
if width < 2 * height:
width = 2 * height
self.lock ()
self.minsize = (width, height) # Default size.
self.maximum = scroll
self.unlock ()
def set_maximum (self, maximum):
"""H.set_maximum (...) -> None
Sets the maximum value to scroll.
        The passed maximum value differs from the maximum value of the
        slider. The HScrollBar also subtracts its own width from the
        scrolling maximum, so that the real maximum of its value range
        can be expressed by the formula:
        real_maximum = maximum - self.minsize[0]
        That means that if the HScrollBar is 100 pixels wide and the
        passed maximum value is 200, the scrolling range of the
        HScrollBar will go from 0 to 100 (100 + width = 200).
        Raises a ValueError, if the passed argument is smaller than
        the first element of the ScrollBar's size.
"""
if maximum < self.minsize[0]:
raise ValueError ("maximum must be greater than or equal to %d"
% self.minsize[0])
ScrollBar.set_maximum (self, maximum - self.minsize[0])
self.dirty = True
def _get_button_coords (self, area):
"""H._get_button_coords (...) -> tuple
Gets a tuple with the coordinates of the in- and decrease buttons.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
# Respect the set shadow for the ScrollBar.
button1 = (area.left + border, area.top + border,
area.height - 2 * border, area.height - 2 * border)
button2 = (area.left + area.width - area.height - border,
area.top + border, area.height - 2 * border,
area.height - 2 * border)
return (button1, button2)
def _get_slider_size (self):
"""H._get_slider_size () -> int
Calculates the size of the slider knob.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
# Minimum slider size, if the scrollbar is big enough.
minsize = 10
fullsize = self.size[0] - 2 * self.size[1]
if fullsize == 0:
            # If only the two scrolling buttons can be displayed, we will
            # completely skip the slider.
return 0
# Full size.
fullsize += 2 * border
slider_width = fullsize
if self.maximum != 0:
slider_width = fullsize / (float (self.maximum) + fullsize) * \
fullsize
if slider_width < minsize:
slider_width = minsize
return int (slider_width)
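    # Numeric sketch of the proportion above: with a 100 px track (fullsize)
    # and maximum == 300, the knob is 100 / (300 + 100) * 100 = 25 px wide,
    # clamped to a minimum of 10 px.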
def _get_coords_from_value (self):
"""H._get_coords_from_value () -> int
Calculates the slider coordinates for the HScrollBar.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
val = 0
if self.maximum > 0:
slider = self._get_slider_size ()
# Start offset for scrolling - this is the height
# (button + 2 * border) - border plus the half of the
# slider.
sl_x = self.minsize[1] - border + float (slider) / 2
# Valid sliding range.
slide = self.minsize[0] - 2 * sl_x
step = self.maximum / float (slide)
val = self.value / step + sl_x
return val
return self.size[0] / 2
def _get_value_from_coords (self, area, coords):
"""H._get_value_from_coords (...) -> float
Calculates the slider coordinates for the HScrollBar.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
val = 0
if self.maximum > 0:
slider = self._get_slider_size ()
sl_x = self.minsize[1] - border + float (slider) / 2
slide = self.minsize[0] - 2 * sl_x
n = coords[0] - area.left - sl_x
step = self.maximum / float (slide)
val = n * step
if val > self.maximum:
val = self.maximum
elif val < 0:
val = 0
return val
def draw_bg (self):
"""H.draw_bg () -> Surface
Draws the HScrollBar background surface and returns it.
Creates the visible surface of the HScrollBar and returns it to
the caller.
"""
return base.GlobalStyle.engine.draw_scrollbar (self,
ORIENTATION_HORIZONTAL)
def draw (self):
"""H.draw () -> None
Draws the HScrollBar surface and places its Buttons and slider on it.
"""
ScrollBar.draw (self)
cls = self.__class__
style = base.GlobalStyle
st = self.style or style.get_style (cls)
rect = self.image.get_rect ()
draw_rect = style.engine.draw_rect
draw_border = style.engine.draw_border
draw_arrow = style.engine.draw_arrow
# Create both buttons.
border = style.get_border_size \
(cls, st, StyleInformation.get ("SCROLLBAR_BORDER"))
button_type = StyleInformation.get ("SCROLLBAR_BUTTON_BORDER")
width_button = rect.height - 2 * border
# We use a temporary state here, so that just the buttons will
# have the typical sunken effect.
tmp_state = self.state
if self.state == STATE_ACTIVE:
tmp_state = STATE_NORMAL
# First button.
state_button = tmp_state
if self.button_dec:
state_button = STATE_ACTIVE
button1 = draw_rect (width_button, width_button, state_button, cls, st)
draw_border (button1, state_button, cls, st, button_type)
rect_button1 = button1.get_rect ()
# Draw the arrow.
draw_arrow (button1, ARROW_LEFT, state_button, cls, st)
rect_button1.x = border
rect_button1.centery = rect.centery
self.image.blit (button1, rect_button1)
# Second button
state_button = tmp_state
if self.button_inc:
state_button = STATE_ACTIVE
button2 = draw_rect (width_button, width_button, state_button, cls, st)
draw_border (button2, state_button, cls, st, button_type)
rect_button2 = button2.get_rect ()
# Draw the arrow.
draw_arrow (button2, ARROW_RIGHT, state_button, cls, st)
rect_button2.x = rect.width - width_button - border
rect_button2.centery = rect.centery
self.image.blit (button2, rect_button2)
# Create the slider.
slider_size = self._get_slider_size ()
if slider_size > 0:
sl = style.engine.draw_slider (slider_size, width_button,
tmp_state, cls, st)
r = sl.get_rect ()
r.centerx = self._get_coords_from_value ()
r.centery = rect.centery
self.image.blit (sl, r)
class VScrollBar (ScrollBar):
"""VScrollBar (height, scroll) -> VScrollBar
A vertical ScrollBar widget.
A ScrollBar widget with a vertical orientation. By default, its
width is the sum of the button width (VSCROLLBAR_BUTTON_SIZE) and
the border drawn around it (2 * SCROLLBAR_BORDER) and has the passed
height. The scrolling area is the passed scroll value minus the
height of the ScrollBar.
    Thus, if the area to scroll is 200 pixels high and the ScrollBar is
    about 100 pixels high, the ScrollBar's value range will go from 0
    to 100 (maximum = scroll - height). If the ScrollBar is longer than
    the area to scroll (scroll < height), then the value range will be 0.
    Note: The minimum size of the scrollbar is at least twice its
    size[0] parameter. This means that it can display both
    scrolling buttons next to each other. This will override the passed
    height value in the constructor, if necessary.
"""
def __init__ (self, height, scroll):
ScrollBar.__init__ (self)
# Minimum size for the two scrolling buttons next to each other.
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER")) * 2
width = StyleInformation.get ("VSCROLLBAR_BUTTON_SIZE")[0] + border
if height < 2 * width:
height = 2 * width
self.lock ()
self.minsize = (width, height) # Default size.
self.maximum = scroll
self.unlock ()
def set_maximum (self, maximum):
"""V.set_maximum (...) -> None
Sets the maximum value to scroll.
        The passed maximum value differs from the maximum value of the
        slider. The VScrollBar also subtracts its own height from the
        scrolling maximum, so that the real maximum of its value range
        can be expressed by the formula:
        real_maximum = maximum - self.minsize[1]
        That means that if the VScrollBar is 100 pixels high and the
        passed maximum value is 200, the scrolling range of the
        VScrollBar will go from 0 to 100 (100 + height = 200).
        Raises a ValueError, if the passed argument is smaller than
        the second element of the ScrollBar's size.
"""
if maximum < self.minsize[1]:
raise ValueError ("maximum must be greater than or equal to %d"
% self.minsize[1])
ScrollBar.set_maximum (self, maximum - self.minsize[1])
self.dirty = True
def _get_button_coords (self, area):
"""V._get_button_coords (...) -> tuple
Gets a tuple with the coordinates of the in- and decrease buttons.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
# Respect the set shadow for the ScrollBar.
button1 = (area.left + border, area.top + border,
area.width - 2 * border, area.width - 2 * border)
        # Both buttons are square: (width - 2 * border) on each side.
        button2 = (area.left + border,
                   area.top + area.height - area.width - border,
                   area.width - 2 * border, area.width - 2 * border)
return (button1, button2)
def _get_slider_size (self):
"""V._get_slider_size () -> int
Calculates the size of the slider knob.
"""
# Minimum slider size.
minsize = 10
if (self.size[1] - 2 * self.size[0]) == 0:
            # If only the two scrolling buttons can be displayed, we will
            # completely skip the slider.
return 0
# Full size.
fullsize = self.size[1] - 2 * self.size[0]
slider_height = fullsize
if self.maximum != 0:
slider_height = fullsize / (float (self.maximum) + fullsize) * \
fullsize
if slider_height < minsize:
slider_height = minsize
return int (slider_height)
def _get_coords_from_value (self):
"""V._get_coords_from_value () -> int
Calculates the slider coordinates for the VScrollBar.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
val = 0
if self.maximum > 0:
slider = self._get_slider_size ()
sl_y = self.minsize[0] - border + float (slider) / 2
slide = self.minsize[1] - 2 * sl_y
step = self.maximum / float (slide)
val = self.value / step + sl_y
return val
return self.size[1] / 2
def _get_value_from_coords (self, area, coords):
"""V._get_value_from_coords (...) -> float
Calculates the slider coordinates for the VScrollBar.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("SCROLLBAR_BORDER"))
val = 0
if self.maximum > 0:
slider = self._get_slider_size ()
# Start offset for scrolling - this is the width
# (button + 2 * border) - border plus the half of the
# slider.
sl_y = self.minsize[0] - border + float (slider) / 2
# Valid sliding range.
slide = self.minsize[1] - 2 * sl_y
n = coords[1] - area.top - sl_y
step = self.maximum / float (slide)
val = n * step
if val > self.maximum:
val = self.maximum
elif val < 0:
val = 0
return val
def draw_bg (self):
"""V.draw_bg (...) -> Surface
Draws the VScrollBar background surface and returns it.
Creates the visible surface of the VScrollBar and returns it to
the caller.
"""
return base.GlobalStyle.engine.draw_scrollbar (self,
ORIENTATION_VERTICAL)
def draw (self):
"""V.draw () -> None
Draws the VScrollBar surface and places its Buttons and slider on it.
"""
ScrollBar.draw (self)
cls = self.__class__
style = base.GlobalStyle
st = self.style or style.get_style (cls)
rect = self.image.get_rect ()
draw_rect = style.engine.draw_rect
draw_border = style.engine.draw_border
draw_arrow = style.engine.draw_arrow
# Create both buttons.
border = style.get_border_size \
(cls, st, StyleInformation.get ("SCROLLBAR_BORDER"))
button_type = StyleInformation.get ("SCROLLBAR_BUTTON_BORDER")
width_button = rect.width - 2 * border
# We use a temporary state here, so that just the buttons will
# have the typical sunken effect.
tmp_state = self.state
if self.state == STATE_ACTIVE:
tmp_state = STATE_NORMAL
# First button.
state_button = tmp_state
if self.button_dec:
state_button = STATE_ACTIVE
button1 = draw_rect (width_button, width_button, state_button, cls, st)
draw_border (button1, state_button, cls, st, button_type)
rect_button1 = button1.get_rect ()
# Draw the arrow.
draw_arrow (button1, ARROW_UP, state_button, cls, st)
rect_button1.y = border
rect_button1.centerx = rect.centerx
self.image.blit (button1, rect_button1)
# Second button
state_button = tmp_state
if self.button_inc:
state_button = STATE_ACTIVE
button2 = draw_rect (width_button, width_button, state_button, cls, st)
draw_border (button2, state_button, cls, st, button_type)
rect_button2 = button2.get_rect ()
# Draw the arrow.
draw_arrow (button2, ARROW_DOWN, state_button, cls, st)
rect_button2.y = rect.height - width_button - border
rect_button2.centerx = rect.centerx
self.image.blit (button2, rect_button2)
# Create the slider.
slider_size = self._get_slider_size ()
if slider_size > 0:
sl = style.engine.draw_slider (width_button, slider_size,
tmp_state, cls, st)
r = sl.get_rect ()
r.centerx = rect.centerx
r.centery = self._get_coords_from_value ()
self.image.blit (sl, r)
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/ScrollBar.py",
"copies": "1",
"size": "28886",
"license": "bsd-2-clause",
"hash": -3268360133049812500,
"line_mean": 37.5146666667,
"line_max": 79,
"alpha_frac": 0.5596828914,
"autogenerated": false,
"ratio": 4.3145631067961165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5374245998196117,
"avg_score": null,
"num_lines": null
} |
"""A widget class, which can contain other widgets and uses optional
scrollbars."""
from pygame import K_RIGHT, K_LEFT, K_HOME, K_END, K_DOWN, K_UP, K_PAGEDOWN
from pygame import K_PAGEUP
from ScrollBar import HScrollBar, VScrollBar
from Bin import Bin
from BaseWidget import BaseWidget
from ViewPort import ViewPort
from StyleInformation import StyleInformation
from Constants import *
import base
class ScrolledWindow (Bin):
"""ScrolledWindow (width, height) -> ScrolledWindow
A widget class, which supports horizontal and vertical scrolling.
The ScrolledWindow is a viewport, which enables its child to be
scrolled horizontally and vertically. It offers various scrolling
    types, which customize the behaviour of the supplied scrollbars.
The scrolling behaviour of the ScrolledWindow can be adjusted
through the 'scrolling' attribute or set_scrolling() method and can
be one of the SCROLL_TYPES constants.
Default action (invoked by activate()):
Gives the ScrolledWindow the input focus.
Mnemonic action (invoked by activate_mnemonic()):
None
Signals:
SIG_KEYDOWN - Invoked, when a key is pressed while the ScrolledWindow
has the input focus.
SIG_MOUSEDOWN - Invoked, when a mouse button is pressed over the
ScrolledWindow.
Attributes:
scrolling - The scrolling behaviour of the ScrolledWindow.
vscrollbar - The vertical scrollbar of the ScrolledWindow.
hscrollbar - The horizontal scrollbar of the ScrolledWindow.
"""
def __init__ (self, width, height):
Bin.__init__ (self)
self._scrolling = SCROLL_AUTO
# Scrollbars.
self._vscroll = VScrollBar (height, height)
self._vscroll.connect_signal (SIG_VALCHANGED, self._scroll_child)
self._vscroll.parent = self
self._hscroll = HScrollBar (width, width)
self._hscroll.connect_signal (SIG_VALCHANGED, self._scroll_child)
self._hscroll.parent = self
self._vscroll_visible = False
self._hscroll_visible = False
# Scrolling will be handled directly by the ScrolledWindow.
self._hscroll.set_focus = lambda self: False
self._vscroll.set_focus = lambda self: False
self.controls.append (self._vscroll)
self.controls.append (self._hscroll)
self._signals[SIG_KEYDOWN] = None # Dummy
self._signals[SIG_MOUSEDOWN] = []
# Respect the size.
if width < self._hscroll.minsize[0]:
width = self._hscroll.minsize[0]
if height < self._vscroll.minsize[1]:
height = self._vscroll.minsize[1]
self.minsize = width, height
def set_scrolling (self, scrolling):
"""S.set_scrolling (...) -> None
Sets the scrolling behaviour for the ScrolledWindow.
The scrolling can be a value of the SCROLL_TYPES list.
        SCROLL_AUTO causes the ScrolledWindow to display its scrollbars on
demand only, SCROLL_ALWAYS will show the scrollbars permanently
and SCROLL_NEVER will disable the scrollbars.
Raises a ValueError, if the passed argument is not a value of
the SCROLL_TYPES tuple.
"""
if scrolling not in SCROLL_TYPES:
raise ValueError ("scrolling must be a value from SCROLL_TYPES")
if self._scrolling != scrolling:
self._scrolling = scrolling
self.dirty = True
def set_child (self, child=None):
"""B.set_child (...) -> None
Sets the child to display in the ScrolledList.
Creates a parent-child relationship from the ScrolledList to a
widget. If the widget does not support native scrolling, it will
be packed into a ViewPort.
"""
self.lock ()
if child and not isinstance (child, ViewPort):
self.vscrollbar.value = 0
self.hscrollbar.value = 0
child = ViewPort (child)
child.minsize = self.minsize
Bin.set_child (self, child)
self.unlock ()
def activate (self):
"""S.activate () -> None
Activates the ScrolledWindow default action.
Activates the ScrolledWindow default action. This usually means
giving the ScrolledWindow the input focus.
"""
if not self.sensitive:
return
self.focus = True
def notify (self, event):
"""S.notify (...) -> None
Notifies the ScrolledWindow about an event.
"""
if not self.sensitive:
return
if event.signal in SIGNALS_MOUSE:
eventarea = self.rect_to_client ()
if event.signal == SIG_MOUSEDOWN:
if eventarea.collidepoint (event.data.pos):
self.focus = True
self.run_signal_handlers (SIG_MOUSEDOWN, event.data)
# Mouse wheel.
for c in self.controls:
c.notify (event)
if not event.handled:
if self._vscroll_visible:
if event.data.button == 4:
self.vscrollbar.decrease ()
elif event.data.button == 5:
self.vscrollbar.increase ()
elif self._hscroll_visible:
if event.data.button == 4:
self.hscrollbar.decrease ()
elif event.data.button == 5:
self.hscrollbar.increase ()
event.handled = True
elif (event.signal == SIG_KEYDOWN) and self.focus:
if self._hscroll_visible:
# Horizontal scrollbar key movement
if event.data.key == K_RIGHT:
self.hscrollbar.increase ()
event.handled = True
elif event.data.key == K_LEFT:
self.hscrollbar.decrease ()
event.handled = True
elif event.data.key == K_END:
self.hscrollbar.value = self.hscrollbar.maximum
event.handled = True
elif event.data.key == K_HOME:
self.hscrollbar.value = self.hscrollbar.minimum
event.handled = True
if self._vscroll_visible:
# Vertical scrollbar key movement
if event.data.key == K_DOWN:
self.vscrollbar.increase ()
event.handled = True
elif event.data.key == K_UP:
self.vscrollbar.decrease ()
event.handled = True
elif event.data.key == K_PAGEUP:
val = self.vscrollbar.value - 10 * self.vscrollbar.step
if val > self.vscrollbar.minimum:
self.vscrollbar.value = val
else:
self.vscrollbar.value = self.vscrollbar.minimum
event.handled = True
elif event.data.key == K_PAGEDOWN:
val = self.vscrollbar.value + 10 * self.vscrollbar.step
if val < self.vscrollbar.maximum:
self.vscrollbar.value = val
else:
self.vscrollbar.value = self.vscrollbar.maximum
event.handled = True
elif event.data.key == K_END:
self.vscrollbar.value = self.vscrollbar.maximum
event.handled = True
elif event.data.key == K_HOME:
self.vscrollbar.value = self.vscrollbar.minimum
event.handled = True
Bin.notify (self, event)
def _scroll_child (self):
"""S._scroll_child () -> None
Scrolls the child of the ScrolledWindow.
"""
if self.child != None:
self.child.lock ()
self.child.hadjustment = - int (self.hscrollbar.value)
self.child.vadjustment = - int (self.vscrollbar.value)
self.child.unlock ()
def _update_scrollbars (self):
"""S._update_scrollbars (...) -> bool, bool
Updates the size and maximum values of the attached scrollbars.
Updates the size and values of the attached scrollbars and
returns boolean values about their visibility in the order
hscrollbar, vscrollbar.
"""
old_vals = self._vscroll_visible, self._hscroll_visible
if self.scrolling == SCROLL_NEVER:
if self.child:
self.child.minsize = self.minsize
self.hscrollbar.sensitive = False
self.vscrollbar.sensitive = False
self._vscroll_visible = False
self._hscroll_visible = False
return False, False
self.hscrollbar.bottom = self.bottom - self.top
self.vscrollbar.right = self.right - self.left
width = 0
height = 0
if self.child:
width = self.child.real_width + 2 * self.padding
height = self.child.real_height + 2 * self.padding
# We are using the draw_border() method for the inner surface,
# so we have to add the borders to the scrolling maximum.
if self.scrolling == SCROLL_ALWAYS:
self.vscrollbar.minsize = (self.vscrollbar.minsize[0],
self.minsize[1] - \
self.hscrollbar.height)
self.hscrollbar.minsize = (self.minsize[0] - self.vscrollbar.width,
self.hscrollbar.minsize[1])
if self.child:
self.child.maxsize = \
(self.minsize[0] - self.vscrollbar.size[0],
self.minsize[1] - self.hscrollbar.size[1])
self.child.minsize = self.child.maxsize
if width < self.hscrollbar.minsize[0]:
width = self.hscrollbar.minsize[0]
self.hscrollbar.maximum = width
if height < self.vscrollbar.minsize[1]:
height = self.vscrollbar.minsize[1]
self.vscrollbar.maximum = height
self._vscroll_visible = True
self._hscroll_visible = True
self.vscrollbar.sensitive = True
self.hscrollbar.sensitive = True
elif self.scrolling == SCROLL_AUTO:
# Check the sizes, so we can determine, how the scrollbars
# need to be adjusted.
self._hscroll_visible = self.minsize[0] < width
self._vscroll_visible = self.minsize[1] < height
if self._hscroll_visible:
self._vscroll_visible = (self.minsize[1] -
self.hscrollbar.height) < height
if self._vscroll_visible:
self._hscroll_visible = (self.minsize[0] -
self.vscrollbar.width) < width
if self._vscroll_visible and self._hscroll_visible:
# Both scrollbars need to be shown.
self.vscrollbar.minsize = (self.vscrollbar.minsize[0],
self.minsize[1] - \
self.hscrollbar.height)
self.hscrollbar.minsize = (self.minsize[0] - \
self.vscrollbar.width,
self.hscrollbar.minsize[1])
if self.child:
self.child.maxsize = \
(self.minsize[0] - self.vscrollbar.size[0],
self.minsize[1] - self.hscrollbar.size[1])
self.child.minsize = self.child.maxsize
self.vscrollbar.maximum = height
self.hscrollbar.maximum = width
self.vscrollbar.sensitive = True
self.hscrollbar.sensitive = True
elif self._vscroll_visible:
# Only the vertical.
self.vscrollbar.minsize = (self.vscrollbar.minsize[0],
self.minsize[1])
if self.child:
self.child.maxsize = \
(self.minsize[0] - self.vscrollbar.minsize[0],
self.minsize[1])
self.child.minsize = self.child.maxsize
self.vscrollbar.maximum = height
self.vscrollbar.sensitive = True
self.hscrollbar.sensitive = False
elif self._hscroll_visible:
# Only the horizontal.
self.hscrollbar.minsize = (self.minsize[0],
self.hscrollbar.minsize[1])
if self.child:
self.child.maxsize = \
(self.minsize[0],
self.minsize[1] - self.hscrollbar.size[1])
self.child.minsize = self.child.maxsize
self.hscrollbar.maximum = width
self.hscrollbar.sensitive = True
self.vscrollbar.sensitive = False
else:
if self.child:
self.child.minsize = self.minsize
# Neither vertical nor horizontal
self.hscrollbar.sensitive = False
self.vscrollbar.sensitive = False
if not self.sensitive:
self.hscrollbar.sensitive = False
self.vscrollbar.sensitive = False
return self._hscroll_visible, self._vscroll_visible
def draw_bg (self):
"""S.draw_bg () -> Surface
Draws the ScrolledWindow background surface and returns it.
Creates the visible background surface of the ScrolledWindow and
returns it to the caller.
"""
return base.GlobalStyle.engine.draw_scrolledwindow (self)
def draw (self):
"""W.draw () -> None
Draws the ScrolledWindow surface.
Creates the visible surface of the ScrolledWindow.
"""
Bin.draw (self)
blit = self.image.blit
        # Update the scrollbars.
self.hscrollbar.lock ()
self.vscrollbar.lock ()
hscroll, vscroll = self._update_scrollbars ()
self.hscrollbar.unlock ()
self.vscrollbar.unlock ()
if hscroll:
blit (self.hscrollbar.image, self.hscrollbar.rect)
if vscroll:
blit (self.vscrollbar.image, self.vscrollbar.rect)
if self.child:
blit (self.child.image, self.child.rect)
scrolling = property (lambda self: self._scrolling,
lambda self, var: self.set_scrolling (var),
doc = "The scrolling behaviour for the " \
"ScrolledWindow.")
vscrollbar = property (lambda self: self._vscroll,
doc = "The vertical scrollbar.")
hscrollbar = property (lambda self: self._hscroll,
doc = "The herizontal scrollbar.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/ScrolledWindow.py",
"copies": "1",
"size": "16723",
"license": "bsd-2-clause",
"hash": 7641933255451440000,
"line_mean": 40.2913580247,
"line_max": 79,
"alpha_frac": 0.5622196974,
"autogenerated": false,
"ratio": 4.4054267650158065,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5467646462415806,
"avg_score": null,
"num_lines": null
} |
# $Id: sctp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Stream Control Transmission Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import crc32c
# Stream Control Transmission Protocol
# http://tools.ietf.org/html/rfc2960
# Chunk Types
DATA = 0
INIT = 1
INIT_ACK = 2
SACK = 3
HEARTBEAT = 4
HEARTBEAT_ACK = 5
ABORT = 6
SHUTDOWN = 7
SHUTDOWN_ACK = 8
ERROR = 9
COOKIE_ECHO = 10
COOKIE_ACK = 11
ECNE = 12
CWR = 13
SHUTDOWN_COMPLETE = 14
class SCTP(dpkt.Packet):
"""Stream Control Transmission Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SCTP.
TODO.
"""
__hdr__ = (
('sport', 'H', 0),
('dport', 'H', 0),
('vtag', 'I', 0),
('sum', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
while self.data:
chunk = Chunk(self.data)
l.append(chunk)
self.data = self.data[len(chunk):]
self.data = self.chunks = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __bytes__(self):
l = [bytes(x) for x in self.data]
if self.sum == 0:
s = crc32c.add(0xffffffff, self.pack_hdr())
for x in l:
s = crc32c.add(s, x)
self.sum = crc32c.done(s)
return self.pack_hdr() + b''.join(l)
class Chunk(dpkt.Packet):
__hdr__ = (
('type', 'B', INIT),
('flags', 'B', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
__s = b'\x80\x44\x00\x50\x00\x00\x00\x00\x30\xba\xef\x54\x01\x00\x00\x3c\x3b\xb9\x9c\x46\x00\x01\xa0\x00\x00\x0a\xff\xff\x2b\x2d\x7e\xb2\x00\x05\x00\x08\x9b\xe6\x18\x9b\x00\x05\x00\x08\x9b\xe6\x18\x9c\x00\x0c\x00\x06\x00\x05\x00\x00\x80\x00\x00\x04\xc0\x00\x00\x04\xc0\x06\x00\x08\x00\x00\x00\x00'
def test_sctp_pack():
sctp = SCTP(__s)
assert (__s == bytes(sctp))
sctp.sum = 0
assert (__s == bytes(sctp))
def test_sctp_unpack():
sctp = SCTP(__s)
assert (sctp.sport == 32836)
assert (sctp.dport == 80)
assert (len(sctp.chunks) == 1)
assert (len(sctp) == 72)
chunk = sctp.chunks[0]
assert (chunk.type == INIT)
assert (chunk.len == 60)
if __name__ == '__main__':
test_sctp_pack()
test_sctp_unpack()
print('Tests Successful...')
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/sctp.py",
"copies": "3",
"size": "2523",
"license": "apache-2.0",
"hash": -6877362430263517000,
"line_mean": 22.8018867925,
"line_max": 297,
"alpha_frac": 0.5596512089,
"autogenerated": false,
"ratio": 2.5850409836065573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9640266159715323,
"avg_score": 0.0008852065582469533,
"num_lines": 106
} |
# $Id: sctp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Stream Control Transmission Protocol."""
import dpkt
import crc32c
# Stream Control Transmission Protocol
# http://tools.ietf.org/html/rfc2960
# Chunk Types
DATA = 0
INIT = 1
INIT_ACK = 2
SACK = 3
HEARTBEAT = 4
HEARTBEAT_ACK = 5
ABORT = 6
SHUTDOWN = 7
SHUTDOWN_ACK = 8
ERROR = 9
COOKIE_ECHO = 10
COOKIE_ACK = 11
ECNE = 12
CWR = 13
SHUTDOWN_COMPLETE = 14
class SCTP(dpkt.Packet):
__hdr__ = (
('sport', 'H', 0),
('dport', 'H', 0),
('vtag', 'I', 0),
('sum', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
while self.data:
chunk = Chunk(self.data)
l.append(chunk)
self.data = self.data[len(chunk):]
self.data = self.chunks = l
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __str__(self):
l = [str(x) for x in self.data]
if self.sum == 0:
s = crc32c.add(0xffffffffL, self.pack_hdr())
for x in l:
s = crc32c.add(s, x)
self.sum = crc32c.done(s)
return self.pack_hdr() + ''.join(l)
class Chunk(dpkt.Packet):
__hdr__ = (
('type', 'B', INIT),
('flags', 'B', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
__s = '\x80\x44\x00\x50\x00\x00\x00\x00\x30\xba\xef\x54\x01\x00\x00\x3c\x3b\xb9\x9c\x46\x00\x01\xa0\x00\x00\x0a\xff\xff\x2b\x2d\x7e\xb2\x00\x05\x00\x08\x9b\xe6\x18\x9b\x00\x05\x00\x08\x9b\xe6\x18\x9c\x00\x0c\x00\x06\x00\x05\x00\x00\x80\x00\x00\x04\xc0\x00\x00\x04\xc0\x06\x00\x08\x00\x00\x00\x00'
def test_sctp_pack():
sctp = SCTP(__s)
assert (__s == str(sctp))
sctp.sum = 0
assert (__s == str(sctp))
def test_sctp_unpack():
sctp = SCTP(__s)
assert (sctp.sport == 32836)
assert (sctp.dport == 80)
assert (len(sctp.chunks) == 1)
assert (len(sctp) == 72)
chunk = sctp.chunks[0]
assert (chunk.type == INIT)
assert (chunk.len == 60)
if __name__ == '__main__':
test_sctp_pack()
test_sctp_unpack()
print 'Tests Successful...' | {
"repo_name": "hexcap/dpkt",
"path": "dpkt/sctp.py",
"copies": "6",
"size": "2256",
"license": "bsd-3-clause",
"hash": -4052172062442528300,
"line_mean": 22.7578947368,
"line_max": 296,
"alpha_frac": 0.5505319149,
"autogenerated": false,
"ratio": 2.462882096069869,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6013414010969869,
"avg_score": null,
"num_lines": null
} |
# $Id: sctp.py 23 2006-11-08 15:45:33Z dugsong $
"""Stream Control Transmission Protocol."""
import dpkt, crc32c
# Stream Control Transmission Protocol
# http://tools.ietf.org/html/rfc2960
# Chunk Types
DATA = 0
INIT = 1
INIT_ACK = 2
SACK = 3
HEARTBEAT = 4
HEARTBEAT_ACK = 5
ABORT = 6
SHUTDOWN = 7
SHUTDOWN_ACK = 8
ERROR = 9
COOKIE_ECHO = 10
COOKIE_ACK = 11
ECNE = 12
CWR = 13
SHUTDOWN_COMPLETE = 14
class SCTP(dpkt.Packet):
__hdr__ = (
('sport', 'H', 0),
('dport', 'H', 0),
('vtag', 'I', 0),
('sum', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l = []
while self.data:
chunk = Chunk(self.data)
l.append(chunk)
self.data = self.data[len(chunk):]
self.data = self.chunks = l
def __len__(self):
return self.__hdr_len__ + \
sum(map(len, self.data))
def __str__(self):
l = [ str(x) for x in self.data ]
if self.sum == 0:
s = crc32c.add(0xffffffffL, self.pack_hdr())
for x in l:
s = crc32c.add(s, x)
self.sum = crc32c.done(s)
return self.pack_hdr() + ''.join(l)
class Chunk(dpkt.Packet):
__hdr__ = (
('type', 'B', INIT),
('flags', 'B', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
if __name__ == '__main__':
import unittest
class SCTPTestCase(unittest.TestCase):
def testPack(self):
sctp = SCTP(self.s)
self.failUnless(self.s == str(sctp))
sctp.sum = 0
self.failUnless(self.s == str(sctp))
def testUnpack(self):
sctp = SCTP(self.s)
self.failUnless(sctp.sport == 32836)
self.failUnless(sctp.dport == 80)
self.failUnless(len(sctp.chunks) == 1)
self.failUnless(len(sctp) == 72)
chunk = sctp.chunks[0]
self.failUnless(chunk.type == INIT)
self.failUnless(chunk.len == 60)
s = '\x80\x44\x00\x50\x00\x00\x00\x00\x30\xba\xef\x54\x01\x00\x00\x3c\x3b\xb9\x9c\x46\x00\x01\xa0\x00\x00\x0a\xff\xff\x2b\x2d\x7e\xb2\x00\x05\x00\x08\x9b\xe6\x18\x9b\x00\x05\x00\x08\x9b\xe6\x18\x9c\x00\x0c\x00\x06\x00\x05\x00\x00\x80\x00\x00\x04\xc0\x00\x00\x04\xc0\x06\x00\x08\x00\x00\x00\x00'
unittest.main()
| {
"repo_name": "zhakui/QMarkdowner",
"path": "dpkt/sctp.py",
"copies": "15",
"size": "2468",
"license": "mit",
"hash": 8760624498417286000,
"line_mean": 26.4222222222,
"line_max": 302,
"alpha_frac": 0.5344408428,
"autogenerated": false,
"ratio": 2.6061246040126718,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $Id: SD.py,v 1.10 2008-08-05 00:20:44 gosselin_a Exp $
# $Log: not supported by cvs2svn $
# Revision 1.9 2008/06/30 02:59:57 gosselin_a
# Fixed definition of equivNumericTypes list.
#
# Revision 1.8 2008/06/30 02:41:44 gosselin_a
# Preleminary check-in of changes leading to the 0.8 revision.
# - switch to numpy, Numeric now unsupported
# - better documentation of the compression features
# - some bug fixes
#
# Revision 1.7 2005/07/14 01:36:41 gosselin_a
# pyhdf-0.7-3
# Ported to HDF4.2r1.
# Support for SZIP compression on SDS datasets.
# All classes are now 'new-style' classes, deriving from 'object'.
# Update documentation.
#
# Revision 1.6 2005/01/25 18:17:53 gosselin_a
# Importer le symbole 'HDF4Error' a partir du module SD.
#
# Revision 1.5 2004/08/02 17:06:20 gosselin
# pyhdf-0.7.2
#
# Revision 1.4 2004/08/02 15:36:04 gosselin
# pyhdf-0.7-1
#
# Revision 1.3 2004/08/02 15:22:59 gosselin
# pyhdf -0.6-1
#
# Revision 1.2 2004/08/02 15:00:34 gosselin
# pyhdf 0.5-2
#
# Author: Andre Gosselin
# Maurice Lamontagne Institute
# Andre.Gosselin@dfo-mpo.gc.ca
"""
SD (scientific dataset) API (:mod:`pyhdf.SD`)
=============================================
A module of the pyhdf package implementing the SD (scientific
dataset) API of the NCSA HDF4 library.
(see: hdf.ncsa.uiuc.edu)
Introduction
------------
SD is one of the modules composing pyhdf, a python package implementing
the NCSA HDF library and letting one manage HDF files from within a python
program. Two versions of the HDF library currently exist, version 4 and
version 5. pyhdf only implements version 4 of the library. Many
different APIs are to be found inside the HDF4 specification.
Currently, pyhdf implements just a few of those: the SD, VS and V APIs.
Other APIs should be added in the future (GR, AN, etc).
The SD module implements the SD API of the HDF4 library, supporting what
are known as "scientific datasets". The HDF SD API has many similarities
with the netCDF API, another popular API for dealing with scientific
datasets. netCDF files can in fact be read and modified using the SD
module (but cannot be created from scratch).
SD module key features
----------------------
SD key features are as follows.
- Almost every routine of the original SD API has been implemented inside
pyhdf. Only a few have been ignored, most of them being of a rare use:
- SDsetnbitdataset()
- All chunking/tiling routines : SDgetchunkinfo(), SDreadchunk(),
SDsetchunk(), SDsetchunkcache(), SDwritechunk()
- SDsetblocksize()
- SDisdimval_bwcomp(), SDsetdimval_comp()
- It is quite straightforward to go from a C version to a python version
of a program accessing the SD API, and to learn SD usage by referring to
the C API documentation.
- A few high-level python methods have been developed to ease the
programmer's task. Of greatest interest are those allowing access
to SD datasets through familiar python idioms.
- Attributes can be read/written like ordinary python class
attributes.
- Datasets can be read/written like ordinary python lists using
multidimensional indices and so-called "extended slice syntax", with
strides allowed.
See "High level attribute access" and "High level variable access"
sections for details.
- SD offers methods to retrieve a dictionary of the attributes,
dimensions and variables defined on a dataset, and of the attributes
set on a variable and a dimension. Querying a dataset is thus greatly
simplified.
- SD datasets are read/written through "numpy", a sophisticated
python package for efficiently handling multi-dimensional arrays of
numbers. numpy can nicely extend the SD functionality, e.g.
adding/subtracting arrays with the '+/-' operators.
Accessing the SD module
-----------------------
To access the SD API a python program can say one of:
>>> import pyhdf.SD # must prefix names with "pyhdf.SD."
>>> from pyhdf import SD # must prefix names with "SD."
>>> from pyhdf.SD import * # names need no prefix
This document assumes the last import style is used.
numpy will also need to be imported:
>>> from numpy import *
Package components
------------------
pyhdf is a proper Python package, i.e. a collection of modules stored under
a directory whose name is that of the package and which stores an
__init__.py file. Following the normal installation procedure, this
directory will be <python-lib>/site-packages/pyhdf, where <python-lib>
stands for the python installation directory.
For each HDF API exists a corresponding set of modules.
The following modules are related to the SD API.
_hdfext
C extension module responsible for wrapping the HDF
C-library for all python modules
hdfext
python module implementing some utility functions
complementing the _hdfext extension module
error
defines the HDF4Error exception
SD
python module wrapping the SD API routines inside
an OOP framework
_hdfext and hdfext were generated using the SWIG preprocessor.
SWIG is however *not* needed to run the package. Those two modules
are meant to do their work in the background, and should never be called
directly. Only 'pyhdf.SD' should be imported by the user program.
Prerequisites
-------------
The following software must be installed in order for pyhdf release 0.8 to
work.
HDF (v4) library, release 4.2r1
pyhdf does *not* include the HDF4 library, which must
be installed separately.
HDF is available at:
"http://hdf.ncsa.uiuc.edu/obtain.html".
HDF4.2r1 in turn relies on the following packages :
======= ============== ===========================================
libjpeg (jpeg library) release 6b
libz (zlib library) release 1.1.4 or above
libsz (SZIP library) release 2.0; this package is optional
if pyhdf is installed with NOSZIP macro set
======= ============== ===========================================
The SD module also needs:
numpy python package
SD variables are read/written using the array data type provided
by the python NumPy package. Note that since version 0.8 of
pyhdf, version 1.0.5 or above of NumPy is needed.
numpy is available at:
"http://www.numpy.org".
Documentation
-------------
pyhdf has been written so as to stick as closely as possible to
the naming conventions and calling sequences documented inside the
"HDF User s Guide" manual. Even if pyhdf gives an OOP twist
to the C API, the manual can be easily used as a documentary source
for pyhdf, once the class to which a function belongs has been
identified, and of course once requirements imposed by the Python
language have been taken into account. Consequently, this documentation
will not attempt to provide an exhaustive coverage of the HDF SD
API. For this, the user is referred to the above manual.
The documentation of each pyhdf method will indicate the name
of the equivalent routine inside the C API.
This document (in both its text and html versions) has been completely
produced using "pydoc", the Python documentation generator (which
made its debut in the 2.1 Python release). pydoc can also be used
as an on-line help tool. For example, to know everything about
the SD.SDS class, say:
>>> from pydoc import help
>>> from pyhdf.SD import *
>>> help(SDS)
To be more specific and get help only for the get() method of the
SDS class:
>>> help(SDS.get) # or...
>>> help(vinst.get) # if vinst is an SDS instance
pydoc can also be called from the command line, as in::
% pydoc pyhdf.SD.SDS # doc for the whole SDS class
% pydoc pyhdf.SD.SDS.get # doc for the SDS.get method
Summary of differences between the pyhdf and C SD API
-----------------------------------------------------
Most of the differences between the pyhdf and C SD API can
be summarized as follows.
- In the C API, every function returns an integer status code, and values
computed by the function are returned through one or more pointers
passed as arguments.
- In pyhdf, error statuses are returned through the Python exception
mechanism, and values are returned as the method result. When the
C API specifies that multiple values are returned, pyhdf returns a
tuple of values, which are ordered similarly to the pointers in the
C function argument list.
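For example, where C code calls SDfileinfo(sd_id, &n_datasets,
&n_attrs) and then tests the returned status, pyhdf raises on failure
and simply returns both values (a sketch; 'd' is an open SD instance):
>>> n_datasets, n_attrs = d.info()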
Error handling
--------------
All errors that the C SD API reports with a SUCCESS/FAIL error code
are reported by pyhdf using the Python exception mechanism.
When the C library reports a FAIL status, pyhdf raises an HDF4Error
exception (a subclass of Exception) with a descriptive message.
Unfortunately, the C library is rarely informative about the cause of
the error. pyhdf does its best to try to document the error, but most
of the time cannot do more than saying "execution error".
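For example (a minimal sketch; 'missing.hdf' names a file assumed not
to exist):
>>> try:
...     d = SD('missing.hdf')
... except HDF4Error as msg:
...     print("could not open file:", msg)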
Attribute access: low and high level
------------------------------------
In the SD API, attributes can be of many types (integer, float, string,
etc) and can be single or multi-valued. Attributes can be set either at
the dataset, the variable or the dimension level. This can be achieved
in two ways.
- By calling the get()/set() method of an attribute instance. In the
following example, HDF file 'example.hdf' is created, and string
attribute 'title' is attached to the file and given value
'example'.
>>> from pyhdf.SD import *
>>> d = SD('example.hdf',SDC.WRITE|SDC.CREATE) # create file
>>> att = d.attr('title') # create attribute instance
>>> att.set(SDC.CHAR, 'example') # set attribute type and value
>>> print(att.get()) # get attribute value
>>>
- By handling the attribute like an ordinary Python class attribute.
The above example can then be rewritten as follows:
>>> from pyhdf.SD import *
>>> d = SD('example.hdf',SDC.WRITE|SDC.CREATE) # create dataset
>>> d.title = 'example' # set attribute type and value
>>> print(d.title) # get attribute value
>>>
What has been said above applies as well to multi-valued attributes.
>>> att = d.attr('values') # With an attribute instance
>>> att.set(SDC.INT32, (1,2,3,4,5)) # Assign 5 ints as attribute value
>>> att.get() # Get attribute values
[1, 2, 3, 4, 5]
>>> d.values = (1,2,3,4,5) # As a Python class attribute
>>> d.values # Get attribute values
[1, 2, 3, 4, 5]
When the attribute is known by its name, standard functions 'setattr()'
and 'getattr()' can be used to replace the dot notation.
Above example becomes:
>>> setattr(d, 'values', (1,2,3,4,5))
>>> getattr(d, 'values')
[1, 2, 3, 4, 5]
Handling an SD attribute like a Python class attribute is admittedly
more natural, and also much simpler. Some control is however lost in
doing so.
- Attribute type cannot be specified. pyhdf automatically selects one of
three types according to the value(s) assigned to the attribute:
SDC.CHAR if value is a string, SDC.INT32 if all values are integral,
SDC.DOUBLE if one value is a float.
- Consequently, byte values cannot be assigned.
- Attribute properties (length, type, index number) can only be queried
through methods of an attribute instance.
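The automatic type selection can be illustrated as follows (a sketch;
'd' is an SD instance opened in write mode):
>>> d.title = 'example' # string value: stored as SDC.CHAR
>>> d.revision = 3 # integral value: stored as SDC.INT32
>>> d.version = 1.2 # float present: stored as SDC.DOUBLE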
Variable access: low and high level
-----------------------------------
Similarly to attributes, datasets can be read/written in two ways.
The first way is through the get()/set() methods of a dataset instance.
Those methods accept parameters to specify the starting indices, the count
of values to read/write, and the strides along each dimension. For example,
if 'v' is a 4x4 array:
>>> v.get() # complete array
>>> v.get(start=(0,0),count=(1,4)) # first row
>>> v.get(start=(0,1),count=(2,2), # second and third columns of
... stride=(2,1)) # first and third row
The second way is by indexing and slicing the variable like a Python
sequence. pyhdf here follows most of the rules used to index and slice
numpy arrays. Thus an HDF dataset can be seen almost as a numpy
array, except that data is read from/written to a file instead of memory.
Extended indexing let you access variable elements with the familiar
[i,j,...] notation, with one index per dimension. For example, if 'm' is a
rank 3 dataset, one could write:
>>> m[0,3,5] = m[0,5,3]
When indexing is used to select a dimension in a 'get' operation, this
dimension is removed from the output array, thus reducing its rank by 1. A
rank 0 array is converted to a scalar. Thus, for a 3x3x3 'm' dataset
(rank 3) of integer type:
>>> a = m[0] # a is a 3x3 array (rank 2)
>>> a = m[0,0] # a is a 3 element array (rank 1)
>>> a = m[0,0,0] # a is an integer (rank 0 array becomes a scalar)
Had this rule not been followed, m[0,0,0] would have resulted in a single
element array, which could complicate computations.
Extended slice syntax allows slicing HDF datasets along each of its
dimensions, with the specification of optional strides to step through
dimensions at regular intervals. For each dimension, the slice syntax
is: "i:j[:stride]", the stride being optional. As with ordinary slices,
the starting and ending values of a slice can be omitted to refer to the
first and last element, respectively, and the end value can be negative to
indicate that the index is measured relative to the tail instead of the
beginning. Omitted dimensions are assumed to be sliced from beginning to
end. Thus:
>>> m[0] # treated as 'm[0,:,:]'.
Example above with get()/set() methods can thus be rewritten as follows:
>>> v[:] # complete array
>>> v[:1] # first row
>>> v[::2,1:3] # second and third columns of first and third row
Indices and slices can be freely mixed, e.g.:
>>> m[:2,3,1:3:2]
Note that, contrary to indexing, a slice never reduces the rank of the
output array, even if its length is 1. For example, given a 3x3x3 'm'
dataset:
>>> a = m[0] # indexing: a is a 3x3 array (rank 2)
>>> a = m[0:1] # slicing: a is a 1x3x3 array (rank 3)
As can easily be seen, extended slice syntax is much more elegant and
compact, and offers a few possibilities not easy to achieve with the
get()/set() methods. Negative indices offer a nice example:
>>> v[-2:] # last two rows
>>> v[-3:-1] # second and third row
>>> v[:,-1] # last column
Reading/setting multivalued HDF attributes and variables
--------------------------------------------------------
Multivalued HDF attributes are set using a python sequence (tuple or
list). Reading such an attribute returns a python list. The easiest way to
read/set an attribute is by handling it like a Python class attribute
(see "High level attribute access"). For example:
>>> d=SD('test.hdf',SDC.WRITE|SDC.CREATE) # create file
>>> d.integers = (1,2,3,4) # define multivalued integer attr
>>> d.integers # get the attribute value
[1, 2, 3, 4]
The easiest way to set multivalued HDF datasets is to assign to a
subset of the dataset, using "[:]" to assign to the whole dataset
(see "High level variable access"). The assigned value can be a python
sequence, which can be multi-leveled when assigning to a multidimensional
dataset. For example:
>>> d=SD('test.hdf',SDC.WRITE|SDC.CREATE) # create file
>>> v1=d.create('v1',SDC.INT32,3) # 3-elem vector
>>> v1[:]=[1,2,3] # assign 3-elem python list
>>> v2=d.create('d2',SDC.INT32,(3,3)) # create 3x3 variable
# The list assigned to v2 is composed
# of 3 lists, each representing a row of v2.
>>> v2[:]=[[1,2,3],[11,12,13],[21,22,23]]
The assigned value can also be a numpy array. Rewriting example above:
>>> v1=array([1,2,3])
>>> v2=array([[1,2,3],[11,12,13],[21,22,23]])
Note how we use indexing expressions 'v1[:]' and 'v2[:]' when assigning
using python sequences, and just the variable names when assigning numpy
arrays.
Reading an HDF dataset always returns a numpy array, except if
indexing is used and produces a rank-0 array, in which case a scalar is
returned.
netCDF files
------------
Files written in the popular Unidata netCDF format can be read and updated
using the HDF SD API. However, pyhdf cannot create netCDF formatted
files from scratch. The python 'pycdf' package can be used for that.
When accessing netCDF files through pyhdf, one should be aware of the
following differences between the netCDF and the HDF SD libraries.
- Differences in terminology can be confusing. What netCDF calls a
'dataset' is called a 'file' or 'SD interface' in HDF. What HDF calls
a dataset is called a 'variable' in netCDF parlance.
- In the netCDF API, dimensions are defined at the global (netCDF dataset)
level. Thus, two netCDF variables defined over dimensions X and Y
necessarily have the same rank and shape.
- In the HDF SD API, dimensions are defined at the HDF dataset level,
except when they are named. Dimensions with the same name are considered
to be "shared" between all the file datasets. They must be of the same
length, and they share all their scales and attributes. For example,
setting an attribute on a shared dimension affects all datasets sharing
that dimension.
- When two or more netCDF variables are based on the unlimited dimension,
they automatically grow in sync. If variables A and B use the unlimited
dimension, adding "records" to A along its unlimited dimension
implicitly adds records in B (which are left in an undefined state and
filled with the fill_value when the file is refreshed).
- In HDF, unlimited dimensions behave independently. If HDF datasets A and
B are based on an unlimited dimension, adding records to A does not
affect the number of records of B. This is true even if the unlimited
dimensions bear the same name (they do not appear to be "shared" as is
the case when the dimensions are fixed).
Classes summary
---------------
pyhdf wraps the SD API using different types of python classes::
SD HDF SD interface (almost synonymous with the subset of the
HDF file holding all the SD datasets)
SDS scientific dataset
SDim dataset dimension
SDAttr attribute (either at the file, dataset or dimension level)
SDC constants (opening modes, data types, etc)
In more detail::
SD The SD class implements the HDF SD interface as applied to a given
file. This class encapsulates the "SD interface" identifier
(referred to as "sd_id" in the C API documentation), and all
the SD API top-level functions.
To create an SD instance, call the SD() constructor.
methods:
constructors:
SD() open an existing HDF file or create a new one,
returning an SD instance
attr() create an SDAttr (attribute) instance to access
an existing file attribute or create a new one;
"dot notation" can also be used to get and set
an attribute
create() create a new dataset, returning an SDS instance
select() locate an existing dataset given its name or
index number, returning an SDS instance
file closing
end() end access to the SD interface and close the
HDF file
inquiry
attributes() return a dictionary describing every global
attribute attached to the HDF file
datasets() return a dictionary describing every dataset
stored inside the file
info() get the number of datasets stored in the file
and the number of attributes attached to it
nametoindex() get a dataset index number given the dataset
name
reftoindex() get a dataset index number given the dataset
reference number
misc
setfillmode() set the fill mode for all the datasets in
the file
SDAttr The SDAttr class defines an attribute, either at the file (SD),
dataset (SDS) or dimension (SDim) level. The class encapsulates
the object to which the attribute is attached, and the attribute
name.
To create an SDAttr instance, obtain an instance for an SD (file),
SDS (dataset) or dimension (SDim) object, and call its attr()
method.
NOTE. An attribute can also be read/written like
a python class attribute, using the familiar
dot notation. See "High level attribute access".
methods:
read/write value
get() get the attribute value
set() set the attribute value
inquiry
index() get the attribute index number
info() get the attribute name, type and number of
values
SDC The SDC class holds constants defining file opening modes and
data types. Constants are named after their C API counterparts.
file opening modes:
SDC.CREATE create file if non existent
SDC.READ read-only mode
SDC.TRUNC truncate file if already exists
SDC.WRITE read-write mode
data types:
SDC.CHAR 8-bit character
SDC.CHAR8 8-bit character
SDC.UCHAR unsigned 8-bit integer
SDC.UCHAR8 unsigned 8-bit integer
SDC.INT8 signed 8-bit integer
SDC.UINT8 unsigned 8-bit integer
SDC.INT16 signed 16-bit integer
SDC.UINT16 unsigned 16-bit integer
SDC.INT32 signed 32-bit integer
SDC.UINT32 unsigned 32-bit integer
SDC.FLOAT32 32-bit floating point
SDC.FLOAT64 64-bit floating point
dataset fill mode:
SDC.FILL
SDC.NOFILL
dimension:
SDC.UNLIMITED dimension can grow dynamically
data compression:
SDC.COMP_NONE
SDC.COMP_RLE
SDC.COMP_NBIT
SDC.COMP_SKPHUFF
SDC.COMP_DEFLATE
SDC.COMP_SZIP
SDC.COMP_SZIP_EC
SDC.COMP_SZIP_NN
SDC.COMP_SZIP_RAW
SDS The SDS class implements an HDF scientific dataset (SDS) object.
To create an SDS instance, call the create() or select() methods
of an SD instance.
methods:
constructors
attr() create an SDAttr (attribute) instance to access
an existing dataset attribute or create a
new one; "dot notation" can also be used to get
and set an attribute
dim() return an SDim (dimension) instance for a given
dataset dimension, given the dimension index
number
dataset closing
endaccess() terminate access to the dataset
inquiry
attributes() return a dictionary describing every
attribute defined on the dataset
checkempty() determine whether the dataset is empty
dimensions() return a dictionary describing all the
dataset dimensions
info() get the dataset name, rank, dimension lengths,
data type and number of attributes
iscoordvar() determine whether the dataset is a coordinate
variable (holds a dimension scale)
isrecord() determine whether the dataset is appendable
(the dataset dimension 0 is unlimited)
ref() get the dataset reference number
reading/writing data values
get() read data from the dataset
set() write data to the dataset
A dataset can also be read/written using the
familiar index and slice notation used to
access python sequences. See "High level
variable access".
reading/writing standard attributes
getcal() get the dataset calibration coefficients:
scale_factor, scale_factor_err, add_offset,
add_offset_err, calibrated_nt
getdatastrs() get the dataset standard string attributes:
long_name, units, format, coordsys
getfillvalue() get the dataset fill value:
_FillValue
getrange() get the dataset min and max values:
valid_range
setcal() set the dataset calibration coefficients
setdatastrs() set the dataset standard string attributes
setfillvalue() set the dataset fill value
setrange() set the dataset min and max values
compression
getcompress() get info about the dataset compression type and mode
setcompress() set the dataset compression type and mode
misc
setexternalfile() store the dataset in an external file
SDim The SDim class implements a dimension object.
To create an SDim instance, call the dim() method of an SDS
(dataset) instance.
Methods:
constructors
attr() create an SDAttr (attribute) instance to access
an existing dimension attribute or create a
new one; "dot notation" can also be used to
get and set an attribute
inquiry
attributes() return a dictionary describing every
attribute defined on the dimension
info() get the dimension name, length, scale data type
and number of attributes
length() return the current dimension length
reading/writing dimension data
getscale() get the dimension scale values
setname() set the dimension name
setscale() set the dimension scale values
reading/writing standard attributes
getstrs() get the dimension standard string attributes:
long_name, units, format
setstrs() set the dimension standard string attributes
Data types
----------
Data types come into play when first defining datasets and their attributes,
and later when querying the definition of those objects.
Data types are specified using the symbolic constants defined inside the
SDC class of the SD module.
- CHAR and CHAR8 (equivalent): an 8-bit character.
- UCHAR, UCHAR8 and UINT8 (equivalent): unsigned 8-bit values (0 to 255)
- INT8: signed 8-bit values (-128 to 127)
- INT16: signed 16-bit values
- UINT16: unsigned 16-bit values
- INT32: signed 32-bit values
- UINT32: unsigned 32-bit values
- FLOAT32: 32-bit floating point values (C floats)
- FLOAT64: 64-bit floating point values (C doubles)
There is no explicit "string" type. To simulate a string, set the
type to CHAR, and set the length to a value of 'n' > 1. This creates an
"array of characters", close to a string (except that strings will always
be of length 'n', right-padded with spaces if necessary).
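For example, to attach an 8-character "string" attribute (a sketch;
'd' is an SD instance opened in write mode):
>>> att = d.attr('station')
>>> att.set(SDC.CHAR, 'Rimouski') # stored as an 8-byte CHAR array
>>> att.get()
'Rimouski'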
Programming models
------------------
Writing
^^^^^^^
The following code can be used as a model to create an SD dataset.
It shows how to use the most important functionalities
of the SD interface needed to initialize a dataset.
A real program should of course add error handling::
# Import SD and numpy.
from pyhdf.SD import *
from numpy import *
fileName = 'template.hdf'
# Create HDF file.
hdfFile = SD(fileName ,SDC.WRITE|SDC.CREATE)
# Assign a few attributes at the file level
hdfFile.author = 'It is me...'
hdfFile.priority = 2
# Create a dataset named 'd1' to hold a 3x3 float array.
d1 = hdfFile.create('d1', SDC.FLOAT32, (3,3))
# Set some attributes on 'd1'
d1.description = 'Sample 3x3 float array'
d1.units = 'celsius'
# Name 'd1' dimensions and assign them attributes.
dim1 = d1.dim(0)
dim2 = d1.dim(1)
dim1.setname('width')
dim2.setname('height')
dim1.units = 'm'
dim2.units = 'cm'
# Assign values to 'd1'
d1[0] = (14.5, 12.8, 13.0) # row 1
d1[1:] = ((-1.3, 0.5, 4.8), # row 2 and
(3.1, 0.0, 13.8)) # row 3
# Close dataset
d1.endaccess()
# Close file
hdfFile.end()
Reading
^^^^^^^
The following code, which reads the dataset created above, can also serve as
a model for any program which needs to access an SD dataset::
# Import SD and numpy.
from pyhdf.SD import *
from numpy import *
fileName = 'template.hdf'
# Open file in read-only mode (default)
hdfFile = SD(fileName)
# Display attributes.
print "file:", fileName
print "author:", hdfFile.author
print "priority:", hdfFile.priority
# Open dataset 'd1'
d1 = hdfFile.select('d1')
# Display dataset attributes.
print "dataset:", 'd1'
print "description:",d1.description
print "units:", d1.units
# Display dimensions info.
dim1 = d1.dim(0)
dim2 = d1.dim(1)
print "dimensions:"
print "dim1: name=", dim1.info()[0],
print "length=", dim1.length(),
print "units=", dim1.units
print "dim2: name=", dim2.info()[0],
print "length=", dim2.length(),
print "units=", dim2.units
# Show dataset values
print d1[:]
# Close dataset
d1.endaccess()
# Close file
hdfFile.end()
Examples
--------
Example 1
^^^^^^^^^
The following simple example exercises some important pyhdf.SD methods. It
shows how to create an HDF dataset, define attributes and dimensions,
create variables, and assign their contents.
Suppose we have a series of text files each defining a 2-dimensional real-
valued matrix. First line holds the matrix dimensions, and following lines
hold matrix values, one row per line. The following procedure will load
into an HDF dataset the contents of any one of those text files. The
procedure computes the matrix min and max values, storing them as
dataset attributes. It also assigns to the variable the group of
attributes passed as a dictionary by the calling program. Note how simple
such an assignment becomes with pyhdf: the dictionary can contain any
number of attributes, of different types, single or multi-valued. Doing
the same in a conventional language would be a much more challenging task.
Error checking is minimal, to keep the example as simple as possible
(admittedly a rather poor excuse ...)::
from numpy import *
from pyhdf.SD import *
import os
def txtToHDF(txtFile, hdfFile, varName, attr):
try: # Catch pyhdf errors
# Open HDF file in update mode, creating it if non existent.
d = SD(hdfFile, SDC.WRITE|SDC.CREATE)
# Open text file and get matrix dimensions on first line.
txt = open(txtFile)
ni, nj = map(int, txt.readline().split())
# Define an HDF dataset of 32-bit floating type (SDC.FLOAT32)
# with those dimensions.
v = d.create(varName, SDC.FLOAT32, (ni, nj))
# Assign attributes passed as argument inside dict 'attr'.
for attrName in attr.keys():
setattr(v, attrName, attr[attrName])
# Load variable with lines of data. Compute min and max
# over the whole matrix.
i = 0
while i < ni:
elems = list(map(float, txt.readline().split()))
v[i] = elems # load row i
minE = min(elems)
maxE = max(elems)
if i:
minVal = min(minVal, minE)
maxVal = max(maxVal, maxE)
else:
minVal = minE
maxVal = maxE
i += 1
# Set variable min and max attributes.
v.minVal = minVal
v.maxVal = maxVal
# Close dataset and file objects (not really necessary, since
# closing is automatic when objects go out of scope).
v.endaccess()
d.end()
txt.close()
except HDF4Error as msg:
print("HDF4Error:", msg)
We could now call the procedure as follows::
hdfFile = 'table.hdf'
try: # Delete if exists.
os.remove(hdfFile)
except:
pass
# Load contents of file 'temp.txt' into dataset 'temperature'
# and assign the attributes 'title', 'units' and 'valid_range'.
txtToHDF('temp.txt', hdfFile, 'temperature',
{'title' : 'temperature matrix',
'units' : 'celsius',
'valid_range': (-2.8,27.0)})
# Load contents of file 'depth.txt' into dataset 'depth'
# and assign the same attributes as above.
txtToHDF('depth.txt', hdfFile, 'depth',
{'title' : 'depth matrix',
'units' : 'meters',
'valid_range': (0, 500.0)})
Example 2
^^^^^^^^^
This example shows a useful python program that will display the
structure of the SD component of any HDF file whose name is given on
the command line. After the HDF file is opened, high level inquiry methods
are called to obtain dictionaries describing attributes, dimensions and
datasets. The rest of the program mostly consists of nicely formatting
the contents of those dictionaries::
import sys
from pyhdf.SD import *
from numpy import *
# Dictionary used to convert from a numeric data type to its symbolic
# representation
typeTab = {
SDC.CHAR: 'CHAR',
SDC.CHAR8: 'CHAR8',
SDC.UCHAR8: 'UCHAR8',
SDC.INT8: 'INT8',
SDC.UINT8: 'UINT8',
SDC.INT16: 'INT16',
SDC.UINT16: 'UINT16',
SDC.INT32: 'INT32',
SDC.UINT32: 'UINT32',
SDC.FLOAT32: 'FLOAT32',
SDC.FLOAT64: 'FLOAT64'
}
printf = sys.stdout.write
def eol(n=1):
printf("%s" % chr(10) * n)
hdfFile = sys.argv[1] # Get first command line argument
try: # Catch pyhdf.SD errors
# Open HDF file named on the command line
f = SD(hdfFile)
# Get global attribute dictionnary
attr = f.attributes(full=1)
# Get dataset dictionnary
dsets = f.datasets()
# File name, number of attributes and number of variables.
printf("FILE INFO"); eol()
printf("-------------"); eol()
printf("%-25s%s" % ("File:", hdfFile)); eol()
printf("%-25s%d" % (" file attributes:", len(attr))); eol()
printf("%-25s%d" % (" datasets:", len(dsets))); eol()
eol();
# Global attribute table.
if len(attr) > 0:
printf("File attributes"); eol(2)
printf(" name idx type len value"); eol()
printf(" -------------------- --- ------- --- -----"); eol()
# Get list of attribute names and sort them lexically
attNames = sorted(attr.keys())
for name in attNames:
t = attr[name]
# t[0] is the attribute value
# t[1] is the attribute index number
# t[2] is the attribute type
# t[3] is the attribute length
printf(" %-20s %3d %-7s %3d %s" %
(name, t[1], typeTab[t[2]], t[3], t[0])); eol()
eol()
# Dataset table
if len(dsets) > 0:
printf("Datasets (idx:index num, na:n attributes, cv:coord var)"); eol(2)
printf(" name idx type na cv dimension(s)"); eol()
printf(" -------------------- --- ------- -- -- ------------"); eol()
# Get list of dataset names and sort them lexically
dsNames = sorted(dsets.keys())
for name in dsNames:
# Get dataset instance
ds = f.select(name)
# Retrieve the dictionary of dataset attributes so as
# to display their number
vAttr = ds.attributes()
t = dsets[name]
# t[0] is a tuple of dimension names
# t[1] is a tuple of dimension lengths
# t[2] is the dataset type
# t[3] is the dataset index number
printf(" %-20s %3d %-7s %2d %-2s " %
(name, t[3], typeTab[t[2]], len(vAttr),
ds.iscoordvar() and 'X' or ''))
# Display dimension info.
n = 0
for d in t[0]:
printf("%s%s(%d)" % (n > 0 and ', ' or '', d, t[1][n]))
n += 1
eol()
eol()
# Dataset info.
if len(dsNames) > 0:
printf("DATASET INFO"); eol()
printf("-------------"); eol(2)
for name in dsNames:
# Access the dataset
dsObj = f.select(name)
# Get dataset attribute dictionnary
dsAttr = dsObj.attributes(full=1)
if len(dsAttr) > 0:
printf("%s attributes" % name); eol(2)
printf(" name idx type len value"); eol()
printf(" -------------------- --- ------- --- -----"); eol()
# Get the list of attribute names and sort them alphabetically.
attNames = sorted(dsAttr.keys())
for nm in attNames:
t = dsAttr[nm]
# t[0] is the attribute value
# t[1] is the attribute index number
# t[2] is the attribute type
# t[3] is the attribute length
printf(" %-20s %3d %-7s %3d %s" %
(nm, t[1], typeTab[t[2]], t[3], t[0])); eol()
eol()
# Get dataset dimension dictionnary
dsDim = dsObj.dimensions(full=1)
if len(dsDim) > 0:
printf ("%s dimensions" % name); eol(2)
printf(" name idx len unl type natt");eol()
printf(" -------------------- --- ----- --- ------- ----");eol()
# Get the list of dimension names and sort them alphabetically.
dimNames = sorted(dsDim.keys())
for nm in dimNames:
t = dsDim[nm]
# t[0] is the dimension length
# t[1] is the dimension index number
# t[2] is 1 if the dimension is unlimited, 0 if not
# t[3] is the dimension scale type, 0 if no scale
# t[4] is the number of attributes
printf(" %-20s %3d %5d %s %-7s %4d" %
(nm, t[1], t[0], t[2] and "X" or " ",
t[3] and typeTab[t[3]] or "", t[4])); eol()
eol()
except HDF4Error as msg:
print("HDF4Error", msg)
"""
import os, sys, types
from . import hdfext as _C
from .six.moves import xrange
from .error import _checkErr, HDF4Error
# List of names we want to be imported by an "from pyhdf.SD import *"
# statement
__all__ = ['SD', 'SDAttr', 'SDC', 'SDS', 'SDim', 'HDF4Error']
try:
import numpy as _toto
del _toto
except ImportError:
raise HDF4Error("numpy package required but not installed")
class SDC(object):
"""The SDC class holds contants defining opening modes and data types.
file opening modes:
========== === ===============================
SDC.CREATE 4 create file if non existent
SDC.READ 1 read-only mode
SDC.TRUNC 256 truncate file if already exists
SDC.WRITE 2 read-write mode
========== === ===============================
data types:
=========== === ===============================
SDC.CHAR 4 8-bit character
SDC.CHAR8 4 8-bit character
SDC.UCHAR 3 unsigned 8-bit integer
SDC.UCHAR8 3 unsigned 8-bit integer
SDC.INT8 20 signed 8-bit integer
SDC.UINT8 21 unsigned 8-bit integer
SDC.INT16 22 signed 16-bit integer
SDC.UINT16 23 unsigned 16-bit integer
SDC.INT32 24 signed 32-bit integer
SDC.UINT32 25 unsigned 32-bit integer
SDC.FLOAT32 5 32-bit floating point
SDC.FLOAT64 6 64-bit floating point
=========== === ===============================
dataset fill mode:
=========== ===
SDC.FILL 0
SDC.NOFILL 256
=========== ===
dimension:
============= === ===============================
SDC.UNLIMITED 0 dimension can grow dynamically
============= === ===============================
data compression:
================= ===
SDC.COMP_NONE 0
SDC.COMP_RLE 1
SDC.COMP_NBIT 2
SDC.COMP_SKPHUFF 3
SDC.COMP_DEFLATE 4
SDC.COMP_SZIP 5
SDC.COMP_SZIP_EC 4
SDC.COMP_SZIP_NN 32
SDC.COMP_SZIP_RAW 128
================= ===
"""
CREATE = _C.DFACC_CREATE
READ = _C.DFACC_READ
TRUNC = 0x100 # specific to pyhdf
WRITE = _C.DFACC_WRITE
CHAR = _C.DFNT_CHAR8
CHAR8 = _C.DFNT_CHAR8
UCHAR = _C.DFNT_UCHAR8
UCHAR8 = _C.DFNT_UCHAR8
INT8 = _C.DFNT_INT8
UINT8 = _C.DFNT_UINT8
INT16 = _C.DFNT_INT16
UINT16 = _C.DFNT_UINT16
INT32 = _C.DFNT_INT32
UINT32 = _C.DFNT_UINT32
FLOAT32 = _C.DFNT_FLOAT32
FLOAT64 = _C.DFNT_FLOAT64
FILL = _C.SD_FILL
NOFILL = _C.SD_NOFILL
UNLIMITED = _C.SD_UNLIMITED
COMP_NONE = _C.COMP_CODE_NONE
COMP_RLE = _C.COMP_CODE_RLE
COMP_NBIT = _C.COMP_CODE_NBIT
COMP_SKPHUFF = _C.COMP_CODE_SKPHUFF
COMP_DEFLATE = _C.COMP_CODE_DEFLATE
COMP_SZIP = _C.COMP_CODE_SZIP
COMP_SZIP_EC = 4
COMP_SZIP_NN = 32
COMP_SZIP_RAW = 128
# Types with an equivalent in the numpy package
# NOTE:
# CHAR8 and INT8 are handled similarly (signed byte -128,...,0,...127)
# UCHAR8 and UINT8 are treated equivalently (unsigned byte: 0,1,...,255)
# UINT16 and UINT32 are supported
# INT64 and UINT64 are not yet supported by pyhdf
equivNumericTypes = [FLOAT32, FLOAT64,
INT8, UINT8,
INT16, UINT16,
INT32, UINT32,
CHAR8, UCHAR8]
class SDAttr(object):
def __init__(self, obj, index_or_name):
"""Init an SDAttr instance. Should not be called directly by
the user program. An SDAttr instance must be created through
the attr() methods of the SD, SDS or SDim classes.
"""
# Args
# obj object instance to which the attribute refers
# (SD, SDS, SDDim)
# index_or_name attribute index or name
#
# Class private attributes:
# _obj object instance
# _index attribute index or None
# _name attribute name or None
self._obj = obj
# Name is given, may exist or not.
if isinstance(index_or_name, type('')):
self._name = index_or_name
self._index = None
# Index is given. Must exist.
else:
self._index = index_or_name
status, self._name, data_type, n_values = \
_C.SDattrinfo(self._obj._id, self._index)
_checkErr('set', status, 'illegal attribute index')
def info(self):
"""Retrieve info about the attribute : name, data type and
number of values.
Args::
no argument
Returns::
3-element tuple holding:
- attribute name
- attribute data type (see constants SDC.xxx)
- number of values in the attribute; for a string-valued
attribute (data type SDC.CHAR8), the number of values
corresponds to the string length
C library equivalent : SDattrinfo
"""
if self._index is None:
try:
self._index = self._obj.findattr(self._name)
except HDF4Error:
raise HDF4Error("info: cannot convert name to index")
status, self._name, data_type, n_values = \
_C.SDattrinfo(self._obj._id, self._index)
_checkErr('info', status, 'illegal attribute index')
return self._name, data_type, n_values
def index(self):
"""Retrieve the attribute index number.
Args::
no argument
Returns::
attribute index number (starting at 0)
C library equivalent : SDfindattr
"""
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute name')
return self._index
def get(self):
"""Retrieve the attribute value.
Args::
no argument
Returns::
attribute value(s); a list is returned if the attribute
is made up of more than one value, except in the case of a
string-valued attribute (data type SDC.CHAR8) where the
values are returned as a string
C library equivalent : SDreadattr
Attributes can also be read like ordinary python attributes,
using the dot notation. See "High level attribute access".
"""
if self._index is None:
try:
self._index = self._obj.findattr(self._name)
except HDF4Error:
raise HDF4Error("get: cannot convert name to index")
# Obtain attribute type and the number of values.
status, self._name, data_type, n_values = \
_C.SDattrinfo(self._obj._id, self._index)
_checkErr('read', status, 'illegal attribute index')
# Get attribute value.
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("read: attribute index %d has an "\
"illegal or unupported type %d" % \
(self._index, data_type))
status = _C.SDreadattr(self._obj._id, self._index, buf)
_checkErr('read', status, 'illegal attribute index')
return convert(buf, n_values)
def set(self, data_type, values):
"""Update/Create a new attribute and set its value(s).
Args::
data_type : attribute data type (see constants SDC.xxx)
values : attribute value(s); specify a list to create
a multi-valued attribute; a string valued
attribute can be created by setting 'data_type'
to SDC.CHAR8 and 'values' to the corresponding
string
Returns::
None
C library equivalent : SDsetattr
Attributes can also be written like ordinary python attributes,
using the dot notation. See "High level attribute access".
"""
try:
n_values = len(values)
except:
n_values = 1
values = [values]
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow values to be passed as a string.
# Noop if a list is passed.
values = list(values)
for n in range(n_values):
values[n] = ord(values[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
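# (This is the standard two's-complement byte encoding: e.g.
# -1 is stored as 255 and -128 as 128.)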
buf = _C.array_int8(n_values)
values = list(values)
for n in range(n_values):
v = values[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2s complement
values[n] = v
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("set: illegal or unimplemented data_type")
for n in range(n_values):
buf[n] = values[n]
status = _C.SDsetattr(self._obj._id, self._name,
data_type, n_values, buf)
_checkErr('set', status, 'illegal attribute')
# Init index following attribute creation.
self._index = _C.SDfindattr(self._obj._id, self._name)
_checkErr('find', self._index, 'illegal attribute')
class SD(object):
"""The SD class implements an HDF SD interface.
To instantiate an SD class, call the SD() constructor.
To set attributes on an SD instance, call the SD.attr()
method to create an attribute instance, then call the methods
of this instance. """
def __init__(self, path, mode=SDC.READ):
"""SD constructor. Initialize an SD interface on an HDF file,
creating the file if necessary.
Args::
path name of the HDF file on which to open the SD interface
mode file opening mode; this mode is a set of binary flags
which can be OR'ed together
SDC.CREATE combined with SDC.WRITE to create file
if it does not exist
SDC.READ open file in read-only access (default)
SDC.TRUNC if combined with SDC.WRITE, overwrite
file if it already exists
SDC.WRITE open file in read-write mode; if file
exists it is updated, unless SDC.TRUNC is
set, in which case it is erased and
recreated; if file does not exist, an
error is raised unless SDC.CREATE is set,
in which case the file is created
Note an important difference in the way CREATE is
handled by the C library and the pyhdf package.
For the C library, CREATE indicates that a new file
should always be created, overwriting an existing one if
any. For pyhdf, CREATE indicates a new file should be
created only if it does not exist, and the overwriting
of an already existing file must be explicitly asked
for by setting the TRUNC flag.
Those differences were introduced so as to harmonize
the way files are opened in the pycdf and pyhdf
packages. Also, this solves a limitation in the
hdf (and netCDF) library, where there is no easy way
to implement the frequent requirement that an existing
file be opened in read-write mode, or created
if it does not exist.
Returns::
an SD instance
C library equivalent : SDstart
"""
# Private attributes:
# _id: file id
# Make sure _id is initialized in case __del__ is called
# when the SD object goes out of scope after failing to
# open file. Failure to do so may put python into an infinite loop
# (thanks to Richard.Andrews@esands.com for reporting this bug).
self._id = None
# See if file exists.
exists = os.path.exists(path)
# We must have either WRITE or READ flag.
if SDC.WRITE & mode:
if exists:
if SDC.TRUNC & mode:
try:
os.remove(path)
except Exception as msg:
raise HDF4Error(msg)
mode = SDC.CREATE|SDC.WRITE
else:
mode = SDC.WRITE
else:
if SDC.CREATE & mode:
mode |= SDC.WRITE
else:
raise HDF4Error("SD: no such file")
elif SDC.READ & mode:
if exists:
mode = SDC.READ
else:
raise HDF4Error("SD: no such file")
else:
raise HDF4Error("SD: bad mode, READ or WRITE must be set")
id = _C.SDstart(path, mode)
_checkErr('SD', id, "cannot open %s" % path)
self._id = id
def __del__(self):
"""Delete the instance, first calling the end() method
if not already done. """
try:
if self._id:
self.end()
except:
pass
def __getattr__(self, name):
# Get value(s) of SD attribute 'name'.
return _getattr(self, name)
def __setattr__(self, name, value):
# Set value(s) of SD attribute 'name'.
# A name starting with an underscore will be treated as
# a standard python attribute, and as an HDF attribute
# otherwise.
_setattr(self, name, value, ['_id'])
def end(self):
"""End access to the SD interface and close the HDF file.
Args::
no argument
Returns::
None
The instance should not be used afterwards.
The 'end()' method is implicitly called when the
SD instance is deleted.
C library equivalent : SDend
"""
status = _C.SDend(self._id)
_checkErr('end', status, "cannot execute")
self._id = None
def info(self):
"""Retrieve information about the SD interface.
Args::
no argument
Returns::
2-element tuple holding:
number of datasets inside the file
number of file attributes
C library equivalent : SDfileinfo
"""
status, n_datasets, n_file_attrs = _C.SDfileinfo(self._id)
_checkErr('info', status, "cannot execute")
return n_datasets, n_file_attrs
def nametoindex(self, sds_name):
"""Return the index number of a dataset given the dataset name.
Args::
sds_name : dataset name
Returns::
index number of the dataset
C library equivalent : SDnametoindex
"""
sds_idx = _C.SDnametoindex(self._id, sds_name)
_checkErr('nametoindex', sds_idx, 'non existent SDS')
return sds_idx
def reftoindex(self, sds_ref):
"""Returns the index number of a dataset given the dataset
reference number.
Args::
sds_ref : dataset reference number
Returns::
dataset index number
C library equivalent : SDreftoindex
"""
sds_idx = _C.SDreftoindex(self._id, sds_ref)
_checkErr('reftoindex', sds_idx, 'illegal SDS ref number')
return sds_idx
def setfillmode(self, fill_mode):
"""Set the fill mode for all the datasets in the file.
Args::
fill_mode : fill mode; one of :
SDC.FILL write the fill value to all the datasets
of the file by default
SDC.NOFILL do not write fill values to all datasets
of the file by default
Returns::
previous fill mode value
C library equivalent: SDsetfillmode
"""
if fill_mode not in [SDC.FILL, SDC.NOFILL]:
raise HDF4Error("bad fill mode")
old_mode = _C.SDsetfillmode(self._id, fill_mode)
_checkErr('setfillmode', old_mode, 'cannot execute')
return old_mode
def create(self, name, data_type, dim_sizes):
"""Create a dataset.
Args::
name dataset name
data_type type of the data, set to one of the SDC.xxx
constants;
dim_sizes lengths of the dataset dimensions; a one-
dimensional array is specified with an integer,
an n-dimensional array with an n-element sequence
of integers; the length of the first dimension can
be set to SDC.UNLIMITED to create an unlimited
dimension (a "record" variable).
IMPORTANT: netCDF and HDF differ in the way
the UNLIMITED dimension is handled. In netCDF,
all variables of a dataset with an unlimited
dimension grow in sync, eg adding a record to
a variable will implicitly extend other record
variables. In HDF, each record variable grows
independently of each other.
Returns::
SDS instance for the dataset
C library equivalent : SDcreate
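For example (a sketch; 'd' is an SD instance opened in write
mode), create a record variable whose rows hold 5 floats and
whose first dimension can grow:
>>> v = d.create('temp', SDC.FLOAT32, (SDC.UNLIMITED, 5))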
"""
# Validate args.
if isinstance(dim_sizes, type(1)): # allow k instead of [k]
# for a 1-dim arr
dim_sizes = [dim_sizes]
rank = len(dim_sizes)
buf = _C.array_int32(rank)
for n in range(rank):
buf[n] = dim_sizes[n]
id = _C.SDcreate(self._id, name, data_type, rank, buf)
_checkErr('CREATE', id, "cannot execute")
return SDS(self, id)
def select(self, name_or_index):
"""Locate a dataset.
Args::
name_or_index dataset name or index number
Returns::
SDS instance for the dataset
C library equivalent : SDselect
"""
if isinstance(name_or_index, type(1)):
idx = name_or_index
else:
try:
idx = self.nametoindex(name_or_index)
except HDF4Error:
raise HDF4Error("select: non-existent dataset")
id = _C.SDselect(self._id, idx)
_checkErr('select', id, "cannot execute")
return SDS(self, id)
def attr(self, name_or_index):
"""Create an SDAttr instance representing a global
attribute (defined at the level of the SD interface).
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
case, it will be created when the SDAttr
instance set() method is called
Returns::
SDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return SDAttr(self, name_or_index)
def attributes(self, full=0):
"""Return a dictionnary describing every global
attribute attached to the SD interface.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no global attribute defined
Otherwise, dictionary where each key is the name of a
global attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
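For example (a sketch; the file is assumed to hold a single
global CHAR attribute 'title' with value 'example'):
>>> d.attributes(full=1)
{'title': ('example', 0, 4, 7)}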
"""
# Get the number of global attributes.
nsds, natts = self.info()
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res
def datasets(self):
"""Return a dictionnary describing all the file datasets.
Args::
no argument
Returns::
Empty dictionary if no dataset is defined.
Otherwise, dictionary whose keys are the file dataset names,
and values are tuples describing the corresponding datasets.
Each tuple holds the following elements in order:
- tuple holding the names of the dimensions defining the
dataset coordinate axes
- tuple holding the dataset shape (dimension lengths);
if a dimension is unlimited, the reported length corresponds
to the dimension's current length
- dataset type
- dataset index number
C library equivalent : no equivalent
"""
# Get number of datasets
nDs = self.info()[0]
# Inquire each var
res = {}
for n in range(nDs):
# Get dataset info.
v = self.select(n)
vName, vRank, vLen, vType, vAtt = v.info()
if vRank < 2: # need a sequence
vLen = [vLen]
# Get dimension info.
dimNames = []
dimLengths = []
for dimNum in range(vRank):
d = v.dim(dimNum)
dimNames.append(d.info()[0])
dimLengths.append(vLen[dimNum])
res[vName] = (tuple(dimNames), tuple(dimLengths),
vType, n)
return res
class SDS(object):
"""The SDS class implements an HDF dataset object.
To create an SDS instance, call the create() or select()
methods of the SD class. To set attributes on an SDS instance,
call the SDS.attr() method to create an attribute instance,
then call the methods of this instance. Attributes can also be
set using the "dot notation". """
def __init__(self, sd, id):
"""This constructor should not be called by the user program.
Call the SD.create() and SD.select() methods instead.
"""
# Args
# sd : SD instance
# id : SDS identifier
# Private attributes
# _sd SD instance
# _id SDS identifier
self._sd = sd
self._id = id
def __del__(self):
# Delete the instance, first calling the endaccess() method
# if not already done.
try:
if self._id:
self.endaccess()
except:
pass
def __getattr__(self, name):
# Get value(s) of SDS attribute 'name'.
return _getattr(self, name)
def __setattr__(self, name, value):
# Set value(s) of SDS attribute 'name'.
_setattr(self, name, value, ['_sd', '_id'])
def __len__(self): # Needed for slices like "-2:" but why ?
return 0
def __getitem__(self, elem):
# This special method is used to index the SDS dataset
# using the "extended slice syntax". The extended slice syntax
# is a perfect match for the "start", "count" and "stride"
# arguments to the SDreaddata() function, and is much easier
# to use.
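# For example (a sketch), on a 4x4 dataset m, "m[0:2, ::2]"
# maps to start=(0,0), count=(2,2), stride=(1,2).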
# Compute arguments to 'SDreaddata_0()'.
start, count, stride = self.__buildStartCountStride(elem)
# Get elements.
return self.get(start, count, stride)
def __setitem__(self, elem, data):
# This special method is used to assign to the SDS dataset
# using "extended slice syntax". The extended slice syntax
# is a perfect match for the "start", "count" and "stride"
# arguments to the SDwritedata() function, and is much easier
# to use.
# Compute arguments to 'SDwritedata_0()'.
start, count, stride = self.__buildStartCountStride(elem)
# A sequence type is needed. Convert a single number to a list.
if type(data) in [int, float]:
data = [data]
# Assign.
self.set(data, start, count, stride)
def endaccess(self):
"""Terminates access to the SDS.
Args::
no argument
Returns::
None.
The SDS instance should not be used afterwards.
The 'endaccess()' method is implicitly called when
the SDS instance is deleted.
C library equivalent : SDendaccess
"""
status = _C.SDendaccess(self._id)
_checkErr('endaccess', status, "cannot execute")
self._id = None # Invalidate identifier
def dim(self, dim_index):
"""Get an SDim instance given a dimension index number.
Args::
dim_index index number of the dimension (numbering starts at 0)
C library equivalent : SDgetdimid
"""
id = _C.SDgetdimid(self._id, dim_index)
_checkErr('dim', id, 'invalid SDS identifier or dimension index')
return SDim(self, id, dim_index)
def get(self, start=None, count=None, stride=None):
"""Read data from the dataset.
Args::
start : indices where to start reading in the data array;
default to 0 on all dimensions
count : number of values to read along each dimension;
default to the current length of all dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to read the whole dataset contents, one should
simply call the method with no argument.
Returns::
numpy array initialized with the data.
C library equivalent : SDreaddata
The dataset can also be read using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, int):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('get : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, int):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, int):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, int):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('get : start, stride or count ' \
'do not match SDS rank')
for n in range(rank):
if start[n] < 0 or start[n] + \
(abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
raise HDF4Error('get arguments violate ' \
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
if data_type not in SDC.equivNumericTypes:
raise HDF4Error('get cannot currently deal with '\
'the SDS data type')
return _C._SDreaddata_0(self._id, data_type, start, count, stride)
def set(self, data, start=None, count=None, stride=None):
"""Write data to the dataset.
Args::
data : array of data to write; can be given as a numpy
array, or as a Python sequence (whose elements can be
nested sequences)
start : indices where to start writing in the dataset;
default to 0 on all dimensions
count : number of values to write along each dimension;
default to the current length of dataset dimensions
stride : sampling interval along each dimension;
default to 1 on all dimensions
For n-dimensional datasets, those 3 parameters are entered
using lists. For one-dimensional datasets, integers
can also be used.
Note that, to write the whole dataset at once, one simply
has to call the method with the dataset values in parameter
'data', omitting all other parameters.
Returns::
None.
C library equivalent : SDwritedata
The dataset can also be written using the familiar indexing and
slicing notation, like ordinary python sequences.
See "High level variable access".
"""
# Obtain SDS info.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
if isinstance(dim_sizes, int):
dim_sizes = [dim_sizes]
except HDF4Error:
raise HDF4Error('set : cannot execute')
# Validate args.
if start is None:
start = [0] * rank
elif isinstance(start, int):
start = [start]
if count is None:
count = dim_sizes
if count[0] == 0:
count[0] = 1
elif isinstance(count, int):
count = [count]
if stride is None:
stride = [1] * rank
elif isinstance(stride, int):
stride = [stride]
if len(start) != rank or len(count) != rank or len(stride) != rank:
raise HDF4Error('set : start, stride or count '\
'do not match SDS rank')
unlimited = self.isrecord()
for n in range(rank):
ok = 1
if start[n] < 0:
ok = 0
elif n > 0 or not unlimited:
if start[n] + (abs(count[n]) - 1) * stride[n] >= dim_sizes[n]:
ok = 0
if not ok:
raise HDF4Error('set arguments violate '\
'the size (%d) of dimension %d' \
% (dim_sizes[n], n))
# ??? Check support for UINT16
if data_type not in SDC.equivNumericTypes:
raise HDF4Error('set cannot currently deal '\
'with the SDS data type')
_C._SDwritedata_0(self._id, data_type, start, count, data, stride)
def __buildStartCountStride(self, elem):
# Create the 'start', 'count' and 'stride' lists that
# will be passed to '_SDreaddata_0'/'_SDwritedata_0'.
# start starting indices along each dimension
# count count of values along each dimension; a value of -1
# indicates that an index, not a slice, was applied to
# the dimension; in that case, the dimension should be
# dropped from the output array.
# stride strides along each dimension
# Make sure the indexing expression does not exceed the variable
# number of dimensions.
dsName, nDims, shape, dsType, nAttr = self.info()
if isinstance(elem, tuple):
if len(elem) > nDims:
raise HDF4Error("get", 0,
"indexing expression exceeds variable "
"number of dimensions")
else: # Convert single index to sequence
elem = [elem]
if isinstance(shape, int):
shape = [shape]
start = []
count = []
stride = []
n = -1
unlimited = self.isrecord()
for e in elem:
n += 1
# See if the dimension is unlimited (always at index 0)
unlim = n == 0 and unlimited
# Simple index
if isinstance(e, int):
isslice = False
if e < 0 :
e += shape[n]
# Respect standard python list behavior: it is illegal to
# specify an out of bound index (except for the
# unlimited dimension).
if e < 0 or (not unlim and e >= shape[n]):
raise IndexError("index out of range")
beg = e
end = e + 1
inc = 1
# Slice index. Respect Python syntax for slice upper bounds,
# which are not included in the resulting slice. Also, if the
# upper bound exceeds the dimension size, truncate it.
elif isinstance(e, slice):
isslice = True
# None or 0 means not specified
if e.start:
beg = e.start
if beg < 0:
beg += shape[n]
else:
beg = 0
# None or sys.maxsize means not specified
if e.stop and e.stop != sys.maxsize:
end = e.stop
if end < 0:
end += shape[n]
else:
end = shape[n]
# None means not specified
if e.step:
inc = e.step
else:
inc = 1
# Any other index type is unexpected
else:
raise ValueError("Bug: unexpected element type to __getitem__")
# Clip end index (except if unlimited dimension)
# and compute number of elements to get.
if not unlim and end > shape[n]:
end = shape[n]
if isslice:
cnt = (end - beg) // inc
if cnt * inc < end - beg:
cnt += 1
else:
cnt = -1
start.append(beg)
count.append(cnt)
stride.append(inc)
# Complete missing dimensions
while n < nDims - 1:
n += 1
start.append(0)
count.append(shape[n])
stride.append(1)
# Done
return start, count, stride
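# For instance (a hypothetical 10 x 20 dataset), the indexing
# expression 'sds[2, 5:17:3]' is translated by this method into
# start = [2, 5], count = [-1, 4], stride = [1, 3]: four values are
# taken along dimension 1, and dimension 0 is dropped from the result.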
def info(self):
"""Retrieves information about the dataset.
Args::
no argument
Returns::
5-element tuple holding:
- dataset name
- dataset rank (number of dimensions)
- dataset shape, that is a list giving the length of each
dataset dimension; if the first dimension is unlimited, then
the first value of the list gives the current length of the
unlimited dimension
- data type (one of the SDC.xxx values)
- number of attributes defined for the dataset
C library equivalent : SDgetinfo
"""
buf = _C.array_int32(_C.H4_MAX_VAR_DIMS)
status, sds_name, rank, data_type, n_attrs = \
_C.SDgetinfo(self._id, buf)
_checkErr('info', status, "cannot execute")
dim_sizes = _array_to_ret(buf, rank)
return sds_name, rank, dim_sizes, data_type, n_attrs
def checkempty(self):
"""Determine whether the dataset is empty.
Args::
no argument
Returns::
True(1) if dataset is empty, False(0) if not
C library equivalent : SDcheckempty
"""
status, emptySDS = _C.SDcheckempty(self._id)
_checkErr('checkempty', status, 'invalid SDS identifier')
return emptySDS
def ref(self):
"""Get the reference number of the dataset.
Args::
no argument
Returns::
dataset reference number
C library equivalent : SDidtoref
"""
sds_ref = _C.SDidtoref(self._id)
_checkErr('idtoref', sds_ref, 'illegal SDS identifier')
return sds_ref
def iscoordvar(self):
"""Determine whether the dataset is a coordinate variable
(holds a dimension scale). A coordinate variable is created
when a dimension is assigned a set of scale values.
Args::
no argument
Returns::
True(1) if the dataset represents a coordinate variable,
False(0) if not
C library equivalent : SDiscoordvar
"""
return _C.SDiscoordvar(self._id) # no error status here
def isrecord(self):
"""Determines whether the dataset is appendable
(contains an unlimited dimension). Note that if true, then
the unlimited dimension is always dimension number 0.
Args::
no argument
Returns::
True(1) if the dataset is appendable, False(0) if not.
C library equivalent : SDisrecord
"""
return _C.SDisrecord(self._id) # no error status here
def getcal(self):
"""Retrieve the SDS calibration coefficients.
Args::
no argument
Returns::
5-element tuple holding:
- cal: calibration factor (attribute 'scale_factor')
- cal_error : calibration factor error
(attribute 'scale_factor_err')
- offset: calibration offset (attribute 'add_offset')
- offset_err : offset error (attribute 'add_offset_err')
- data_type : type of the data resulting from applying
the calibration formula to the dataset values
(attribute 'calibrated_nt')
An exception is raised if no calibration data are defined.
Original dataset values 'orival' are converted to calibrated
values 'calval' through the formula::
calval = cal * (orival - offset)
The calibration coefficients are part of the so-called
"standard" SDS attributes. The values inside the tuple returned
by 'getcal' are those of the following attributes, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDgetcal()
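Example (a sketch; 'ds' stands for an SDS instance with
calibration attributes set)::
cal, cal_err, offset, offset_err, cal_nt = ds.getcal()
calibrated = cal * (ds.get() - offset) # elementwise on the numpy array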
"""
status, cal, cal_error, offset, offset_err, data_type = \
_C.SDgetcal(self._id)
_checkErr('getcal', status, 'no calibration record')
return cal, cal_error, offset, offset_err, data_type
def getdatastrs(self):
"""Retrieve the dataset standard string attributes.
Args::
no argument
Returns::
4-element tuple holding:
- dataset label string (attribute 'long_name')
- dataset unit (attribute 'units')
- dataset output format (attribute 'format')
- dataset coordinate system (attribute 'coordsys')
The values returned by 'getdatastrs' are part of the
so-called "standard" SDS attributes. Those 4 values
correspond respectively to the following attributes::
long_name, units, format, coordsys .
C library equivalent: SDgetdatastrs
"""
status, label, unit, format, coord_system = \
_C.SDgetdatastrs(self._id, 128)
_checkErr('getdatastrs', status, 'cannot execute')
return label, unit, format, coord_system
def getfillvalue(self):
"""Retrieve the dataset fill value.
Args::
no argument
Returns::
dataset fill value (attribute '_FillValue')
An exception is raised if the fill value is not set.
The fill value is part of the so-called "standard" SDS
attributes, and corresponds to the following attribute::
_FillValue
C library equivalent: SDgetfillvalue
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getfillvalue : invalid SDS identifier')
n_values = 1 # Fill value stands for 1 value.
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("getfillvalue: SDS has an illegal type or " \
"unsupported type %d" % data_type)
status = _C.SDgetfillvalue(self._id, buf)
_checkErr('getfillvalue', status, 'fill value not set')
return convert(buf, n_values)
def getrange(self):
"""Retrieve the dataset min and max values.
Args::
no argument
Returns::
(min, max) tuple (attribute 'valid_range')
Note that those are the values as stored
by the 'setrange' method. 'getrange' does *NOT* compute the
min and max from the current dataset contents.
An exception is raised if the range is not set.
The range returned by 'getrange' is part of the so-called
"standard" SDS attributes. It corresponds to the following
attribute::
valid_range
C library equivalent: SDgetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = \
self.info()
except HDF4Error:
raise HDF4Error('getrange : invalid SDS identifier')
n_values = 1
convert = _array_to_ret
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
convert = _array_to_str
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("getrange: SDS has an illegal or " \
"unsupported type %d" % data)
# Note: The C routine returns the max in buf1 and the min
# in buf2. We swap the values returned by the Python
# interface, since it is more natural to return
# min first, then max.
status = _C.SDgetrange(self._id, buf1, buf2)
_checkErr('getrange', status, 'range not set')
return convert(buf2, n_values), convert(buf1, n_values)
def setcal(self, cal, cal_error, offset, offset_err, data_type):
"""Set the dataset calibration coefficients.
Args::
cal the calibration factor (attribute 'scale_factor')
cal_error calibration factor error
(attribute 'scale_factor_err')
offset offset value (attribute 'add_offset')
offset_err offset error (attribute 'add_offset_err')
data_type data type of the values resulting from applying the
calibration formula to the dataset values
(one of the SDC.xxx constants)
(attribute 'calibrated_nt')
Returns::
None
See method 'getcal' for the definition of the calibration
formula.
Calibration coefficients are part of the so-called standard
SDS attributes. Calling 'setcal' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
scale_factor, scale_factor_err, add_offset, add_offset_err,
calibrated_nt
C library equivalent: SDsetcal
"""
status = _C.SDsetcal(self._id, cal, cal_error,
offset, offset_err, data_type)
_checkErr('setcal', status, 'cannot execute')
def setdatastrs(self, label, unit, format, coord_sys):
"""Set the dataset standard string type attributes.
Args::
label dataset label (attribute 'long_name')
unit dataset unit (attribute 'units')
format dataset format (attribute 'format')
coord_sys dataset coordinate system (attribute 'coordsys')
Returns::
None
Those strings are part of the so-called standard
SDS attributes. Calling 'setdatastrs' is equivalent to setting
the following attributes, which correspond to the method
parameters, in order::
long_name, units, format, coordsys
C library equivalent: SDsetdatastrs
"""
status = _C.SDsetdatastrs(self._id, label, unit, format, coord_sys)
_checkErr('setdatastrs', status, 'cannot execute')
def setfillvalue(self, fill_val):
"""Set the dataset fill value.
Args::
fill_val dataset fill value (attribute '_FillValue')
Returns::
None
The fill value is part of the so-called "standard" SDS
attributes. Calling 'setfillvalue' is equivalent to setting
the following attribute::
_FillValue
C library equivalent: SDsetfillvalue
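Round-trip example (a sketch; assumes 'ds' is an SDS of
type SDC.INT32)::
ds.setfillvalue(-999)
ds.getfillvalue() # returns -999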
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setfillvalue : cannot execute')
n_values = 1 # Fill value stands for 1 value.
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
if fill_val >= 0:
fill_val &= 0x7f
else:
fill_val = abs(fill_val) & 0x7f
if fill_val:
fill_val = 256 - fill_val
else:
fill_val = 128 # -128 in 2's complement
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setfillvalue: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf[0] = fill_val
status = _C.SDsetfillvalue(self._id, buf)
_checkErr('setfillvalue', status, 'cannot execute')
def setrange(self, min, max):
"""Set the dataset min and max values.
Args::
min dataset minimum value (attribute 'valid_range')
max dataset maximum value (attribute 'valid_range')
Returns::
None
The data range is part of the so-called "standard" SDS
attributes. Calling method 'setrange' is equivalent to
setting the following attribute with a 2-element [min,max]
array::
valid_range
C library equivalent: SDsetrange
"""
# Obtain SDS data type.
try:
sds_name, rank, dim_sizes, data_type, n_attrs = self.info()
except HDF4Error:
raise HDF4Error('setrange : cannot execute')
n_values = 1
if data_type == SDC.CHAR8:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf1 = _C.array_byte(n_values)
buf2 = _C.array_byte(n_values)
elif data_type == SDC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf1 = _C.array_int8(n_values)
buf2 = _C.array_int8(n_values)
v = min
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2's complement
min = v
v = max
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2's complement
max = v
elif data_type == SDC.INT16:
buf1 = _C.array_int16(n_values)
buf2 = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf1 = _C.array_uint16(n_values)
buf2 = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf1 = _C.array_int32(n_values)
buf2 = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf1 = _C.array_uint32(n_values)
buf2 = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf1 = _C.array_float32(n_values)
buf2 = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf1 = _C.array_float64(n_values)
buf2 = _C.array_float64(n_values)
else:
raise HDF4Error("SDsetrange: SDS has an illegal or " \
"unsupported type %d" % data_type)
buf1[0] = max
buf2[0] = min
status = _C.SDsetrange(self._id, buf1, buf2)
_checkErr('setrange', status, 'cannot execute')
def getcompress(self):
"""Retrieves info about dataset compression type and mode.
Args::
no argument
Returns::
tuple holding:
- compression type (one of the SDC.COMP_xxx constants)
- optional values, depending on the compression type
COMP_NONE 0 value no additional value
COMP_SKPHUFF 1 value : skip size
COMP_DEFLATE 1 value : gzip compression level (1 to 9)
COMP_SZIP 5 values : options mask,
pixels per block (2 to 32)
pixels per scanline,
bits per pixel (number of bits in the SDS datatype)
pixels (number of elements in the SDS)
Note: in the context of an SDS, the word "pixel"
should really be understood as meaning "data element",
eg a cell value inside a multidimensional grid.
Test the options mask against constants SDC.COMP_SZIP_NN
and SDC.COMP_SZIP_EC, eg :
if optionMask & SDC.COMP_SZIP_EC:
print "EC encoding scheme used"
An exception is raised if dataset is not compressed.
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
C library equivalent: SDgetcompress
"""
status, comp_type, value, v2, v3, v4, v5 = _C._SDgetcompress(self._id)
_checkErr('getcompress', status, 'no compression')
if comp_type == SDC.COMP_NONE:
return (comp_type,)
elif comp_type == SDC.COMP_SZIP:
return comp_type, value, v2, v3, v4, v5
else:
return comp_type, value
def setcompress(self, comp_type, value=0, v2=0):
"""Compresses the dataset using a specified compression method.
Args::
comp_type compression type, identified by one of the
SDC.COMP_xxx constants
value,v2 auxiliary value(s) needed by some compression types
SDC.COMP_SKPHUFF Skipping-Huffman; compression value=data size in bytes, v2 is ignored
SDC.COMP_DEFLATE Gzip compression; value=deflate level (1 to 9), v2 is ignored
SDC.COMP_SZIP Szip compression; value=encoding scheme (SDC.COMP_SZIP_EC or
SDC.COMP_SZIP_NN), v2=pixels per block (2 to 32)
Returns::
None
.. note::
Starting with v0.8, an exception is always raised if
pyhdf was installed with the NOCOMPRESS macro set.
SDC.COMP_DEFLATE applies the GZIP compression to the dataset,
and the value varies from 1 to 9, according to the level of
compression desired.
SDC.COMP_SZIP compresses the dataset using the SZIP algorithm. See the HDF User's Guide
for details about the encoding scheme and the number of pixels per block. SZIP is new
with HDF 4.2.
'setcompress' must be called before writing to the dataset.
The dataset must be written all at once, unless it is
appendable (has an unlimited dimension). Updating the dataset
is not allowed. Refer to the HDF user's guide for more details
on how to use data compression.
C library equivalent: SDsetcompress
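Example (a sketch; 'ds' and 'values' are hypothetical, and the
call must come before any data is written)::
ds.setcompress(SDC.COMP_DEFLATE, 6) # gzip level 6
ds.set(values) # then write everything at once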
"""
status = _C._SDsetcompress(self._id, comp_type, value, v2)
_checkErr('setcompress', status, 'cannot execute')
def setexternalfile(self, filename, offset=0):
"""Store the dataset data in an external file.
Args::
filename external file name
offset offset in bytes where to start writing in
the external file
Returns::
None
C library equivalent : SDsetexternalfile
"""
status = _C.SDsetexternalfile(self._id, filename, offset)
_checkErr('setexternalfile', status, 'execution error')
def attr(self, name_or_index):
"""Create an SDAttr instance representing an SDS
(dataset) attribute.
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
case, the attribute is created when the
instance set() method is called
Returns::
SDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return SDAttr(self, name_or_index)
def attributes(self, full=0):
"""Return a dictionnary describing every attribute defined
on the dataset.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no attribute is defined.
Otherwise, dictionary where each key is the name of a
dataset attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of dataset attributes.
natts = self.info()[4]
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res
def dimensions(self, full=0):
"""Return a dictionnary describing every dataset dimension.
Args::
full true to get complete info about each dimension
false to report only each dimension length
Returns::
Dictionary where each key is a dimension name. If no name
has been given to the dimension, the key is set to
'fakeDimx' where 'x' is the dimension index number.
If parameter 'full' is false, key value is the dimension
length. If 'full' is true, key value is a 5-element tuple
with the following elements:
- dimension length; for an unlimited dimension, the reported
length is the current dimension length
- dimension index number
- 1 if the dimension is unlimited, 0 otherwise
- dimension scale type, or 0 if no scale is defined for
the dimension
- number of attributes defined on the dimension
C library equivalent : no equivalent
"""
# Get the number of dimensions and their lengths.
nDims, dimLen = self.info()[1:3]
if isinstance(dimLen, int): # need a sequence
dimLen = [dimLen]
# Check if the dataset is appendable.
unlim = self.isrecord()
# Inquire each dimension
res = {}
for n in range(nDims):
d = self.dim(n)
# The length reported by SDim.info() is 0 for an unlimited dimension;
# use the lengths reported by SDS.info() instead.
name, k, scaleType, nAtt = d.info()
length = dimLen[n]
if full:
res[name] = (length, n, unlim and n == 0,
scaleType, nAtt)
else:
res[name] = length
return res
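# Together, SDS.attributes() and SDS.dimensions() give a quick
# overview of a dataset; a sketch (dataset and dimension names are
# hypothetical):
#
#   ds = sd.select('temperature')
#   print ds.dimensions() # e.g. {'lat': 90, 'lon': 180}
#   print ds.attributes(full=1)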
class SDim(object):
"""The SDim class implements a dimension object.
There can be one dimension object for each dataset dimension.
To create an SDim instance, call the dim() method of an SDS class
instance. To set attributes on an SDim instance, call the
SDim.attr() method to create an attribute instance, then call the
methods of this instance. Attributes can also be set using the
"dot notation". """
def __init__(self, sds, id, index):
"""Init an SDim instance. This method should not be called
directly by the user program. To create an SDim instance,
call the SDS.dim() method.
"""
# Args:
# sds SDS instance
# id dimension identifier
# index index number of the dimension
# SDim private attributes
# _sds sds instance
# _id dimension identifier
# _index dimension index number
self._sds = sds
self._id = id
self._index = index
def __getattr__(self, name):
# Get value(s) of SDim attribute 'name'.
return _getattr(self, name)
def __setattr__(self, name, value):
# Set value(s) of SDim attribute 'name'.
_setattr(self, name, value, ['_sds', '_id', '_index'])
def info(self):
"""Return info about the dimension instance.
Args::
no argument
Returns::
4-element tuple holding:
- dimension name; 'fakeDimx' is returned if the dimension
has not been named yet, where 'x' is the dimension
index number
- dimension length; 0 is returned if the dimension is unlimited;
call the SDim.length() or SDS.info() methods to obtain the
current dimension length
- scale data type (one of the SDC.xxx constants); 0 is
returned if no scale has been set on the dimension
- number of attributes attached to the dimension
C library equivalent : SDdiminfo
"""
status, dim_name, dim_size, data_type, n_attrs = \
_C.SDdiminfo(self._id)
_checkErr('info', status, 'cannot execute')
return dim_name, dim_size, data_type, n_attrs
def length(self):
"""Return the dimension length. This method is usefull
to quickly retrieve the current length of an unlimited
dimension.
Args::
no argument
Returns::
dimension length; if the dimension is unlimited, the
returned value is the current dimension length
C library equivalent : no equivalent
"""
dim_len = self._sds.info()[2]
if isinstance(dim_len, int): # info() reports a scalar for rank-1 datasets
dim_len = [dim_len]
return dim_len[self._index]
def setname(self, dim_name):
"""Set the dimension name.
Args::
dim_name dimension name; setting 2 dimensions to the same
name makes the dimensions "shared"; in order to be
shared, the dimensions must be defined similarly.
Returns::
None
C library equivalent : SDsetdimname
"""
status = _C.SDsetdimname(self._id, dim_name)
_checkErr('setname', status, 'cannot execute')
def getscale(self):
"""Obtain the scale values along a dimension.
Args::
no argument
Returns::
list with the scale values; the list length is equal to the
dimension length; the element type is equal to the dimension
data type, as set when the 'setdimscale()' method was called.
C library equivalent : SDgetdimscale
"""
# Get dimension info. If data_type is 0, no scale has been set
# on the dimension.
status, dim_name, dim_size, data_type, n_attrs = _C.SDdiminfo(self._id)
_checkErr('getscale', status, 'cannot execute')
if data_type == 0:
raise HDF4Error("no scale set on that dimension")
# dim_size is 0 for an unlimited dimension. The actual length is
# obtained through SDgetinfo.
if dim_size == 0:
dim_size = self._sds.info()[2][self._index]
# Get scale values.
if data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(dim_size)
elif data_type == SDC.INT8:
buf = _C.array_int8(dim_size)
elif data_type == SDC.INT16:
buf = _C.array_int16(dim_size)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(dim_size)
elif data_type == SDC.INT32:
buf = _C.array_int32(dim_size)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(dim_size)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(dim_size)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(dim_size)
else:
raise HDF4Error("getscale: dimension has an "\
"illegal or unsupported type %d" % data_type)
status = _C.SDgetdimscale(self._id, buf)
_checkErr('getscale', status, 'cannot execute')
return _array_to_ret(buf, dim_size)
def setscale(self, data_type, scale):
"""Initialize the scale values along the dimension.
Args::
data_type data type code (one of the SDC.xxx constants)
scale sequence holding the scale values; the number of
values must match the current length of the dataset
along that dimension
C library equivalent : SDsetdimscale
Setting a scale on a dimension generates what HDF calls a
"coordinate variable". This is a rank 1 dataset similar to any
other dataset, which is created to hold the scale values. The
dataset name is identical to that of the dimension on which
setscale() is called, and the data type passed in 'data_type'
determines the type of the dataset. To distinguish between such
a dataset and a "normal" dataset, call the iscoordvar() method
of the dataset instance.
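Example (a sketch; assumes the dataset is 365 values long
along dimension 0)::
d = ds.dim(0)
d.setscale(SDC.INT32, range(1, 366)) # day-of-year scale values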
"""
try:
n_values = len(scale)
except TypeError: # 'scale' is a scalar
n_values = 1
# Validate args
info = self._sds.info()
if info[1] == 1:
dim_size = info[2]
else:
dim_size = info[2][self._index]
if n_values != dim_size:
raise HDF4Error('number of scale values (%d) does not match ' \
'dimension size (%d)' % (n_values, dim_size))
if data_type == SDC.CHAR8:
buf = _C.array_byte(n_values)
# Allow a string as the scale argument.
# Becomes a noop if already a list.
scale = list(scale)
for n in range(n_values):
scale[n] = ord(scale[n])
elif data_type in [SDC.UCHAR8, SDC.UINT8]:
buf = _C.array_byte(n_values)
elif data_type == SDC.INT8:
# SWIG refuses negative values here. We found that if we
# pass them as byte values, it will work.
buf = _C.array_int8(n_values)
scale = list(scale)
for n in range(n_values):
v = scale[n]
if v >= 0:
v &= 0x7f
else:
v = abs(v) & 0x7f
if v:
v = 256 - v
else:
v = 128 # -128 in 2's complement
scale[n] = v
elif data_type == SDC.INT16:
buf = _C.array_int16(n_values)
elif data_type == SDC.UINT16:
buf = _C.array_uint16(n_values)
elif data_type == SDC.INT32:
buf = _C.array_int32(n_values)
elif data_type == SDC.UINT32:
buf = _C.array_uint32(n_values)
elif data_type == SDC.FLOAT32:
buf = _C.array_float32(n_values)
elif data_type == SDC.FLOAT64:
buf = _C.array_float64(n_values)
else:
raise HDF4Error("setscale: illegal or usupported data_type")
if n_values == 1:
buf[0] = scale
else:
for n in range(n_values):
buf[n] = scale[n]
status = _C.SDsetdimscale(self._id, n_values, data_type, buf)
_checkErr('setscale', status, 'cannot execute')
def getstrs(self):
"""Retrieve the dimension standard string attributes.
Args::
no argument
Returns::
3-element tuple holding:
-dimension label (attribute 'long_name')
-dimension unit (attribute 'units')
-dimension format (attribute 'format')
An exception is raised if the standard attributes have
not been set.
C library equivalent: SDgetdimstrs
"""
status, label, unit, format = _C.SDgetdimstrs(self._id, 128)
_checkErr('getstrs', status, 'cannot execute')
return label, unit, format
def setstrs(self, label, unit, format):
"""Set the dimension standard string attributes.
Args::
label dimension label (attribute 'long_name')
unit dimension unit (attribute 'units')
format dimension format (attribute 'format')
Returns::
None
C library equivalent: SDsetdimstrs
"""
status = _C.SDsetdimstrs(self._id, label, unit, format)
_checkErr('setstrs', status, 'cannot execute')
def attr(self, name_or_index):
"""Create an SDAttr instance representing an SDim
(dimension) attribute.
Args::
name_or_index attribute name or index number; if a name is
given, the attribute may not exist; in that
case, the attribute is created when the
instance set() method is called
Returns::
SDAttr instance for the attribute. Call the methods of this
class to query, read or set the attribute.
C library equivalent : no equivalent
"""
return SDAttr(self, name_or_index)
def attributes(self, full=0):
"""Return a dictionnary describing every attribute defined
on the dimension.
Args::
full true to get complete info about each attribute
false to report only each attribute value
Returns::
Empty dictionary if no attribute is defined.
Otherwise, dictionary where each key is the name of a
dimension attribute. If parameter 'full' is false,
key value is the attribute value. If 'full' is true,
key value is a tuple with the following elements:
- attribute value
- attribute index number
- attribute type
- attribute length
C library equivalent : no equivalent
"""
# Get the number of dimension attributes.
natts = self.info()[3]
# Inquire each attribute
res = {}
for n in range(natts):
a = self.attr(n)
name, aType, nVal = a.info()
if full:
res[name] = (a.get(), a.index(), aType, nVal)
else:
res[name] = a.get()
return res
###########################
# Support functions
###########################
def _getattr(obj, name):
# Called by the __getattr__ method of the SD, SDS and SDim objects.
# Python will call __getattr__ to see if the class wants to
# define certain missing methods (__str__, __len__, etc).
# Always fail if the name starts with two underscores.
if name[:2] == '__':
raise AttributeError
# See if we deal with an SD attribute.
a = SDAttr(obj, name)
try:
index = a.index()
except HDF4Error:
raise AttributeError("attribute not found")
# Return attribute value(s).
return a.get()
def _setattr(obj, name, value, privAttr):
# Called by the __setattr__ method of the SD, SDS and SDim objects.
# Be careful with private attributes.
#if name in privAttr:
if name[0] == '_':
obj.__dict__[name] = value
return
# Treat everything else as an HDF attribute.
if type(value) not in [list, tuple]:
value = [value]
typeList = []
for v in value:
t = type(v)
# Prohibit mixing numeric types and strings.
if t in [int, float] and \
str not in typeList:
if t not in typeList:
typeList.append(t)
# Prohibit sequence of strings or a mix of numbers and string.
elif t == str and not typeList:
typeList.append(t)
else:
typeList = []
break
if str in typeList:
xtype = SDC.CHAR8
value = value[0]
# double is "stronger" than int
elif float in typeList:
xtype = SDC.FLOAT64
elif int in typeList:
xtype = SDC.INT32
else:
raise HDF4Error("Illegal attribute value")
# Assign value
try:
a = SDAttr(obj, name)
a.set(xtype, value)
except HDF4Error as msg:
raise HDF4Error("cannot set attribute: %s" % msg)
def _array_to_ret(buf, nValues):
# Convert array 'buf' to a scalar or a list.
if nValues == 1:
ret = buf[0]
else:
ret = []
for i in xrange(nValues):
ret.append(buf[i])
return ret
def _array_to_str(buf, nValues):
# Convert array of bytes 'buf' to a string.
# Return empty string if there is no value.
if nValues == 0:
return ""
# When there is just one value, _array_to_ret returns a scalar
# over which we cannot iterate.
if nValues == 1:
chrs = [chr(buf[0])]
else:
chrs = [chr(b) for b in _array_to_ret(buf, nValues)]
return ''.join(chrs)
| {
"repo_name": "ryfeus/lambda-packs",
"path": "HDF4_H5_NETCDF/source2.7/pyhdf/SD.py",
"copies": "1",
"size": "117733",
"license": "mit",
"hash": -3060043927394084000,
"line_mean": 34.0604526504,
"line_max": 113,
"alpha_frac": 0.5522071127,
"autogenerated": false,
"ratio": 4.285719486003422,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5337926598703422,
"avg_score": null,
"num_lines": null
} |
# $Id: selftest.py 2213 2005-01-11 18:49:47Z fredrik $
# elementtree selftest program
# this test script uses Python's "doctest" module to check that the
# *test script* works as expected.
import sys
try:
from StringIO import StringIO
BytesIO = StringIO
except ImportError:
from io import BytesIO, StringIO
from lxml import etree as ElementTree
def stdout():
if sys.version_info[0] < 3:
return sys.stdout
class bytes_stdout(object):
def write(self, data):
if isinstance(data, bytes):
data = data.decode('ISO8859-1')
sys.stdout.write(data)
return bytes_stdout()
def unserialize(text):
file = StringIO(text)
tree = ElementTree.parse(file)
return tree.getroot()
def serialize(elem, encoding=None):
file = BytesIO()
tree = ElementTree.ElementTree(elem)
if encoding:
tree.write(file, encoding=encoding)
else:
tree.write(file)
result = file.getvalue()
if sys.version_info[0] >= 3:
result = result.decode('ISO8859-1')
result = result.replace(' />', '/>')
if result[-1:] == '\n':
result = result[:-1]
return result
def summarize(elem):
return elem.tag
def summarize_list(seq):
return list(map(summarize, seq))
SAMPLE_XML = unserialize("""
<body>
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
""")
SAMPLE_XML_NS = unserialize("""
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
""")
# interface tests
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print("expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print("expected value string, got %r" % mapping["key"])
def check_element(element):
if not hasattr(element, "tag"):
print("no tag member")
if not hasattr(element, "attrib"):
print("no attrib member")
if not hasattr(element, "text"):
print("no text member")
if not hasattr(element, "tail"):
print("no tail member")
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
def check_element_tree(tree):
check_element(tree.getroot())
def element():
"""
Test element tree interface.
>>> element = ElementTree.Element("tag")
>>> check_element(element)
>>> tree = ElementTree.ElementTree(element)
>>> check_element_tree(tree)
"""
def parsefile():
"""
Test parsing from file. Note that we're opening the files in
here; by default, the 'parse' function opens the file in binary
mode, and doctest doesn't filter out carriage returns.
>>> file = open("samples/simple.xml", "rb")
>>> tree = ElementTree.parse(file)
>>> file.close()
>>> tree.write(stdout())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element/>
</root>
>>> file = open("samples/simple-ns.xml", "rb")
>>> tree = ElementTree.parse(file)
>>> file.close()
>>> tree.write(stdout())
<root xmlns="http://namespace/">
<element key="value">text</element>
<element>text</element>tail
<empty-element/>
</root>
"""
def writefile():
"""
>>> elem = ElementTree.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ElementTree.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
"""
def encoding():
r"""
Test encoding issues.
>>> elem = ElementTree.Element("tag")
>>> elem.text = u'abc'
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, "utf-8")
'<tag>abc</tag>'
>>> serialize(elem, "us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, "iso-8859-1").lower()
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, "utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, "us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, "iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"\'>"/>'
>>> serialize(elem, "utf-8")
'<tag key="<&"\'>"/>'
>>> serialize(elem, "us-ascii")
'<tag key="<&"\'>"/>'
>>> serialize(elem, "iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"\'>"/>'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
'<tag>åöö<></tag>'
>>> serialize(elem, "utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, "us-ascii")
'<tag>åöö<></tag>'
>>> serialize(elem, "iso-8859-1").lower()
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>"/>'
>>> serialize(elem, "utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>"/>'
>>> serialize(elem, "us-ascii")
'<tag key="åöö<>"/>'
>>> serialize(elem, "iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>"/>'
"""
if sys.version_info[0] >= 3:
encoding.__doc__ = encoding.__doc__.replace("u'", "'")
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ElementTree.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri"/>'
## 2) decorated attributes
## >>> elem.attrib["{uri}key"] = "value"
## >>> serialize(elem) # 2.1
## '<ns0:tag ns0:key="value" xmlns:ns0="uri"/>'
"""
def cdata():
"""
Test CDATA handling (etc).
>>> serialize(unserialize("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(unserialize("<tag>hello</tag>"))
'<tag>hello</tag>'
>>> serialize(unserialize("<tag><![CDATA[hello]]></tag>"))
'<tag>hello</tag>'
"""
def find():
"""
Test find methods (including xpath syntax).
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("section/tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ElementTree.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag']
>>> summarize_list(elem.findall("section/*"))
['tag']
>>> summarize_list(elem.findall("section//*"))
['tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag']
>>> summarize_list(elem.findall("*/*"))
['tag']
>>> summarize_list(elem.findall("*//*"))
['tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("/tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("./tag"))
['tag', 'tag']
>>> elem = SAMPLE_XML_NS
>>> summarize_list(elem.findall("tag"))
[]
>>> summarize_list(elem.findall("{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
>>> summarize_list(elem.findall(".//{http://effbot.org/ns}tag"))
['{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag', '{http://effbot.org/ns}tag']
"""
# XXX only deep copying is supported
def copy():
"""
Test copy handling (etc).
>>> import copy
>>> e1 = unserialize("<tag>hello<foo/></tag>")
>>> # e2 = copy.copy(e1)
>>> e3 = copy.deepcopy(e1)
>>> e1.find("foo").tag = "bar"
>>> serialize(e1).replace(' ', '')
'<tag>hello<bar/></tag>'
## >>> serialize(e2).replace(' ', '')
## '<tag>hello<bar/></tag>'
>>> serialize(e3).replace(' ', '')
'<tag>hello<foo/></tag>'
"""
def attrib():
"""
Test attribute handling.
>>> elem = ElementTree.Element("tag")
>>> elem.get("key") # 1.1
>>> elem.get("key", "default") # 1.2
'default'
>>> elem.set("key", "value")
>>> elem.get("key") # 1.3
'value'
>>> elem = ElementTree.Element("tag", key="value")
>>> elem.get("key") # 2.1
'value'
>>> elem.attrib # 2.2
{'key': 'value'}
>>> elem = ElementTree.Element("tag", {"key": "value"})
>>> elem.get("key") # 3.1
'value'
>>> elem.attrib # 3.2
{'key': 'value'}
>>> elem = ElementTree.Element("tag", {"key": "other"}, key="value")
>>> elem.get("key") # 4.1
'value'
>>> elem.attrib # 4.2
{'key': 'value'}
"""
def makeelement():
"""
Test makeelement handling.
>>> elem = ElementTree.Element("tag")
>>> subelem = elem.makeelement("subtag", {"key": "value"})
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value"/></tag>'
>>> elem.clear()
>>> serialize(elem)
'<tag/>'
>>> elem.append(subelem)
>>> serialize(elem)
'<tag><subtag key="value"/></tag>'
"""
## def observer():
## """
## Test observers.
## >>> def observer(action, elem):
## ... print("%s %s" % (action, elem.tag))
## >>> builder = ElementTree.TreeBuilder()
## >>> builder.addobserver(observer)
## >>> parser = ElementTree.XMLParser(builder)
## >>> file = open("samples/simple.xml", "rb")
## >>> parser.feed(file.read())
## start root
## start element
## end element
## start element
## end element
## start empty-element
## end empty-element
## end root
## >>> file.close()
## """
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
## def entity():
## """
## Test entity handling.
## 1) bad entities
## >>> ElementTree.XML("<document>&entity;</document>")
## Traceback (most recent call last):
## SyntaxError: undefined entity: line 1, column 10
## 2) custom entity
## >>> parser = ElementTree.XMLParser()
## >>> parser.entity["entity"] = "text"
## >>> parser.feed(ENTITY_XML)
## >>> root = parser.close()
## >>> serialize(root)
## '<document>text</document>'
## """
if __name__ == "__main__":
import doctest, selftest2
failed, tested = doctest.testmod(selftest2)
print("%d tests ok." % (tested - failed))
if failed > 0:
print("%d tests failed. Exiting with non-zero return code." % failed)
sys.exit(1)
| {
"repo_name": "lxml/lxml",
"path": "src/lxml/tests/selftest2.py",
"copies": "1",
"size": "12001",
"license": "bsd-3-clause",
"hash": -8048577246838475000,
"line_mean": 25.5508849558,
"line_max": 91,
"alpha_frac": 0.5513707191,
"autogenerated": false,
"ratio": 3.2780661021578803,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43294368212578804,
"avg_score": null,
"num_lines": null
} |
# $Id: selftest.py 3276 2007-09-12 06:52:30Z fredrik $
# -*- coding: iso-8859-1 -*-
# elementtree selftest program
# this test script uses Python's "doctest" module to check that the
# *test script* works as expected.
# TODO: add more elementtree method tests
# TODO: add xml/html parsing tests
# TODO: etc
import re, sys
def stdout():
if sys.version_info[0] < 3:
return sys.stdout
class bytes_stdout(object):
def write(self, data):
if isinstance(data, bytes):
data = data.decode('ISO8859-1')
sys.stdout.write(data)
return bytes_stdout()
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from lxml import etree as ElementTree
from lxml import _elementpath as ElementPath
from lxml import ElementInclude
ET = ElementTree
#from elementtree import ElementTree
#from elementtree import ElementPath
#from elementtree import ElementInclude
#from elementtree import HTMLTreeBuilder
#from elementtree import SimpleXMLWriter
def fix_compatibility(xml_data):
xml_data = re.sub(r'\s*xmlns:[a-z0-9]+="http://www.w3.org/2001/XInclude"', '', xml_data)
xml_data = xml_data.replace(' />', '/>')
if xml_data[-1:] == '\n':
xml_data = xml_data[:-1]
return xml_data
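# For instance, fix_compatibility(
#     '<root xmlns:ns0="http://www.w3.org/2001/XInclude"><a /></root>\n')
# returns '<root><a/></root>'.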
def serialize(elem, **options):
file = BytesIO()
tree = ElementTree.ElementTree(elem)
tree.write(file, **options)
if sys.version_info[0] < 3:
try:
encoding = options["encoding"]
except KeyError:
encoding = "utf-8"
else:
encoding = 'ISO8859-1'
result = fix_compatibility(file.getvalue().decode(encoding))
if sys.version_info[0] < 3:
result = result.encode(encoding)
return result
def summarize(elem):
return elem.tag
def summarize_list(seq):
return list(map(summarize, seq))
def normalize_crlf(tree):
for elem in tree.getiterator():
if elem.text: elem.text = elem.text.replace("\r\n", "\n")
if elem.tail: elem.tail = elem.tail.replace("\r\n", "\n")
SAMPLE_XML = ElementTree.XML("""
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
""")
#
# interface tests
def check_string(string):
len(string)
for char in string:
if len(char) != 1:
print("expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_string_or_none(value):
if value is None:
return
return check_string(value)
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
if mapping["key"] != "value":
print("expected value string, got %r" % mapping["key"])
def check_element(element):
if not hasattr(element, "tag"):
print("no tag member")
if not hasattr(element, "attrib"):
print("no attrib member")
if not hasattr(element, "text"):
print("no text member")
if not hasattr(element, "tail"):
print("no tail member")
check_string(element.tag)
check_mapping(element.attrib)
check_string_or_none(element.text)
check_string_or_none(element.tail)
for elem in element:
check_element(elem)
def check_element_tree(tree):
check_element(tree.getroot())
# --------------------------------------------------------------------
# element tree tests
def sanity():
"""
>>> from elementtree.ElementTree import *
>>> from elementtree.ElementInclude import *
>>> from elementtree.ElementPath import *
>>> from elementtree.HTMLTreeBuilder import *
>>> from elementtree.SimpleXMLWriter import *
>>> from elementtree.TidyTools import *
"""
# doesn't work with lxml.etree
del sanity
def version():
"""
>>> ElementTree.VERSION
'1.3a2'
"""
# doesn't work with lxml.etree
del version
def interface():
"""
Test element tree interface.
>>> element = ElementTree.Element("tag")
>>> check_element(element)
>>> tree = ElementTree.ElementTree(element)
>>> check_element_tree(tree)
"""
def simpleops():
"""
>>> elem = ElementTree.XML("<body><tag/></body>")
>>> serialize(elem)
'<body><tag/></body>'
>>> e = ElementTree.Element("tag2")
>>> elem.append(e)
>>> serialize(elem)
'<body><tag/><tag2/></body>'
>>> elem.remove(e)
>>> serialize(elem)
'<body><tag/></body>'
>>> elem.insert(0, e)
>>> serialize(elem)
'<body><tag2/><tag/></body>'
>>> elem.remove(e)
>>> elem.extend([e])
>>> serialize(elem)
'<body><tag/><tag2/></body>'
>>> elem.remove(e)
"""
def simplefind():
"""
Test find methods using the elementpath fallback.
>>> CurrentElementPath = ElementTree.ElementPath
>>> ElementTree.ElementPath = ElementTree._SimpleElementPath()
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
Path syntax doesn't work in this case.
>>> elem.find("section/tag")
>>> elem.findtext("section/tag")
>>> elem.findall("section/tag")
[]
>>> ElementTree.ElementPath = CurrentElementPath
"""
# doesn't work with lxml.etree
del simplefind
def find():
"""
Test find methods (including xpath syntax).
>>> elem = SAMPLE_XML
>>> elem.find("tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("tag").tag
'tag'
>>> elem.find("section/tag").tag
'tag'
>>> ElementTree.ElementTree(elem).find("section/tag").tag
'tag'
>>> elem.findtext("tag")
'text'
>>> elem.findtext("tog")
>>> elem.findtext("tog", "default")
'default'
>>> ElementTree.ElementTree(elem).findtext("tag")
'text'
>>> elem.findtext("section/tag")
'subtext'
>>> ElementTree.ElementTree(elem).findtext("section/tag")
'subtext'
>>> summarize_list(elem.findall("tag"))
['tag', 'tag']
>>> summarize_list(elem.findall("*"))
['tag', 'tag', 'section']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("section/tag"))
['tag']
>>> summarize_list(elem.findall("section//tag"))
['tag']
>>> summarize_list(elem.findall("section/*"))
['tag']
>>> summarize_list(elem.findall("section//*"))
['tag']
>>> summarize_list(elem.findall("section/.//*"))
['tag']
>>> summarize_list(elem.findall("*/*"))
['tag']
>>> summarize_list(elem.findall("*//*"))
['tag']
>>> summarize_list(elem.findall("*/tag"))
['tag']
>>> summarize_list(elem.findall("*/./tag"))
['tag']
>>> summarize_list(elem.findall("./tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall("././tag"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class]"))
['tag', 'tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@class='a']"))
['tag']
>>> summarize_list(elem.findall(".//tag[@class='b']"))
['tag', 'tag']
>>> summarize_list(elem.findall(".//tag[@id]"))
['tag']
>>> summarize_list(elem.findall(".//section[tag]"))
['section']
>>> summarize_list(elem.findall(".//section[element]"))
[]
>>> summarize_list(elem.findall("../tag"))
[]
>>> summarize_list(elem.findall("section/../tag"))
['tag', 'tag']
>>> summarize_list(ElementTree.ElementTree(elem).findall("./tag"))
['tag', 'tag']
FIXME: ET's Path module handles this case incorrectly; this gives
a warning in 1.3, and the behaviour will be modified in 1.4.
>>> summarize_list(ElementTree.ElementTree(elem).findall("/tag"))
['tag', 'tag']
"""
def bad_find():
"""
Check bad or unsupported path expressions.
>>> elem = SAMPLE_XML
>>> elem.findall("/tag")
Traceback (most recent call last):
SyntaxError: cannot use absolute path on element
# this is supported in ET 1.3:
#>>> elem.findall("section//")
#Traceback (most recent call last):
#SyntaxError: invalid path
"""
def parsefile():
"""
Test parsing from file.
>>> tree = ElementTree.parse("samples/simple.xml")
>>> normalize_crlf(tree)
>>> tree.write(stdout())
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element/>
</root>
>>> tree = ElementTree.parse("samples/simple-ns.xml")
>>> normalize_crlf(tree)
>>> tree.write(stdout())
<root xmlns="http://namespace/">
<element key="value">text</element>
<element>text</element>tail
<empty-element/>
</root>
## <ns0:root xmlns:ns0="http://namespace/">
## <ns0:element key="value">text</ns0:element>
## <ns0:element>text</ns0:element>tail
## <ns0:empty-element/>
## </ns0:root>
"""
def parsehtml():
"""
Test HTML parsing.
>>> # p = HTMLTreeBuilder.TreeBuilder()
>>> p = ElementTree.HTMLParser()
>>> p.feed("<p><p>spam<b>egg</b></p>")
>>> serialize(p.close())
'<p>spam<b>egg</b></p>'
"""
# doesn't work with lxml.etree
del parsehtml
def parseliteral():
r"""
>>> element = ElementTree.XML("<html><body>text</body></html>")
>>> ElementTree.ElementTree(element).write(stdout())
<html><body>text</body></html>
>>> element = ElementTree.fromstring("<html><body>text</body></html>")
>>> ElementTree.ElementTree(element).write(stdout())
<html><body>text</body></html>
## >>> sequence = ["<html><body>", "text</bo", "dy></html>"]
## >>> element = ElementTree.fromstringlist(sequence)
## >>> ElementTree.ElementTree(element).write(stdout())
## <html><body>text</body></html>
>>> print(repr(ElementTree.tostring(element)).lstrip('b'))
'<html><body>text</body></html>'
# looks different in lxml
# >>> print(ElementTree.tostring(element, "ascii"))
# <?xml version='1.0' encoding='ascii'?>
# <html><body>text</body></html>
>>> _, ids = ElementTree.XMLID("<html><body>text</body></html>")
>>> len(ids)
0
>>> _, ids = ElementTree.XMLID("<html><body id='body'>text</body></html>")
>>> len(ids)
1
>>> ids["body"].tag
'body'
"""
def simpleparsefile():
"""
Test the xmllib-based parser.
>>> from elementtree import SimpleXMLTreeBuilder
>>> parser = SimpleXMLTreeBuilder.TreeBuilder()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
# doesn't work with lxml.etree
del simpleparsefile
def iterparse():
"""
Test iterparse interface.
>>> iterparse = ElementTree.iterparse
>>> context = iterparse("samples/simple.xml")
>>> for action, elem in context:
... print("%s %s" % (action, elem.tag))
end element
end element
end empty-element
end root
>>> context.root.tag
'root'
>>> context = iterparse("samples/simple-ns.xml")
>>> for action, elem in context:
... print("%s %s" % (action, elem.tag))
end {http://namespace/}element
end {http://namespace/}element
end {http://namespace/}empty-element
end {http://namespace/}root
>>> events = ()
>>> context = iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print("%s %s" % (action, elem.tag))
>>> events = ()
>>> context = iterparse("samples/simple.xml", events=events)
>>> for action, elem in context:
... print("%s %s" % (action, elem.tag))
>>> events = ("start", "end")
>>> context = iterparse("samples/simple.xml", events)
>>> for action, elem in context:
... print("%s %s" % (action, elem.tag))
start root
start element
end element
start element
end element
start empty-element
end empty-element
end root
>>> events = ("start", "end", "start-ns", "end-ns")
>>> context = iterparse("samples/simple-ns.xml", events)
>>> for action, elem in context:
... if action in ("start", "end"):
... print("%s %s" % (action, elem.tag))
... else:
... print("%s %s" % (action, elem))
start-ns ('', 'http://namespace/')
start {http://namespace/}root
start {http://namespace/}element
end {http://namespace/}element
start {http://namespace/}element
end {http://namespace/}element
start {http://namespace/}empty-element
end {http://namespace/}empty-element
end {http://namespace/}root
end-ns None
"""
def fancyparsefile():
"""
Test the "fancy" parser.
Sanity check.
>>> from elementtree import XMLTreeBuilder
>>> parser = XMLTreeBuilder.FancyTreeBuilder()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
>>> normalize_crlf(tree)
>>> tree.write(sys.stdout)
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
Callback check.
>>> class MyFancyParser(XMLTreeBuilder.FancyTreeBuilder):
... def start(self, elem):
... print("START %s" % elem.tag)
... def end(self, elem):
... print("END %s" % elem.tag)
>>> parser = MyFancyParser()
>>> tree = ElementTree.parse("samples/simple.xml", parser)
START root
START element
END element
START element
END element
START empty-element
END empty-element
END root
"""
# doesn't work with lxml.etree
del fancyparsefile
def writefile():
"""
>>> elem = ElementTree.Element("tag")
>>> elem.text = "text"
>>> serialize(elem)
'<tag>text</tag>'
>>> ElementTree.SubElement(elem, "subtag").text = "subtext"
>>> serialize(elem)
'<tag>text<subtag>subtext</subtag></tag>'
## Test tag suppression
## >>> elem.tag = None
## >>> serialize(elem)
## 'text<subtag>subtext</subtag>'
"""
def writestring():
"""
>>> elem = ElementTree.XML("<html><body>text</body></html>")
>>> print(repr(ElementTree.tostring(elem)).lstrip('b'))
'<html><body>text</body></html>'
>>> elem = ElementTree.fromstring("<html><body>text</body></html>")
>>> print(repr(ElementTree.tostring(elem)).lstrip('b'))
'<html><body>text</body></html>'
"""
def encoding():
r"""
Test encoding issues.
>>> elem = ElementTree.Element("tag")
>>> elem.text = u'abc'
>>> serialize(elem)
'<tag>abc</tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>abc</tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>abc</tag>'
>>> serialize(elem, encoding="iso-8859-1").lower()
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>"
>>> elem.text = "<&\"\'>"
>>> serialize(elem)
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="utf-8")
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="us-ascii") # cdata characters
'<tag><&"\'></tag>'
>>> serialize(elem, encoding="iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag><&"\'></tag>'
>>> elem.attrib["key"] = "<&\"\'>"
>>> elem.text = None
>>> serialize(elem)
'<tag key="<&"\'>"/>'
>>> serialize(elem, encoding="utf-8")
'<tag key="<&"\'>"/>'
>>> serialize(elem, encoding="us-ascii")
'<tag key="<&"\'>"/>'
>>> serialize(elem, encoding="iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="<&"\'>"/>'
>>> elem.text = u'\xe5\xf6\xf6<>'
>>> elem.attrib.clear()
>>> serialize(elem)
'<tag>åöö<></tag>'
>>> serialize(elem, encoding="utf-8")
'<tag>\xc3\xa5\xc3\xb6\xc3\xb6<></tag>'
>>> serialize(elem, encoding="us-ascii")
'<tag>åöö<></tag>'
>>> serialize(elem, encoding="iso-8859-1").lower()
"<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6<></tag>"
>>> elem.attrib["key"] = u'\xe5\xf6\xf6<>'
>>> elem.text = None
>>> serialize(elem)
'<tag key="åöö<>"/>'
>>> serialize(elem, encoding="utf-8")
'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>"/>'
>>> serialize(elem, encoding="us-ascii")
'<tag key="åöö<>"/>'
>>> serialize(elem, encoding="iso-8859-1").lower()
'<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6<>"/>'
"""
if sys.version_info[0] >= 3:
encoding.__doc__ = encoding.__doc__.replace("u'", "'")
def methods():
r"""
Test serialization methods.
>>> e = ET.XML("<html><link/><script>1 < 2</script></html>")
>>> e.tail = "\n"
>>> serialize(e)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method=None)
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="xml")
'<html><link /><script>1 < 2</script></html>\n'
>>> serialize(e, method="html")
'<html><link><script>1 < 2</script></html>\n'
>>> serialize(e, method="text")
'1 < 2\n'
"""
# doesn't work with lxml.etree
del methods
def iterators():
"""
Test iterators.
>>> e = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
>>> summarize_list(e.iter())
['html', 'body', 'i']
>>> summarize_list(e.find("body").iter())
['body', 'i']
>>> "".join(e.itertext())
'this is a paragraph...'
>>> "".join(e.find("body").itertext())
'this is a paragraph.'
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
def entity():
"""
Test entity handling.
1) bad entities
>>> ElementTree.XML("<document>&entity;</document>")
Traceback (most recent call last):
ExpatError: undefined entity: line 1, column 10
>>> ElementTree.XML(ENTITY_XML)
Traceback (most recent call last):
ExpatError: undefined entity &entity;: line 5, column 10
(add more tests here)
"""
# doesn't work with lxml.etree
del entity
def error(xml):
"""
Test error handling.
>>> error("foo").position
(1, 0)
>>> error("<tag>&foo;</tag>").position
(1, 5)
>>> error("foobar<").position
(1, 6)
"""
try:
ET.XML(xml)
except ET.ParseError:
return sys.exc_value
# doesn't work with lxml.etree -> different positions
del error
def namespace():
"""
Test namespace issues.
1) xml namespace
>>> elem = ElementTree.XML("<tag xml:lang='en' />")
>>> serialize(elem) # 1.1
'<tag xml:lang="en"/>'
2) other "well-known" namespaces
>>> elem = ElementTree.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
>>> serialize(elem) # 2.1
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"/>'
>>> elem = ElementTree.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
>>> serialize(elem) # 2.2
'<html:html xmlns:html="http://www.w3.org/1999/xhtml"/>'
>>> elem = ElementTree.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
>>> serialize(elem) # 2.3
'<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope"/>'
3) unknown namespaces
"""
def qname():
"""
Test QName handling.
1) decorated tags
>>> elem = ElementTree.Element("{uri}tag")
>>> serialize(elem) # 1.1
'<ns0:tag xmlns:ns0="uri"/>'
>>> elem = ElementTree.Element(ElementTree.QName("{uri}tag"))
>>> serialize(elem) # 1.2
'<ns0:tag xmlns:ns0="uri"/>'
>>> elem = ElementTree.Element(ElementTree.QName("uri", "tag"))
>>> serialize(elem) # 1.3
'<ns0:tag xmlns:ns0="uri"/>'
# ns/attribute order ...
## 2) decorated attributes
## >>> elem.clear()
## >>> elem.attrib["{uri}key"] = "value"
## >>> serialize(elem) # 2.1
## '<ns0:tag ns0:key="value" xmlns:ns0="uri"/>'
## >>> elem.clear()
## >>> elem.attrib[ElementTree.QName("{uri}key")] = "value"
## >>> serialize(elem) # 2.2
## '<ns0:tag ns0:key="value" xmlns:ns0="uri"/>'
## 3) decorated values are not converted by default, but the
## QName wrapper can be used for values
## >>> elem.clear()
## >>> elem.attrib["{uri}key"] = "{uri}value"
## >>> serialize(elem) # 3.1
## '<ns0:tag ns0:key="{uri}value" xmlns:ns0="uri"/>'
## >>> elem.clear()
## >>> elem.attrib["{uri}key"] = ElementTree.QName("{uri}value")
## >>> serialize(elem) # 3.2
## '<ns0:tag ns0:key="ns0:value" xmlns:ns0="uri"/>'
## >>> elem.clear()
## >>> subelem = ElementTree.Element("tag")
## >>> subelem.attrib["{uri1}key"] = ElementTree.QName("{uri2}value")
## >>> elem.append(subelem)
## >>> elem.append(subelem)
## >>> serialize(elem) # 3.3
## '<ns0:tag xmlns:ns0="uri"><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2"/><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2"/></ns0:tag>'
"""
def xpath_tokenizer(p):
"""
Test the XPath tokenizer.
>>> # tests from the xml specification
>>> xpath_tokenizer("*")
['*']
>>> xpath_tokenizer("text()")
['text', '()']
>>> xpath_tokenizer("@name")
['@', 'name']
>>> xpath_tokenizer("@*")
['@', '*']
>>> xpath_tokenizer("para[1]")
['para', '[', '1', ']']
>>> xpath_tokenizer("para[last()]")
['para', '[', 'last', '()', ']']
>>> xpath_tokenizer("*/para")
['*', '/', 'para']
>>> xpath_tokenizer("/doc/chapter[5]/section[2]")
['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']']
>>> xpath_tokenizer("chapter//para")
['chapter', '//', 'para']
>>> xpath_tokenizer("//para")
['//', 'para']
>>> xpath_tokenizer("//olist/item")
['//', 'olist', '/', 'item']
>>> xpath_tokenizer(".")
['.']
>>> xpath_tokenizer(".//para")
['.', '//', 'para']
>>> xpath_tokenizer("..")
['..']
>>> xpath_tokenizer("../@lang")
['..', '/', '@', 'lang']
>>> xpath_tokenizer("chapter[title]")
['chapter', '[', 'title', ']']
>>> xpath_tokenizer("employee[@secretary and @assistant]")
['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']']
>>> # additional tests
>>> xpath_tokenizer("{http://spam}egg")
['{http://spam}egg']
>>> xpath_tokenizer("./spam.egg")
['.', '/', 'spam.egg']
>>> xpath_tokenizer(".//{http://spam}egg")
['.', '//', '{http://spam}egg']
"""
out = []
for op, tag in ElementPath.xpath_tokenizer(p):
out.append(op or tag)
return out
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {
"C1.xml": """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
""", "disclaimer.xml": """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
""",
"C2.xml": """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
""", "count.txt": "324387", "C3.xml": """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
""", "data.xml": """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
""",
"C5.xml": """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
""",
"default.xml": """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="samples/simple.xml"/>
</document>
"""}
def xinclude_loader(href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise IOError("resource not found")
if parse == "xml":
return ElementTree.XML(data)
return data
def xinclude():
r"""
Basic inclusion example (XInclude C.1)
>>> document = xinclude_loader("C1.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C1
<document>
<p>120 Mz is adequate for an average home user.</p>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
</document>
Textual inclusion example (XInclude C.2)
>>> document = xinclude_loader("C2.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C2
<document>
<p>This document has been accessed
324387 times.</p>
</document>
Textual inclusion of XML example (XInclude C.3)
>>> document = xinclude_loader("C3.xml")
>>> ElementInclude.include(document, xinclude_loader)
>>> print(serialize(document)) # C3
<document>
<p>The following is the source of the "data.xml" resource:</p>
<example><?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
</example>
</document>
## Fallback example (XInclude C.5)
## Note! Fallback support is not yet implemented
## >>> document = xinclude_loader("C5.xml")
## >>> ElementInclude.include(document, xinclude_loader)
## Traceback (most recent call last):
## IOError: resource not found
## >>> # print(serialize(document)) # C5
"""
def xinclude_default():
"""
>>> document = xinclude_loader("default.xml")
>>> ElementInclude.include(document)
>>> print(serialize(document)) # default
<document>
<p>Example.</p>
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element/>
</root>
</document>
"""
#
# xmlwriter
def xmlwriter():
r"""
>>> file = BytesIO()
>>> w = SimpleXMLWriter.XMLWriter(file)
>>> html = w.start("html")
>>> x = w.start("head")
>>> w.element("title", "my document")
>>> w.data("\n")
>>> w.element("meta", name="hello", value="goodbye")
>>> w.data("\n")
>>> w.end()
>>> x = w.start("body")
>>> w.element("h1", "this is a heading")
>>> w.data("\n")
>>> w.element("p", u"this is a paragraph")
>>> w.data("\n")
>>> w.element("p", u"reserved characters: <&>")
>>> w.data("\n")
>>> w.element("p", u"detta är också ett stycke")
>>> w.data("\n")
>>> w.close(html)
>>> print(file.getvalue())
<html><head><title>my document</title>
<meta name="hello" value="goodbye" />
</head><body><h1>this is a heading</h1>
<p>this is a paragraph</p>
<p>reserved characters: <&></p>
<p>detta är också ett stycke</p>
</body></html>
"""
# doesn't work with lxml.etree
del xmlwriter
# --------------------------------------------------------------------
# reported bugs
def bug_xmltoolkit21():
"""
marshaller gives obscure errors for non-string values
>>> elem = ElementTree.Element(123)
>>> serialize(elem) # tag
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.text = 123
>>> serialize(elem) # text
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.tail = 123
>>> serialize(elem) # tail
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.set(123, "123")
>>> serialize(elem) # attribute key
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
>>> elem = ElementTree.Element("elem")
>>> elem.set("123", 123)
>>> serialize(elem) # attribute value
Traceback (most recent call last):
TypeError: cannot serialize 123 (type int)
"""
# doesn't work with lxml.etree
del bug_xmltoolkit21
def bug_xmltoolkit25():
"""
typo in ElementTree.findtext
>>> tree = ElementTree.ElementTree(SAMPLE_XML)
>>> tree.findtext("tag")
'text'
>>> tree.findtext("section/tag")
'subtext'
"""
def bug_xmltoolkit28():
"""
.//tag causes exceptions
>>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>")
>>> summarize_list(tree.findall(".//thead"))
[]
>>> summarize_list(tree.findall(".//tbody"))
['tbody']
"""
def bug_xmltoolkitX1():
"""
dump() doesn't flush the output buffer
>>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>")
>>> ElementTree.dump(tree); sys.stdout.write("tail")
<doc><table><tbody /></table></doc>
tail
"""
# doesn't work with lxml.etree
del bug_xmltoolkitX1
def bug_xmltoolkit39():
"""
non-ascii element and attribute names don't work
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg />")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag ättr='välue' />")
>>> tree.attrib
{u'\\xe4ttr': u'v\\xe4lue'}
>>> ElementTree.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
>>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg>text</täg>")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g>text</t\\xc3\\xa4g>'
>>> tree = ElementTree.Element(u"täg")
>>> ElementTree.tostring(tree, "utf-8")
'<t\\xc3\\xa4g />'
>>> tree = ElementTree.Element("tag")
>>> tree.set(u"ättr", u"välue")
>>> ElementTree.tostring(tree, "utf-8")
'<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />'
"""
# doesn't work with lxml.etree
del bug_xmltoolkit39
def bug_xmltoolkit45():
"""
problems parsing mixed unicode/non-ascii html documents
latin-1 text
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
utf-8 text
>>> p = HTMLTreeBuilder.TreeBuilder(encoding="utf-8")
>>> p.feed("<p>v\xc3\xa4lue</p>")
>>> serialize(p.close())
'<p>välue</p>'
utf-8 text using meta tag
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<html><meta http-equiv='Content-Type' content='text/html; charset=utf-8'><p>v\xc3\xa4lue</p></html>")
>>> serialize(p.close().find("p"))
'<p>välue</p>'
latin-1 character references
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
latin-1 character entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>välue</p>")
>>> serialize(p.close())
'<p>välue</p>'
mixed latin-1 text and unicode entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>”välue”</p>")
>>> serialize(p.close())
'<p>”välue”</p>'
mixed unicode and latin-1 entities
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>”välue”</p>")
>>> serialize(p.close())
'<p>”välue”</p>'
"""
# doesn't work with lxml.etree
del bug_xmltoolkit45
def bug_xmltoolkit46():
"""
problems parsing open BR tags
>>> p = HTMLTreeBuilder.TreeBuilder()
>>> p.feed("<p>key<br>value</p>")
>>> serialize(p.close())
'<p>key<br />value</p>'
"""
# doesn't work with lxml.etree
del bug_xmltoolkit46
def bug_xmltoolkit54():
"""
problems handling internally defined entities
>>> e = ElementTree.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]><doc>&ldots;</doc>")
>>> serialize(e)
'<doc>舰</doc>'
"""
# doesn't work with lxml.etree
del bug_xmltoolkit54
def bug_xmltoolkit55():
"""
make sure we're reporting the first error, not the last
>>> e = ElementTree.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>")
Traceback (most recent call last):
ParseError: undefined entity &ldots;: line 1, column 36
"""
# doesn't work with lxml.etree
del bug_xmltoolkit55
def bug_200708_version():
"""
>>> parser = ET.XMLParser()
>>> parser.version
'Expat 2.0.0'
>>> parser.feed(open("samples/simple.xml").read())
>>> print(serialize(parser.close()))
<root>
<element key="value">text</element>
<element>text</element>tail
<empty-element />
</root>
"""
# doesn't work with lxml.etree
del bug_200708_version
def bug_200708_newline():
r"""
Preserve newlines in attributes.
>>> e = ET.Element('SomeTag', text="def _f():\n return 3\n")
>>> ET.tostring(e)
'<SomeTag text="def _f(): return 3 " />'
>>> ET.XML(ET.tostring(e)).get("text")
'def _f():\n return 3\n'
>>> ET.tostring(ET.XML(ET.tostring(e)))
'<SomeTag text="def _f(): return 3 " />'
"""
# doesn't work with lxml.etree
del bug_200708_newline
def bug_200709_default_namespace():
"""
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> serialize(e, default_namespace="default") # 1
'<elem xmlns="default"><elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "{not-default}elem")
>>> serialize(e, default_namespace="default") # 2
'<elem xmlns="default" xmlns:ns1="not-default"><elem /><ns1:elem /></elem>'
>>> e = ET.Element("{default}elem")
>>> s = ET.SubElement(e, "{default}elem")
>>> s = ET.SubElement(e, "elem") # unprefixed name
>>> serialize(e, default_namespace="default") # 3
Traceback (most recent call last):
ValueError: cannot use non-qualified names with default_namespace option
"""
# doesn't work with lxml.etree
del bug_200709_default_namespace
# --------------------------------------------------------------------
if __name__ == "__main__":
import doctest, selftest
failed, tested = doctest.testmod(selftest)
print("%d tests ok." % (tested - failed))
if failed > 0:
print("%d tests failed. Exiting with non-zero return code." % failed)
sys.exit(1)
| {
"repo_name": "lxml/lxml",
"path": "src/lxml/tests/selftest.py",
"copies": "1",
"size": "35400",
"license": "bsd-3-clause",
"hash": -5018732984266702000,
"line_mean": 27.2521947326,
"line_max": 164,
"alpha_frac": 0.5743502825,
"autogenerated": false,
"ratio": 3.358952462282949,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44333027447829493,
"avg_score": null,
"num_lines": null
} |
from twisted.spread import pb
from twisted.internet import reactor
from peloton.utils.config import locateService
from peloton.utils.config import findTemplateTargetsFor
from peloton.utils.config import PelotonSettings
import peloton.utils.logging as logging
class PelotonService(object):
""" Base class for all services. Public methods all have names prefixed
'public_', much as twisted spread remote callable methods are prefixed 'remote_'.
Configuration
=============
Services live in a strictly regimented structure on the file system. This simplifies
auto-generation of code and automated loading with minimal magic.
The root path points to a directory which contains the service directory.
The service directory is laid out as follows, where the service is
called FooBar::
service_root/foobar/config/service.pcfg
/foobar.py
/<supporting code>
/resource/...
Note that nomenclature is relatively simple; the service directory must be
named the same as the service shortname (when lowercased). The configuration
files are named '*.pcfg'.
The service directory must contain at the very least a file called foobar.py (note
lower case) containing the class FooBar(PelotonService,...). Here FooBar retains
its original capitalisation and, indeed, it is a matter of convention
that the service name should be camel case.
"""
def __init__(self, name, dispatcher, logger):
""" homePath passed in on construction because from this module
cannot find where the concrete sub-class lives. Configurations are found relative to this
homePath in homePath/config.
"""
self.name = name
self.dispatcher = dispatcher
self.logger = logger
def initSupportServices(self):
""" Start supporting services, such as the logger. Kept out of __init__
so that we can instantiate very lightly (as required by the launch sequencer.)"""
self.logger = logging.getLogger(self.name)
def loadConfig(self, servicePath, runconfig=None):
""" Load service configuration file and then augment with details
of all the methods in this service classed as 'public'.
In doing this the attributes assigned by any decorators on the public methods
are checked out, especially the transform chain. Defaults are assigned
to, for example, HTML and XML output keys if none has been specified. Standard
templates are sought out on the filesystem and attached where found and
the @template keyword is substituted for in the transform chain.
runconfig is the name of a run-time configuration file specified at the time of launching
(either relative to the service config dir or an absolute path). This can specify
many things including, for example, the name under which to publish this service
and the location of the resource folder.
Developers should not use the logger here: loadConfig should be useable prior
to initSupportServices having been called."""
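# Worked illustration (hypothetical values): if a public method carries
# _PELOTON_METHOD_PROPS = {'transform.html': ['@template']} and
# findTemplateTargetsFor returns [('html', '/path/to/foo.html')], the loop
# below rewrites the entry to ["template('/path/to/foo.html')"]; targets
# left without any transform fall back to defaultXMLTransform,
# defaultHTMLTransform or jsonTransform.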
servicePath, self.settings = locateService(self.name, servicePath, runconfig=runconfig)
if self.settings.has_key('profile'):
self.profile = self.settings.profile
else:
self.profile = PelotonSettings()
self.profile['_sysRunConfig'] = runconfig
if not self.profile.has_key('publishedName'):
self.profile['publishedName'] = self.name
publicMethods = [m for m in dir(self) if m.startswith('public_') and callable(getattr(self, m))]
if not self.profile.has_key('methods'):
self.profile['methods'] = PelotonSettings()
methods = self.profile['methods']
for nme in publicMethods:
mthd = getattr(self, nme)
shortname = nme[7:]
templateTargets = findTemplateTargetsFor(self.profile['resourceRoot'], self.name, shortname)
if hasattr(mthd, "_PELOTON_METHOD_PROPS"):
properties = mthd._PELOTON_METHOD_PROPS
else:
properties = PelotonSettings()
# step one, find all template files and insert
# into transforms
for target, templateFile in templateTargets:
key = "transform.%s" % target
if properties.has_key(key):
# look for @template and substitute
if "@template" in properties[key]:
properties[key][properties[key].index("@template")] \
= "template('%s')" % templateFile
else:
# insert an entry
properties['transform.%s'%target] = \
["template('%s')" % templateFile]
# step two insert defaults for any empty transforms that
# need a little something
defaultTransforms = {'xml' : 'defaultXMLTransform',
'html' : 'defaultHTMLTransform',
'json' : 'jsonTransform'
}
for target in ['xml', 'html', 'json']:
key = "transform.%s" % target
if not properties.has_key(key) \
or properties[key] == []:
properties[key]=[defaultTransforms[target]]
# step three, look for all transforms which still have
# @transform in the chain and replace with defaults
for k, v in properties.items():
if not k.startswith('transform.'):
continue
target = k[10:]
if "@template" in v:
try:
v[v.index('@template')] = defaultTransforms[target]
except:
v[v.index('@template')] = ''
if not methods.has_key(shortname):
methods[shortname] = PelotonSettings()
record = methods[shortname]
record['doc'] = mthd.__doc__
record['properties'] = str(properties)
self.version = self.profile['version']
def start(self):
""" Executed after configuration is loaded, prior to starting work.
Can be used to set up database pools etc. Override in actual services. """
pass
def stop(self):
""" Executed prior to shuting down this service or the node.
Can be used to cleanup database pools etc. Overide in actual services. """
pass
def register(self, key, method, exchange='events', inThread=True):
""" Registers method (which must have signature msg, exchange,
key, ctag) to be the target for events on exchange with key matching the
specified pattern. By default inThread is True which means the event
handler will be called in a thread. If set False the event will be handled
in the main event loop so care must be taken not to perform long-running
operations in handlers that operate in this manner.
The handler class is returned by this method; keeping a reference to it
enables the service to de-register the handler subsequently."""
class ServiceMethodHandler(pb.Referenceable):
def __init__(self, handler, inThread=True):
self.handler = handler
self.inThread = inThread
def remote_eventReceived(self, msg, exchange, key, ctag):
if self.inThread:
reactor.callInThread(self.handler, msg, exchange, key, ctag)
else:
self.handler(msg, exchange, key, ctag)
handler = ServiceMethodHandler(method, inThread)
reactor.callFromThread(self.dispatcher.register, key, handler, exchange)
return handler
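# Sketch of typical use from inside a service method (the handler signature
# is the documented (msg, exchange, key, ctag); the names are hypothetical):
#
#   def onPrice(msg, exchange, key, ctag):
#       self.logger.info("price update: %s" % msg)
#   token = self.register('prices.*', onPrice, exchange='events')
#   ...
#   self.deregister(token)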
def deregister(self, handler):
"""De-register this event handler. """
reactor.callFromThread(self.dispatcher.deregister,handler)
def fireEvent(self, key, exchange='events', **kwargs):
""" Fire an event on the event bus. """
reactor.callFromThread(
self.dispatcher.fireEvent, key, exchange, **kwargs)
def public_index(self):
""" Default index page for HTTP connections. """
return "There is no index for this service!" | {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/service.py",
"copies": "1",
"size": "8458",
"license": "bsd-3-clause",
"hash": -3442739855671222000,
"line_mean": 43.7566137566,
"line_max": 104,
"alpha_frac": 0.6324190116,
"autogenerated": false,
"ratio": 4.6447007138934655,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00993032434070936,
"num_lines": 189
} |
# $Id: setup.py 532 2009-01-29 04:32:33Z casey.duncan $
import sys
from distutils.core import setup, Extension
if sys.platform != 'win32':
compile_args = ['-funroll-loops']
else:
# XXX insert win32 flag to unroll loops here
compile_args = []
setup(
name='noise',
version='1.0b3',
description='Perlin noise for Python',
long_description='''\
Perlin noise is ubiquitous in modern CGI. Used for procedural texturing,
animation, and enhancing realism, Perlin noise has been called the "salt" of
procedural content. Perlin noise is a type of gradient noise, smoothly
interpolating across a pseudo-random matrix of values.
The noise library includes native-code implementations of Perlin "improved"
noise and Perlin simplex noise. It also includes a fast implementation of
Perlin noise in GLSL, for use in OpenGL shaders. The shader code and many of
the included examples require Pyglet (http://www.pyglet.org), the native-code
noise functions themselves do not, however.
The Perlin improved noise functions can also generate fBm (fractal Brownian
motion) noise by combining multiple octaves of Perlin noise. Functions for
convenient generation of turbulent noise are also included.
Version 1.0b3 fixed problems compiling with Visual C++ on Windows.
Thanks to Stas Kounitski for fixing this issue!
''',
author='Casey Duncan',
author_email='casey.duncan@gmail.com',
url='http://code.google.com/p/caseman',
classifiers = [
'Development Status :: 4 - Beta',
'Topic :: Multimedia :: Graphics',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
],
package_dir={'noise': ''},
packages=['noise'],
ext_modules=[
Extension('noise._simplex', ['_simplex.c'],
extra_compile_args=compile_args,
),
Extension('noise._perlin', ['_perlin.c'],
extra_compile_args=compile_args,
)
],
)
| {
"repo_name": "greyarea/noise",
"path": "setup.py",
"copies": "1",
"size": "1966",
"license": "mit",
"hash": 356684837043012500,
"line_mean": 33.4912280702,
"line_max": 77,
"alpha_frac": 0.7151576806,
"autogenerated": false,
"ratio": 3.498220640569395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47133783211693947,
"avg_score": null,
"num_lines": null
} |
# $Id: SFrameHelpers.py 344 2012-12-13 13:10:53Z krasznaa $
##
# @package SFrameHelpers
# @short Collection of SFrame related python functions
#
# This package is a collection of python functions useful for SFrame.
# They can either be used from an interactive python session by
# executing
#
# <code>
# >>> import SFrameHelpers
# </code>
#
# or using the script(s) shipped with SFrame.
#
# @author Stefan Ask <Stefan.Ask@cern.ch> - Manchester
# @author David Berge <David.Berge@cern.ch> - CERN
# @author Johannes Haller <Johannes.Haller@cern.ch> - Hamburg
# @author A. Krasznahorkay <Attila.Krasznahorkay@cern.ch> - NYU/Debrecen
# Import base module(s):
import os.path
import time
# Import PyROOT:
import ROOT
##
# @short Function creating <In /> configuration nodes for MC input
#
# The function checks the specified input files and writes the XML nodes
# with their information to the specified output file. Their luminosity
# is calculated using the specified cross section.
#
# @param crossSection Cross section of the Monte Carlo
# @param files List of input files
# @param output Name of the output file
# @param tree Name of the main TTree in the files
# @param prefix Prefix to be put before the file paths.
# E.g. root://mymachine/
# @param real_filenames Boolean flag specifying if the file names are good as
# they are (no absolute path name lookup needed)
# @returns <code>0</code> if successful, something else if not
def CreateInput( crossSection, files, output, tree, prefix, real_filenames ):
# Turn off ROOT error messages:
oldErrorIgnoreLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = ROOT.kSysError
# Open the output file:
outfile = open( output, "w" )
# Print some header in the output text file:
outfile.write( "<!-- File generated by SFrameHelpers.CreateInput(...) on %s -->\n" % \
time.asctime( time.localtime( time.time() ) ) )
outfile.write( "<!-- The supplied x-section was: %f -->\n\n" % crossSection )
# Some summary values:
totEvents = 0
totLuminosity = 0.0
# Loop over all the files:
for file in files:
# Print some status messages:
print "Processing file: %s" % os.path.basename( file )
# Open the AANT file:
tfile = ROOT.TFile.Open( file )
if not tfile.IsOpen():
print "*ERROR* File \"" + file + "\" does not exist *ERROR*"
return 255
# Access a tree in the ntuple:
collTree = tfile.Get( tree )
if( str( collTree ) == 'None' ):
print "*ERROR* " + tree + " not found in file: \"" + file + "\" *ERROR*"
continue
# Read the number of events in the file:
events = collTree.GetEntries()
luminosity = float( events ) / crossSection
# Increment the summary variables:
totEvents = totEvents + events
totLuminosity = totLuminosity + luminosity
# Compose the XML node. Make sure that the file name has an absolute path
# (no symbolic link, or relative path) and that the luminosity is printed with
# a meaningful precision.
if real_filenames:
outfile.write( "<In FileName=\"" + prefix + file + \
( "\" Lumi=\"%.3g" % luminosity ) + "\" />\n" )
else:
outfile.write( "<In FileName=\"" + prefix + \
os.path.abspath( os.path.realpath( file ) ) + \
( "\" Lumi=\"%.3g" % luminosity ) + "\" />\n" )
# Close the opened input file:
tfile.Close()
# Save some summary information:
outfile.write( "\n<!-- Total number of events processed: %s -->\n" % totEvents )
outfile.write( "<!-- Representing a total luminosity : %.3g -->" % totLuminosity )
# Close the output file:
outfile.close()
# Print some summary information:
print "\nTotal number of events processed: %s" % totEvents
print "Representing a total luminosity : %.3g\n" % totLuminosity
# Turn back ROOT error messages:
ROOT.gErrorIgnoreLevel = oldErrorIgnoreLevel
return 0
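# Example invocation (hypothetical file names, tree name and cross section):
#
#   CreateInput( 1.2, [ "mc.000001.root", "mc.000002.root" ],
#                "input.xml", "CollectionTree", "", False )
#
# Each file contributes Lumi = nEvents / crossSection, and the generated
# XML ends with comments summarising the totals.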
##
# @short Function creating <In /> configuration nodes for data input
#
# The function checks the specified input files and writes the XML nodes
# with their information to the specified output file. It assumes that the
# input files are data files, so it just puts a dummy "1.0" as the
# luminosity for them. (The luminosities are disregarded in the event
# weight calculation when the InputData type is set to "data".)
#
# @param files List of input files
# @param output Name of the output file
# @param tree Name of the main TTree in the files
# @param prefix Prefix to be put before the file paths.
# E.g. root://mymachine/
# @param real_filenames Boolean flag specifying if the file names are good as
# they are (no absolute path name lookup needed)
# @returns <code>0</code> if successful, something else if not
def CreateDataInput( files, output, tree, prefix, real_filenames ):
# Turn off ROOT error messages:
oldErrorIgnoreLevel = ROOT.gErrorIgnoreLevel
ROOT.gErrorIgnoreLevel = ROOT.kSysError
# Open the output file:
outfile = open( output, "w" )
# Print some header in the output text file:
outfile.write( "<!-- File generated by SFrameHelpers.CreateDataInput(...) on %s -->\n\n" % \
time.asctime( time.localtime( time.time() ) ) )
# Some summary values:
totEvents = 0
# Loop over all the files:
for file in files:
# Print some status messages:
print "Processing file: %s" % os.path.basename( file )
# Open the AANT file:
tfile = ROOT.TFile.Open( file )
if ( not tfile ) or ( not tfile.IsOpen() ):
print "*ERROR* File \"" + file + "\" does not exist *ERROR*"
return 255
# Access a tree in the ntuple:
collTree = tfile.Get( tree )
if( str( collTree ) == 'None' ):
print "*ERROR* " + tree + " not found in file: \"" + file + "\" *ERROR*"
continue
# Read the number of events in the file:
events = collTree.GetEntries()
# Increment the summary variables:
totEvents = totEvents + events
# Compose the XML node. Make sure that the file name has an absolute path
# (no symbolic link, or relative path). The luminosity is fixed at 1.0
# for data files.
if real_filenames:
outfile.write( "<In FileName=\"" + prefix + file + \
"\" Lumi=\"1.0\" />\n" )
else:
outfile.write( "<In FileName=\"" + prefix + \
os.path.abspath( os.path.realpath( file ) ) + \
"\" Lumi=\"1.0\" />\n" )
# Close the opened input file:
tfile.Close()
# Save some summary information:
outfile.write( "\n<!-- Total number of events processed: %s -->\n" % totEvents )
# Close the output file:
outfile.close()
# Print some summary information:
print "\nTotal number of events processed: %s" % totEvents
# Turn back ROOT error messages:
ROOT.gErrorIgnoreLevel = oldErrorIgnoreLevel
return 0
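# Example invocation (hypothetical file and tree names):
#
#   CreateDataInput( [ "data.000001.root" ], "data_input.xml",
#                    "CollectionTree", "root://mymachine/", True )
#
# Every <In /> node is written with Lumi="1.0", as explained above.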
| {
"repo_name": "jpavel/cms-ucl-tau",
"path": "SFrame/python/SFrameHelpers.py",
"copies": "1",
"size": "7024",
"license": "mit",
"hash": -2654306918557603000,
"line_mean": 33.9452736318,
"line_max": 94,
"alpha_frac": 0.6452164009,
"autogenerated": false,
"ratio": 3.645044110015568,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4790260510915568,
"avg_score": null,
"num_lines": null
} |
from twisted.internet.error import CannotListenError
from peloton.plugins import PelotonPlugin
from peloton.coreio import PelotonManagementInterface
from peloton.plugins.support.sshmanhole import PasswordManhole
class PelotonShell(PelotonPlugin):
""" Provides an interpreter inside the event loop to which
one can connect by SSH using a pre-specified username/password.
Objects in the namespace of the interpreter provide access to the
mesh and allow administrators to interrogate the mesh and to start and
stop services as well as make other hot-changes.
"""
def initialise(self):
# create an interface, pull out all 'public_*' methods
# into our namespace, striping the prefix
psc = PelotonManagementInterface(self.kernel)
publicMethods = [i for i in dir(psc) if i.startswith('public_')]
namespace={}
for m in publicMethods:
namespace[m[7:]] = getattr(psc, m)
self.pmh = PasswordManhole(int(self.config.port),
self.config.username,
self.config.password,
namespace)
def start(self):
try:
self.pmh.startService()
self.logger.info("SSH shell plugin initialised")
except CannotListenError:
raise Exception("SSH Shell cannot listen on port %d" % self.config.port)
def _stopped(self, *args, **kargs):
self.logger.info("SSH shell plugin stopped")
def stop(self):
self.logger.info("SSH shell plugin stopping")
d = self.pmh.stopService()
d.addCallback(self._stopped)
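# To use the shell (sketch): point an ssh client at the configured port,
# e.g. "ssh -p <port> <username>@<node>", supply the configured password,
# and call any of the namespace methods (everything the management
# interface exposes as public_*, minus the prefix) from the prompt.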
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/plugins/shell.py",
"copies": "1",
"size": "1842",
"license": "bsd-3-clause",
"hash": 5994752656427817000,
"line_mean": 37.375,
"line_max": 84,
"alpha_frac": 0.6498371336,
"autogenerated": false,
"ratio": 4.1863636363636365,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.009444178071629053,
"num_lines": 48
} |
'''
Created on 20 Oct 2009
@author: Odd Andre Hjelkrem
modified by Tomas Levin, corrected units
'''
import SEMBA as S
#Create empty dictionaries to load emission data in
#SHIP = {}
def CreateShip():
S.load_Ship()
create_vehicle_list()
Vehicle_Types = {}
def create_vehicle_list():
for k,v in S.H_SHIP.items():
k=0
Vehicle_Types[v[0]]= v[0] +" "+ v[8]
def ListTypes():
"""
Lists all ships available in the dataset that is loaded.
"""
#Function to sort as integers
def compare(a, b):
return cmp(int(a), int(b)) # compare as integers
keys = Vehicle_Types.keys()
keys.sort(compare)
print "Ship ID ; Description"
for key in keys:
print str(key)+ ' ; '+ Vehicle_Types[key]
def CalculateAUX(id,Fuel='MGO', DWT=-1, GT=-1):
""" List of shiptypes
1 Bulk ship
2 Dry Cargo
3 Container
4 Ro Ro
5 Reefer
6 Cargo ferry
7 Passenger ferry
8 Tanker (oil)
9 Tanker (Chemical)
10 Gas tanker (LNG)
11 Gas tanker (LPG)
12 Cruise vessel (mechanical drive)
13 Cruise vessel (electric drive)
14 vehicle carrier
AUX emissions are returned as grams per hour
"""
auxData={}
auxData[1]=['DWT',35.312,0.3603]
auxData[2]=['DWT',0.7476,0.7796]
auxData[3]=['DWT',0.5504,0.8637]
auxData[4]=['GT',1.347,0.7512]
auxData[5]=['DWT',0.4827,0.9375]
auxData[6]=['GT',1.655,0.7658]
auxData[7]=['GT',1.13,0.8123]
auxData[8]=['DWT',9.9262,0.703]
auxData[9]=['DWT',5.5294,0.5863]
auxData[10]=['DWT',0.0047,1.2147]
auxData[11]=['DWT',18.043,0.5057]
auxData[12]=['GT',0.9341,0.9482]
auxData[13]=['GT',0.0142,1.0059]
auxData[14]=['GT',0.4916,0.8399]
data=auxData[id]
k=data[1]
n=data[2]
if data[0]=='GT' and GT==-1:
print "Needs GT to caculate Auxilary engine emission"
return -1
if data[0]=='DWT' and DWT==-1:
print "Needs DWT to caculate Auxilary engine emission"
return -1
EAux=-1
if data[0]=='GT':
EAux=float(k) * pow(GT, float(n))
if data[0]=='DWT':
EAux=float(k) * pow(DWT, float(n))
ELoad=-1
ELay=-1
if id==1:
ELoad=EAux*0.32
ELay=EAux*0.21
elif id ==2:
ELoad=EAux*0.33
ELay=EAux*0.27
elif id ==4:
ELoad=EAux*0.41
ELay=EAux*0.25
elif id==8 or id==9 or id==10 or id==11:
ELoad=EAux*0.62
ELay=EAux*0.19
elif id==14:
ELoad=EAux*0.62
ELay=EAux*0.19
else:
ELoad=EAux*0.43
ELay=EAux*0.21
# calculate emissions
components=[]
components=componentValues(EAux,4)
#[FC,NOx,CO,HC,PM] g/kWh, CO2=3.17 times fuel consumption
FC=components[0]
NOx=components[1]
CO=components[2]
HC=components[3]
PM=components[4]
CO2=FC*3.17
SO2Lay=-1
SO2Load=-1
if Fuel == 'MDO':
SO2Lay = FC*ELay*((1.0/100)*(64/32))
SO2Load = FC*ELoad*((1.0/100)*(64/32))
if Fuel == 'MGO':
SO2Lay = FC*ELay*((0.2/100)*(64/32))
SO2Load = FC*ELoad*((0.2/100)*(64/32))
if Fuel == 'RO':
SO2Lay = FC*ELay*((2.0/100)*(64/32))
SO2Load = FC*ELoad*((2.0/100)*(64/32))
EmissionLoad = [FC*ELoad,NOx*ELoad,CO*ELoad,HC*ELoad,PM*ELoad,CO2*ELoad,SO2Load,'g/h']
EmissionLay = [FC*ELay,NOx*ELay,CO*ELay,HC*ELay,PM*ELay,CO2*ELay,SO2Lay,'g/h']
return EmissionLoad, EmissionLay
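# Example (hypothetical ship size): auxiliary-engine emissions for a
# container vessel (id=3, sized by DWT) burning marine gas oil:
#
#   loading, lay = CalculateAUX(3, Fuel='MGO', DWT=20000)
#
# Both returned lists read [FC, NOx, CO, HC, PM, CO2, SO2, 'g/h'].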
def componentValues(EngineSize,stroke):
"""Function to return emissions factors for relevant stroke and engine size
Stroke is either 2 or 4
return datalist:
[FC,NOx,CO,HC,PM] g/kWh, CO2=3.17 times fuel consumption
"""
if(EngineSize < 500):
if(stroke == 2):
Components = [-1, 16.24, 0.84, 0.45, -1]
elif(stroke == 4):
Components = [186.71, 12.23, 1.30, 0.61, 0.36]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 2500 and EngineSize > 500):
if(stroke == 2):
Components = [-1, 16.37, 0.90, 0.43, 0.40]
elif(stroke == 4):
Components = [193.43, 11.59, 0.92, 0.55, 0.19]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 5000 and EngineSize > 2500):
if(stroke == 2):
Components = [210.00, 16.05, 0.56, 0.42, 0.50]
elif(stroke == 4):
Components = [186.97, 11.94, 0.51, 0.32, 0.20]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 7500 and EngineSize > 5000):
if(stroke == 2):
Components = [175.00, 18.14, 0.67, 0.52, 0.50]
elif(stroke == 4):
Components = [183.93, 12.86, 0.54, 0.23, 0.19]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 10000 and EngineSize > 7500):
if(stroke == 2):
Components = [175.00, 16.80, 0.87, 0.47, 0.50]
elif(stroke == 4):
Components = [183.49, 12.73, 0.53, 0.24, 0.20]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 15000 and EngineSize > 10000):
if(stroke == 2):
Components = [170.0, 15.56, 0.91, 0.47, 0.50]
elif(stroke == 4):
Components = [181.28, 13.15, 0.59, 0.30, 0.20]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize < 25000 and EngineSize > 15000):
if(stroke == 2):
Components = [170.0, 16.71, 0.80, 0.44, 0.5]
elif(stroke == 4):
Components = [181.30, 12.94, 0.61, 0.26, 0.20]
else:
Components = [-1, -1, -1, -1, -1]
return Components
elif(EngineSize > 25000):
if(stroke == 2):
Components = [167.0, 15.24, 0.6, 0.22, 0.50]
elif(stroke == 4):
Components = [178.66, 12.80, 0.93, 0.23, 0.22]
else:
Components = [-1, -1, -1, -1, -1]
return Components
else:
Components = [-1, -1, -1, -1, -1]
return Components
return Components
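# Worked example from the bands above: a 4-stroke engine of 3000 kW falls
# in the 2500-5000 kW band, so componentValues(3000, 4) returns
# [186.97, 11.94, 0.51, 0.32, 0.20] (FC, NOx, CO, HC, PM in g/kWh).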
def CalculateShipSail(ShipID, Fuel='MDO', stroke=2, GT= -1, DWT=0):
#1 Bulk ship
#2 Dry cargo
#3 Container
#4 Ro-Ro
#5 Reefer
#6 Cargo ferry
#7 Passenger ferry
#8 High-Speed ferry
#9 Tanker
#10 Gas tanker
#11 Cruise vessel
#12 Vehicle carrier
WarningText = []
#Split into weight groups
if(GT > 0):
if(GT < 500):
GTGroup = 1
elif(GT < 5000):
GTGroup = 2
elif(GT < 10000):
GTGroup = 3
elif(GT < 25000):
GTGroup = 4
elif(GT < 50000):
GTGroup = 5
elif(GT < 100000):
GTGroup = 6
elif(GT < 250000):
GTGroup = 7
elif(GT > 250000):
GTGroup = 8
else:
WarningText.append("GT not in range")
else:
if(DWT == -1):
WarningText.append("GT=-1 && DWT=-1")
GTGroup = 1
DWTGroup = 1
else:
if(DWT < 500):
DWTGroup = 1
elif(DWT < 5000):
DWTGroup = 2
elif(DWT < 10000):
DWTGroup = 3
elif(DWT < 25000):
DWTGroup = 4
elif(DWT < 50000):
DWTGroup = 5
elif(DWT < 100000):
DWTGroup = 6
elif(DWT < 250000):
DWTGroup = 7
else:
DWTGroup = 8
# input variables to the csv file: shipID and weight class
#Get Data from the Ship dictionary
if(GT > 0):
weightgroup = GTGroup
else:
weightgroup = DWTGroup
key = str(ShipID) + "_" + str(weightgroup)
value = S.H_SHIP[key]
data = value
VehicleID = data[0]
WeightGroup = data[1]
DWTSpeed = data[2]
GTSpeed = data[3]
DWTk = data[4]
DWTn = data[5]
GTk = data[6]
GTn = data[7]
Emission = -1
#Step 1: Determination of average speed
#This step is done in the .csv-file, moved to the semba initialization file
#Step 2: Determination of main engine power output
if(GT > 0):
ME = float(GTk) * pow(GT, float(GTn)) #kW
elif(DWT > 0): #DWT>0
ME = float(DWTk) * pow(DWT, float(DWTn))#kW
else:
ME = -1
#Step 3: Identification of main engine type
#2 or 4 stroke engine, input in function
#Step 4: Estimation of fuel consumption and emissions:
Components = [] #[FC,NOx,CO,HC,PM] g/kWh, CO2=3.17 times fuel consumption
Components=componentValues(ME,stroke)
#WarningText.append(w)
if(GT == -1):
if(DWT == -1):
WarningText.append("Can not calculate vessel speed")
else:
Speed = DWTSpeed
else:
Speed = GTSpeed
#
if(Speed != 0):
kph = float(Speed) * 1.852 #km/h kilometers per hour
else:
kph = -1
FC=Components[0]
NOx=Components[1]
CO=Components[2]
HC=Components[3]
PM=Components[4]
CO2=FC*3.17
#Emissions=[FC*ME/kph,NOx*ME/kph,CO*ME/kph,HC*ME/kph,PM*ME/kph,CO2*ME/kph]
# Sulphur (SO2) calculations come here
#MDO = Marine diesel Oil
#MGO = Marine gas oil
#RO = Residual oil
SO2=-1
if Fuel == 'MDO':
SO2 = FC*ME/kph*((1.0/100)*(64/32))
if Fuel == 'MGO':
SO2 = FC*ME/kph*((0.2/100)*(64/32))
if Fuel == 'RO':
SO2 = FC*ME/kph*((2.0/100)*(64/32))
Emissions=[FC*ME/kph,NOx*ME/kph,CO*ME/kph,HC*ME/kph,PM*ME/kph,CO2*ME/kph, SO2,"g/km", WarningText]
return Emissions
###########____Load Data____##################
CreateShip()
print "Hei"
ListTypes()
#Test for TPG ship
#CalculateShip(ShipID, Component, stroke=2, GT= -1, DWT=0, Load=0)
print CalculateShipSail(3, "MGO", stroke=4, GT= -1, DWT=1278)
#CalculateAUX(id,loadingTime,layTime, DWT=-1, GT=-1):
print CalculateAUX(3,'MGO',GT=-1,DWT=1287) | {
"repo_name": "tomasle/semba",
"path": "SEMBA/SHIP.py",
"copies": "1",
"size": "9179",
"license": "bsd-2-clause",
"hash": -5222393544046216000,
"line_mean": 23.9456521739,
"line_max": 104,
"alpha_frac": 0.5856847151,
"autogenerated": false,
"ratio": 2.5475992228698305,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8344012356577475,
"avg_score": 0.05785431627847121,
"num_lines": 368
} |
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
from __future__ import print_function
# # --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys, os, getopt
from rdkit import RDConfig
from rdkit import Chem
from rdkit.Chem import AllChem
_version = "0.3.2"
_usage = """
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage = "This is ShowFeats version %s" % (_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor': (0, 1, 1),
'Acceptor': (1, 0, 1),
'NegIonizable': (1, 0, 0),
'PosIonizable': (0, 0, 1),
'ZnBinder': (1, .5, .5),
'Aromatic': (1, .8, .2),
'LumpedHydrophobe': (.5, .25, 0),
'Hydrophobe': (.5, .25, 0),
}
def _getVectNormal(v, tol=1e-4):
if math.fabs(v.x) > tol:
res = Geometry.Point3D(v.y, -v.x, 0)
elif math.fabs(v.y) > tol:
res = Geometry.Point3D(-v.y, v.x, 0)
elif math.fabs(v.z) > tol:
res = Geometry.Point3D(1, 0, 0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
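# Example: for v = Geometry.Point3D(0, 0, 1) neither x nor y exceeds the
# tolerance, so the function returns the (already normalised) x axis,
# Geometry.Point3D(1, 0, 0).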
_canonArrowhead = None
viewer = None
def _buildCanonArrowhead(headFrac, nSteps, aspect):
global _canonArrowhead
# this module imports Geometry (RDGeometry is never imported), and the
# angle must advance by twopi / nSteps per step -- whole turns of twopi
# would land every point on the same spot of the circle
startP = Geometry.Point3D(0, 0, headFrac)
_canonArrowhead = [startP]
scale = headFrac * aspect
baseV = Geometry.Point3D(scale, 0, 0)
_canonArrowhead.append(baseV)
twopi = 2 * math.pi
for i in range(1, nSteps):
v = Geometry.Point3D(scale * math.cos(i * twopi / nSteps), scale * math.sin(i * twopi / nSteps), 0)
_canonArrowhead.append(v)
_globalArrowCGO = []
_globalSphereCGO = []
# taken from pymol's cgo.py
BEGIN = 2
END = 3
TRIANGLE_FAN = 6
COLOR = 6
VERTEX = 4
NORMAL = 5
SPHERE = 7
CYLINDER = 9
ALPHA = 25
def _cgoArrowhead(viewer, tail, head, radius, color, label, headFrac=0.3, nSteps=10, aspect=.5):
global _globalArrowCGO
delta = head - tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head - tail
dv.Normalize()
dv *= headFrac
startP = head
normal *= headFrac * aspect
cgo = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, dv.x, dv.y, dv.z, VERTEX,
head.x + dv.x, head.y + dv.y, head.z + dv.z]
base = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, -dv.x, -dv.y, -dv.z,
VERTEX, head.x, head.y, head.z]
v = startP + normal
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
for i in range(1, nSteps):
v = FeatDirUtils.ArbAxisRotation(360. / nSteps * i, delta, normal)
cgo.extend([NORMAL, v.x, v.y, v.z])
v += startP
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
base.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer, tail, head, radius, color, label, transparency=0, includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA, 1 - transparency])
else:
_globalArrowCGO.extend([ALPHA, 1])
_globalArrowCGO.extend([CYLINDER,
tail.x,
tail.y,
tail.z,
head.x,
head.y,
head.z,
radius * .10,
color[0],
color[1],
color[2],
color[0],
color[1],
color[2], ])
if includeArrowhead:
_cgoArrowhead(viewer, tail, head, radius, color, label)
def ShowMolFeats(mol, factory, radius=0.5, confId=-1, showOnly=True, name='',
transparency=0.0, colors=None, excludeTypes=[], useFeatDirs=True, featLabel=None,
dirLabel=None, includeArrowheads=True, writeFeats=False, showMol=True,
featMapFile=False):
global _globalSphereCGO
out_list = []
import json
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
molFeats = factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel = '%s-feats' % name
# viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel = featLabel + "-dirs"
# viewer.server.resetCGO(dirLabel)
for i, feat in enumerate(molFeats):
family = feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family, (.5, .5, .5))
nm = '%s(%d)' % (family, i + 1)
if transparency:
_globalSphereCGO.extend([ALPHA, 1 - transparency])
else:
_globalSphereCGO.extend([ALPHA, 1])
_globalSphereCGO.extend([COLOR, color[0], color[1], color[2], SPHERE, pos.x, pos.y, pos.z,
radius])
aidText = ''
if writeFeats:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print('%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s' % (family, pos.x, pos.y, pos.z, aidText))
if featMapFile:
print(" family=%s pos=(%.3f,%.3f,%.3f) weight=1.0" % (family, pos.x, pos.y, pos.z), end='',
file=featMapFile)
if useFeatDirs:
ps = []
if family == 'Aromatic':
ps, fType = FeatDirUtils.GetAromaticFeatVects(
mol.GetConformer(confId), feat.GetAtomIds(), pos, scale=1.0)
elif family == 'Donor':
aids = feat.GetAtomIds()
if len(aids) == 1:
featAtom = mol.GetAtomWithIdx(aids[0])
hvyNbrs = [x for x in featAtom.GetNeighbors() if x.GetAtomicNum() != 1]
if len(hvyNbrs) == 1:
ps, fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 2:
ps, fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 3:
ps, fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif family == 'Acceptor':
aids = feat.GetAtomIds()
if len(aids) == 1:
featAtom = mol.GetAtomWithIdx(aids[0])
hvyNbrs = [x for x in featAtom.GetNeighbors() if x.GetAtomicNum() != 1]
if len(hvyNbrs) == 1:
ps, fType = FeatDirUtils.GetAcceptor1FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 2:
ps, fType = FeatDirUtils.GetAcceptor2FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 3:
ps, fType = FeatDirUtils.GetAcceptor3FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
this_l = [family, pos.x, pos.y, pos.z]
for tail, head in ps:
ShowArrow(viewer, tail, head, radius, color, dirLabel, transparency=transparency,
includeArrowhead=includeArrowheads)
vect = head - tail
print('dir=(%.3f,%.3f,%.3f)' % (vect.x, vect.y, vect.z),)
if vect:
this_l.extend([vect.x, vect.y, vect.z])
out_list.append(this_l)
if featMapFile:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print('# %s' % (aidText), file=featMapFile)
return out_list
def get_feats(molecule):
fdef = open(os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'), 'r').read()
factory = AllChem.BuildFeatureFactoryFromString(fdef)
return ShowMolFeats(molecule, factory, transparency=0.25,
showOnly=False, includeArrowheads=True,
writeFeats=True, showMol=True)
if __name__ == '__main__':
mol = Chem.MolFromSmiles('Cc1sc2c(c1C)C(c1ccc(Cl)cc1)=NC(CC(=O)OC(C)(C)C)c1[nH]nc(C)[n+]1-2')
AllChem.EmbedMolecule(mol)
print(get_feats(mol))
| {
"repo_name": "InformaticsMatters/pipelines",
"path": "src/python/pipelines/rdkit/show_feats.py",
"copies": "1",
"size": "8263",
"license": "apache-2.0",
"hash": 3940858231345543700,
"line_mean": 30.7807692308,
"line_max": 100,
"alpha_frac": 0.5942151761,
"autogenerated": false,
"ratio": 2.881101813110181,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8901894858873833,
"avg_score": 0.014684426067269586,
"num_lines": 260
} |
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
from __future__ import print_function
_version = "0.3.2"
_usage="""
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage="This is ShowFeats version %s"%(_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor':(0,1,1),
'Acceptor':(1,0,1),
'NegIonizable':(1,0,0),
'PosIonizable':(0,0,1),
'ZnBinder':(1,.5,.5),
'Aromatic':(1,.8,.2),
'LumpedHydrophobe':(.5,.25,0),
'Hydrophobe':(.5,.25,0),
}
def _getVectNormal(v,tol=1e-4):
if math.fabs(v.x)>tol:
res = Geometry.Point3D(v.y,-v.x,0)
elif math.fabs(v.y)>tol:
res = Geometry.Point3D(-v.y,v.x,0)
elif math.fabs(v.z)>tol:
res = Geometry.Point3D(1,0,0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
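# e.g. for a vector along z, _getVectNormal(Geometry.Point3D(0, 0, 2)) returns
# Point3D(1, 0, 0): an arbitrary unit vector perpendicular to the input.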
_canonArrowhead=None
def _buildCanonArrowhead(headFrac,nSteps,aspect):
global _canonArrowhead
startP = Geometry.Point3D(0,0,headFrac)
_canonArrowhead=[startP]
scale = headFrac*aspect
baseV = Geometry.Point3D(scale,0,0)
_canonArrowhead.append(baseV)
twopi = 2*math.pi
for i in range(1,nSteps):
# points evenly spaced around the arrowhead's base circle
v = Geometry.Point3D(scale*math.cos(i*twopi/nSteps),scale*math.sin(i*twopi/nSteps),0)
_canonArrowhead.append(v)
_globalArrowCGO=[]
_globalSphereCGO=[]
# taken from pymol's cgo.py
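# Each CGO (Compiled Graphics Object) drawing command is one of these opcodes
# followed by its numeric arguments, all collected in a flat list that PyMOL
# renders.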
BEGIN=2
END=3
TRIANGLE_FAN=6
COLOR=6
VERTEX=4
NORMAL=5
SPHERE=7
CYLINDER=9
ALPHA=25
def _cgoArrowhead(viewer,tail,head,radius,color,label,headFrac=0.3,nSteps=10,aspect=.5):
global _globalArrowCGO
delta = head-tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head-tail
dv.Normalize()
dv *= headFrac
startP = head
normal*=headFrac*aspect
cgo = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,dv.x,dv.y,dv.z,
VERTEX,head.x+dv.x,head.y+dv.y,head.z+dv.z]
base = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,-dv.x,-dv.y,-dv.z,
VERTEX,head.x,head.y,head.z]
v = startP+normal
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
for i in range(1,nSteps):
v = FeatDirUtils.ArbAxisRotation(360./nSteps*i,delta,normal)
cgo.extend([NORMAL,v.x,v.y,v.z])
v += startP
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
base.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer,tail,head,radius,color,label,transparency=0,includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA,1-transparency])
else:
_globalArrowCGO.extend([ALPHA,1])
_globalArrowCGO.extend([CYLINDER,tail.x,tail.y,tail.z,
head.x,head.y,head.z,
radius*.10,
color[0],color[1],color[2],
color[0],color[1],color[2],
])
if includeArrowhead:
_cgoArrowhead(viewer,tail,head,radius,color,label)
def ShowMolFeats(mol,factory,viewer,radius=0.5,confId=-1,showOnly=True,
name='',transparency=0.0,colors=None,excludeTypes=[],
useFeatDirs=True,featLabel=None,dirLabel=None,includeArrowheads=True,
writeFeats=False,showMol=True,featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol,name=name,showOnly=showOnly,confId=confId)
molFeats=factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel='%s-feats'%name
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel=featLabel+"-dirs"
viewer.server.resetCGO(dirLabel)
for i,feat in enumerate(molFeats):
family=feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family,(.5,.5,.5))
nm = '%s(%d)'%(family,i+1)
if transparency:
_globalSphereCGO.extend([ALPHA,1-transparency])
else:
_globalSphereCGO.extend([ALPHA,1])
_globalSphereCGO.extend([COLOR,color[0],color[1],color[2],
SPHERE,pos.x,pos.y,pos.z,
radius])
if writeFeats:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s'%(family,pos.x,pos.y,pos.z,aidText))
if featMapFile:
print(" family=%s pos=(%.3f,%.3f,%.3f) weight=1.0"%(family,pos.x,pos.y,pos.z),end='',file=featMapFile)
if useFeatDirs:
ps = []
if family=='Aromatic':
ps,fType = FeatDirUtils.GetAromaticFeatVects(mol.GetConformer(confId),
feat.GetAtomIds(),pos,
scale=1.0)
elif family=='Donor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif family=='Acceptor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetAcceptor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetAcceptor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetAcceptor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
for tail,head in ps:
ShowArrow(viewer,tail,head,radius,color,dirLabel,
transparency=transparency,includeArrowhead=includeArrowheads)
if featMapFile:
vect = head-tail
print('dir=(%.3f,%.3f,%.3f)'%(vect.x,vect.y,vect.z),end=' ',file=featMapFile)
if featMapFile:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print('# %s'%(aidText),file=featMapFile)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys,os,getopt
from rdkit import RDConfig
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('-x','--exclude',default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f','--fdef',default=os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs','--nodirs',dest='useDirs',default=True,action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads',dest='includeArrowheads',default=True,action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear','--noClear',dest='clearAll',default=False,action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols','--nomols',default=False,action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats','--write',default=False,action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile','--mapFile',default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose',default=False,action='store_true',
help='be verbose')
if __name__=='__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options,args = parser.parse_args()
if len(args)<1:
parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
except Exception:
logger.error('Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = open(options.fdef,'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s'%options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print('# Family \tX \tY \tZ \tRadius\t # Atom_ids')
if options.featMapFile:
if options.featMapFile=='-':
options.featMapFile=sys.stdout
else:
options.featMapFile=open(options.featMapFile,'w+')
print('# Feature map generated by ShowFeats v%s'%_version, file=options.featMapFile)
print("ScoreMode=All", file=options.featMapFile)
print("DirScoreMode=Ignore", file=options.featMapFile)
print("BeginParams", file=options.featMapFile)
for family in factory.GetFeatureFamilies():
print(" family=%s width=1.0 radius=3.0"%family, file=options.featMapFile)
print("EndParams", file=options.featMapFile)
print("BeginPoints", file=options.featMapFile)
i = 1
for midx,molN in enumerate(args):
if molN!='-':
featLabel='%s_Feats'%molN
else:
featLabel='Mol%d_Feats'%(midx+1)
v.server.resetCGO(featLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0,0,0),.01,(1,0,1),featLabel)
dirLabel=featLabel+"-dirs"
v.server.resetCGO(dirLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0,0,0),(.01,.01,.01),.01,(1,0,1),dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
except Exception:
logger.error('Problems reading input file: %s'%molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = 'Mol_%d'%(i)
if m.HasProp('_Name'):
nm += '_'+m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print("#Molecule: %s"%m.GetProp('_Name'))
else:
print("#Molecule: %s"%nm)
ShowMolFeats(m,factory,v,transparency=0.25,excludeTypes=options.exclude,name=nm,
showOnly=False,
useFeatDirs=options.useDirs,
featLabel=featLabel,dirLabel=dirLabel,
includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats,showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i%100:
logger.info("Done %d poses"%i)
if ms:
v.server.renderCGO(_globalSphereCGO,featLabel,1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO,dirLabel,1)
if options.featMapFile:
print("EndPoints",file=options.featMapFile)
sys.exit(0)
| {
"repo_name": "soerendip42/rdkit",
"path": "rdkit/Chem/Features/ShowFeats.py",
"copies": "3",
"size": "12126",
"license": "bsd-3-clause",
"hash": 1101365321808392000,
"line_mean": 32.9663865546,
"line_max": 112,
"alpha_frac": 0.6123206333,
"autogenerated": false,
"ratio": 3.100485809255945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997001296690486,
"avg_score": 0.04855869513021719,
"num_lines": 357
} |
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
from __future__ import print_function
_version = "0.3.2"
_usage = """
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage = "This is ShowFeats version %s" % (_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor': (0, 1, 1),
'Acceptor': (1, 0, 1),
'NegIonizable': (1, 0, 0),
'PosIonizable': (0, 0, 1),
'ZnBinder': (1, .5, .5),
'Aromatic': (1, .8, .2),
'LumpedHydrophobe': (.5, .25, 0),
'Hydrophobe': (.5, .25, 0),
}
def _getVectNormal(v, tol=1e-4):
if math.fabs(v.x) > tol:
res = Geometry.Point3D(v.y, -v.x, 0)
elif math.fabs(v.y) > tol:
res = Geometry.Point3D(-v.y, v.x, 0)
elif math.fabs(v.z) > tol:
res = Geometry.Point3D(1, 0, 0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
_canonArrowhead = None
def _buildCanonArrowhead(headFrac, nSteps, aspect):
global _canonArrowhead
startP = Geometry.Point3D(0, 0, headFrac)
_canonArrowhead = [startP]
scale = headFrac * aspect
baseV = Geometry.Point3D(scale, 0, 0)
_canonArrowhead.append(baseV)
twopi = 2 * math.pi
for i in range(1, nSteps):
v = Geometry.Point3D(scale * math.cos(i * twopi / nSteps), scale * math.sin(i * twopi / nSteps), 0)
_canonArrowhead.append(v)
_globalArrowCGO = []
_globalSphereCGO = []
# taken from pymol's cgo.py
BEGIN = 2
END = 3
TRIANGLE_FAN = 6
COLOR = 6
VERTEX = 4
NORMAL = 5
SPHERE = 7
CYLINDER = 9
ALPHA = 25
def _cgoArrowhead(viewer, tail, head, radius, color, label, headFrac=0.3, nSteps=10, aspect=.5):
global _globalArrowCGO
delta = head - tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head - tail
dv.Normalize()
dv *= headFrac
startP = head
normal *= headFrac * aspect
cgo = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, dv.x, dv.y, dv.z, VERTEX,
head.x + dv.x, head.y + dv.y, head.z + dv.z]
base = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, -dv.x, -dv.y, -dv.z,
VERTEX, head.x, head.y, head.z]
v = startP + normal
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
for i in range(1, nSteps):
v = FeatDirUtils.ArbAxisRotation(360. / nSteps * i, delta, normal)
cgo.extend([NORMAL, v.x, v.y, v.z])
v += startP
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
base.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer, tail, head, radius, color, label, transparency=0, includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA, 1 - transparency])
else:
_globalArrowCGO.extend([ALPHA, 1])
_globalArrowCGO.extend([CYLINDER,
tail.x,
tail.y,
tail.z,
head.x,
head.y,
head.z,
radius * .10,
color[0],
color[1],
color[2],
color[0],
color[1],
color[2], ])
if includeArrowhead:
_cgoArrowhead(viewer, tail, head, radius, color, label)
def ShowMolFeats(mol, factory, viewer, radius=0.5, confId=-1, showOnly=True, name='',
transparency=0.0, colors=None, excludeTypes=[], useFeatDirs=True, featLabel=None,
dirLabel=None, includeArrowheads=True, writeFeats=False, showMol=True,
featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol, name=name, showOnly=showOnly, confId=confId)
molFeats = factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel = '%s-feats' % name
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel = featLabel + "-dirs"
viewer.server.resetCGO(dirLabel)
for i, feat in enumerate(molFeats):
family = feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family, (.5, .5, .5))
nm = '%s(%d)' % (family, i + 1)
if transparency:
_globalSphereCGO.extend([ALPHA, 1 - transparency])
else:
_globalSphereCGO.extend([ALPHA, 1])
_globalSphereCGO.extend([COLOR, color[0], color[1], color[2], SPHERE, pos.x, pos.y, pos.z,
radius])
if writeFeats:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print('%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s' % (family, pos.x, pos.y, pos.z, aidText))
if featMapFile:
print(" family=%s pos=(%.3f,%.3f,%.3f) weight=1.0" % (family, pos.x, pos.y, pos.z), end='',
file=featMapFile)
if useFeatDirs:
ps = []
if family == 'Aromatic':
ps, fType = FeatDirUtils.GetAromaticFeatVects(
mol.GetConformer(confId), feat.GetAtomIds(), pos, scale=1.0)
elif family == 'Donor':
aids = feat.GetAtomIds()
if len(aids) == 1:
featAtom = mol.GetAtomWithIdx(aids[0])
hvyNbrs = [x for x in featAtom.GetNeighbors() if x.GetAtomicNum() != 1]
if len(hvyNbrs) == 1:
ps, fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 2:
ps, fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 3:
ps, fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId), aids, scale=1.0)
elif family == 'Acceptor':
aids = feat.GetAtomIds()
if len(aids) == 1:
featAtom = mol.GetAtomWithIdx(aids[0])
hvyNbrs = [x for x in featAtom.GetNeighbors() if x.GetAtomicNum() != 1]
if len(hvyNbrs) == 1:
ps, fType = FeatDirUtils.GetAcceptor1FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 2:
ps, fType = FeatDirUtils.GetAcceptor2FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
elif len(hvyNbrs) == 3:
ps, fType = FeatDirUtils.GetAcceptor3FeatVects(
mol.GetConformer(confId), aids, scale=1.0)
for tail, head in ps:
ShowArrow(viewer, tail, head, radius, color, dirLabel, transparency=transparency,
includeArrowhead=includeArrowheads)
if featMapFile:
vect = head - tail
print('dir=(%.3f,%.3f,%.3f)' % (vect.x, vect.y, vect.z), end=' ', file=featMapFile)
if featMapFile:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print('# %s' % (aidText), file=featMapFile)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys, os, getopt
from rdkit import RDConfig
from optparse import OptionParser
parser = OptionParser(_usage, version='%prog ' + _version)
parser.add_option('-x', '--exclude', default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f', '--fdef', default=os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs', '--nodirs', dest='useDirs', default=True, action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads', dest='includeArrowheads', default=True, action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear', '--noClear', dest='clearAll', default=False, action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols', '--nomols', default=False, action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats', '--write', default=False, action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile', '--mapFile', default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose', default=False, action='store_true', help='be verbose')
if __name__ == '__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options, args = parser.parse_args()
if len(args) < 1:
parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
except Exception:
logger.error(
'Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = open(options.fdef, 'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s' % options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print('# Family \tX \tY \tZ \tRadius\t # Atom_ids')
if options.featMapFile:
if options.featMapFile == '-':
options.featMapFile = sys.stdout
else:
options.featMapFile = open(options.featMapFile, 'w+')
print('# Feature map generated by ShowFeats v%s' % _version, file=options.featMapFile)
print("ScoreMode=All", file=options.featMapFile)
print("DirScoreMode=Ignore", file=options.featMapFile)
print("BeginParams", file=options.featMapFile)
for family in factory.GetFeatureFamilies():
print(" family=%s width=1.0 radius=3.0" % family, file=options.featMapFile)
print("EndParams", file=options.featMapFile)
print("BeginPoints", file=options.featMapFile)
i = 1
for midx, molN in enumerate(args):
if molN != '-':
featLabel = '%s_Feats' % molN
else:
featLabel = 'Mol%d_Feats' % (midx + 1)
v.server.resetCGO(featLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0, 0, 0), .01, (1, 0, 1), featLabel)
dirLabel = featLabel + "-dirs"
v.server.resetCGO(dirLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0, 0, 0), (.01, .01, .01), .01, (1, 0, 1), dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
except Exception:
logger.error('Problems reading input file: %s' % molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = 'Mol_%d' % (i)
if m.HasProp('_Name'):
nm += '_' + m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print("#Molecule: %s" % m.GetProp('_Name'))
else:
print("#Molecule: %s" % nm)
ShowMolFeats(m, factory, v, transparency=0.25, excludeTypes=options.exclude, name=nm,
showOnly=False, useFeatDirs=options.useDirs, featLabel=featLabel,
dirLabel=dirLabel, includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats, showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i % 100:
logger.info("Done %d poses" % i)
if ms:
v.server.renderCGO(_globalSphereCGO, featLabel, 1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO, dirLabel, 1)
if options.featMapFile:
print("EndPoints", file=options.featMapFile)
sys.exit(0)
| {
"repo_name": "jandom/rdkit",
"path": "rdkit/Chem/Features/ShowFeats.py",
"copies": "5",
"size": "12247",
"license": "bsd-3-clause",
"hash": 1397273997634990300,
"line_mean": 33.5960451977,
"line_max": 101,
"alpha_frac": 0.6077406712,
"autogenerated": false,
"ratio": 3.074817976399699,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6182558647599699,
"avg_score": null,
"num_lines": null
} |
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
_version = "0.3.2"
_usage="""
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage="This is ShowFeats version %s"%(_version)
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor':(0,1,1),
'Acceptor':(1,0,1),
'NegIonizable':(1,0,0),
'PosIonizable':(0,0,1),
'ZnBinder':(1,.5,.5),
'Aromatic':(1,.8,.2),
'LumpedHydrophobe':(.5,.25,0),
'Hydrophobe':(.5,.25,0),
}
def _getVectNormal(v,tol=1e-4):
if math.fabs(v.x)>tol:
res = Geometry.Point3D(v.y,-v.x,0)
elif math.fabs(v.y)>tol:
res = Geometry.Point3D(-v.y,v.x,0)
elif math.fabs(v.z)>tol:
res = Geometry.Point3D(1,0,0)
else:
raise ValueError,'cannot find normal to the null vector'
res.Normalize()
return res
_canonArrowhead=None
def _buildCanonArrowhead(headFrac,nSteps,aspect):
global _canonArrowhead
startP = Geometry.Point3D(0,0,headFrac)
_canonArrowhead=[startP]
scale = headFrac*aspect
baseV = Geometry.Point3D(scale,0,0)
_canonArrowhead.append(baseV)
twopi = 2*math.pi
for i in range(1,nSteps):
v = Geometry.Point3D(scale*math.cos(i*twopi/nSteps),scale*math.sin(i*twopi/nSteps),0)
_canonArrowhead.append(v)
_globalArrowCGO=[]
_globalSphereCGO=[]
# taken from pymol's cgo.py
BEGIN=2
END=3
TRIANGLE_FAN=6
COLOR=6
VERTEX=4
NORMAL=5
SPHERE=7
CYLINDER=9
ALPHA=25
def _cgoArrowhead(viewer,tail,head,radius,color,label,headFrac=0.3,nSteps=10,aspect=.5):
global _globalArrowCGO
delta = head-tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head-tail
dv.Normalize()
dv *= headFrac
startP = head
normal*=headFrac*aspect
cgo = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,dv.x,dv.y,dv.z,
VERTEX,head.x+dv.x,head.y+dv.y,head.z+dv.z]
base = [BEGIN,TRIANGLE_FAN,
COLOR,color[0],color[1],color[2],
NORMAL,-dv.x,-dv.y,-dv.z,
VERTEX,head.x,head.y,head.z]
v = startP+normal
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
for i in range(1,nSteps):
v = FeatDirUtils.ArbAxisRotation(360./nSteps*i,delta,normal)
cgo.extend([NORMAL,v.x,v.y,v.z])
v += startP
cgo.extend([VERTEX,v.x,v.y,v.z])
base.extend([VERTEX,v.x,v.y,v.z])
cgo.extend([NORMAL,normal.x,normal.y,normal.z])
cgo.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
base.extend([VERTEX,startP.x+normal.x,startP.y+normal.y,startP.z+normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer,tail,head,radius,color,label,transparency=0,includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA,1-transparency])
else:
_globalArrowCGO.extend([ALPHA,1])
_globalArrowCGO.extend([CYLINDER,tail.x,tail.y,tail.z,
head.x,head.y,head.z,
radius*.10,
color[0],color[1],color[2],
color[0],color[1],color[2],
])
if includeArrowhead:
_cgoArrowhead(viewer,tail,head,radius,color,label)
def ShowMolFeats(mol,factory,viewer,radius=0.5,confId=-1,showOnly=True,
name='',transparency=0.0,colors=None,excludeTypes=[],
useFeatDirs=True,featLabel=None,dirLabel=None,includeArrowheads=True,
writeFeats=False,showMol=True,featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol,name=name,showOnly=showOnly,confId=confId)
molFeats=factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel='%s-feats'%name
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel=featLabel+"-dirs"
viewer.server.resetCGO(dirLabel)
for i,feat in enumerate(molFeats):
family=feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family,(.5,.5,.5))
nm = '%s(%d)'%(family,i+1)
if transparency:
_globalSphereCGO.extend([ALPHA,1-transparency])
else:
_globalSphereCGO.extend([ALPHA,1])
_globalSphereCGO.extend([COLOR,color[0],color[1],color[2],
SPHERE,pos.x,pos.y,pos.z,
radius])
if writeFeats:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print '%s\t%.3f\t%.3f\t%.3f\t1.0\t# %s'%(family,pos.x,pos.y,pos.z,aidText)
if featMapFile:
print >>featMapFile," family=%s pos=(%.3f,%.3f,%.3f) weight=1.0"%(family,pos.x,pos.y,pos.z),
if useFeatDirs:
ps = []
if family=='Aromatic':
ps,fType = FeatDirUtils.GetAromaticFeatVects(mol.GetConformer(confId),
feat.GetAtomIds(),pos,
scale=1.0)
elif family=='Donor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetDonor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetDonor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetDonor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif family=='Acceptor':
aids = feat.GetAtomIds()
if len(aids)==1:
featAtom=mol.GetAtomWithIdx(aids[0])
hvyNbrs=[x for x in featAtom.GetNeighbors() if x.GetAtomicNum()!=1]
if len(hvyNbrs)==1:
ps,fType = FeatDirUtils.GetAcceptor1FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==2:
ps,fType = FeatDirUtils.GetAcceptor2FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
elif len(hvyNbrs)==3:
ps,fType = FeatDirUtils.GetAcceptor3FeatVects(mol.GetConformer(confId),
aids,scale=1.0)
for tail,head in ps:
ShowArrow(viewer,tail,head,radius,color,dirLabel,
transparency=transparency,includeArrowhead=includeArrowheads)
if featMapFile:
vect = head-tail
print >>featMapFile,'dir=(%.3f,%.3f,%.3f)'%(vect.x,vect.y,vect.z),
if featMapFile:
aidText = ' '.join([str(x+1) for x in feat.GetAtomIds()])
print >>featMapFile,'# %s'%(aidText)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys,os,getopt
from rdkit import RDConfig
from optparse import OptionParser
parser=OptionParser(_usage,version='%prog '+_version)
parser.add_option('-x','--exclude',default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f','--fdef',default=os.path.join(RDConfig.RDDataDir,'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs','--nodirs',dest='useDirs',default=True,action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads',dest='includeArrowheads',default=True,action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear','--noClear',dest='clearAll',default=False,action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols','--nomols',default=False,action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats','--write',default=False,action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile','--mapFile',default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose',default=False,action='store_true',
help='be verbose')
if __name__=='__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options,args = parser.parse_args()
if len(args)<1:
parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
except Exception:
logger.error('Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = file(options.fdef,'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s'%options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print '# Family \tX \tY \tZ \tRadius\t # Atom_ids'
if options.featMapFile:
if options.featMapFile=='-':
options.featMapFile=sys.stdout
else:
options.featMapFile=file(options.featMapFile,'w+')
print >>options.featMapFile,'# Feature map generated by ShowFeats v%s'%_version
print >>options.featMapFile,"ScoreMode=All"
print >>options.featMapFile,"DirScoreMode=Ignore"
print >>options.featMapFile,"BeginParams"
for family in factory.GetFeatureFamilies():
print >>options.featMapFile," family=%s width=1.0 radius=3.0"%family
print >>options.featMapFile,"EndParams"
print >>options.featMapFile,"BeginPoints"
i = 1
for midx,molN in enumerate(args):
if molN!='-':
featLabel='%s_Feats'%molN
else:
featLabel='Mol%d_Feats'%(midx+1)
v.server.resetCGO(featLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0,0,0),.01,(1,0,1),featLabel)
dirLabel=featLabel+"-dirs"
v.server.resetCGO(dirLabel)
# this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0,0,0),(.01,.01,.01),.01,(1,0,1),dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
except Exception:
logger.error('Problems reading input file: %s'%molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = 'Mol_%d'%(i)
if m.HasProp('_Name'):
nm += '_'+m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print "#Molecule: %s"%m.GetProp('_Name')
else:
print "#Molecule: %s"%nm
ShowMolFeats(m,factory,v,transparency=0.25,excludeTypes=options.exclude,name=nm,
showOnly=False,
useFeatDirs=options.useDirs,
featLabel=featLabel,dirLabel=dirLabel,
includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats,showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i%100:
logger.info("Done %d poses"%i)
if ms:
v.server.renderCGO(_globalSphereCGO,featLabel,1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO,dirLabel,1)
if options.featMapFile:
print >>options.featMapFile,"EndPoints"
sys.exit(0)
| {
"repo_name": "rdkit/rdkit-orig",
"path": "rdkit/Chem/Features/ShowFeats.py",
"copies": "2",
"size": "12019",
"license": "bsd-3-clause",
"hash": -4812932672994946000,
"line_mean": 32.8563380282,
"line_max": 112,
"alpha_frac": 0.611198935,
"autogenerated": false,
"ratio": 3.102478058853898,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4713676993853898,
"avg_score": null,
"num_lines": null
} |
"""Classes for events and event callbacks, which can be used by the
EventManager class."""
class Event (object):
"""Event (signal, data) -> Event
Creates a new Event object for the event management system.
The event object is a simple 'key-value' association containing a
signal identifier ('signal') and additional data.
Attributes:
signal - Identifier of the event (the passed signal).
data - Attached data to distribute with the event.
handled - Indicates whether the event was handled by an object.
"""
__slots__ = [ "signal", "data", "handled" ]
def __init__ (self, signal, data):
self.signal = signal
self.data = data
self.handled = False
class EventCallback (object):
"""EventCallback (signal, callback, *data) -> EventCallback
Creates a new EventCallback to use as a signal handler for objects.
The EventCallback can be used as a container object for event
mechanisms that need signal-to-method bindings.
The connected callback can be invoked using the EventCallback.run()
method.
Attributes:
signal - Identifier of the event to wait for.
callback - Function/method to invoke upon receiving the event.
data - Data to pass to the function/method.
"""
__slots__ = ["signal", "data", "callback"]
def __init__ (self, signal, callback, *data):
self.signal = signal
if not callable (callback):
raise TypeError ("callback is not callable")
self.callback = callback
self.data = data
def run (self, *data):
"""E.run (...)
Invokes the callback of the EventCallback.
If additional data is supplied via the 'data' argument, the
already attached callback data in the EventCallback.data
attribute will be concatenated to it and the result will be
passed to the callback.
"""
d = data + self.data
if len (d) != 0:
self.callback (*d)
else:
self.callback ()
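# A minimal usage sketch (hypothetical signal name and handler):
#
#   def on_clicked (widget, userdata):
#       print "clicked:", widget, userdata
#
#   cb = EventCallback ("clicked", on_clicked, "extra")
#   cb.run ("button-1")        # invokes on_clicked ("button-1", "extra")
#
#   ev = Event ("clicked", "button-1")
#   ev.handled = True          # mark the event as consumed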
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/events/Signals.py",
"copies": "1",
"size": "3476",
"license": "bsd-2-clause",
"hash": 3169135536607548000,
"line_mean": 38.0561797753,
"line_max": 78,
"alpha_frac": 0.6901611047,
"autogenerated": false,
"ratio": 4.4909560723514215,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5681117177051421,
"avg_score": null,
"num_lines": null
} |
#$Id: simPIRDetectBatch.py,v 1.1 2005/07/19 17:47:46 phoebusc Exp $
#
# NOT FULLY WORKING. SEE NOTES.
#
# First, run in a separate window:
# build/pc/main.exe -b=1 -gui 5 # assuming 5 nodes in your file
# Then, run like this at TestPIRDetectNoReg directory:
# java net.tinyos.sim.SimDriver -nosf -script "simPIRDetect.py" -scriptargs "filename"
#
# This script will:
# 1) Figure out the number of nodes that have data in the "filename" argument.
# 2) Feed in adc readings from "filename" to the parallel simulation
# 3) Collect simulation output in files
# 4) never exit the simulator... just reset it for the next input file, if any
#
# INPUT:
# - If "filename" is a directory, processes all files in the directory
# that do not end in '_output'. Otherwise, just process the file.
# - See parseInput.py for Input file format for parsing
#
# OUTPUT:
# - Dumps Raw Oscope Messages
# - "filename"_output (or a directory worth of "filename"_output)
# - Alternately, if you can also use tossim-radio on the SerialForwarder
# and Listen.java to dump Oscope output to a file
# - batchOut file with important messages for batch of simulations
# (ex. files skipped)
#
# NOTE:
# - Currently, batch mode within this script does not work properly because
# of issues with getting sim.exec() working or finding a version of sim.reset()
# that gets us back to the initial state of the simulator, with a certain number
# of nodes started.
# - Look at ADCread() for how to cleanup and start next simulation with nextFile()
# - Does not actually process last piece of data injected by ADC,
# because resets simulator. Not perceived to be a big problem.
from simcore import *
from net.tinyos.sim.event import DebugMsgEvent
from net.tinyos.sim.event import ADCDataReadyEvent
from net.tinyos.sim.event import UARTMsgSentEvent
from net.tinyos.sim.event import RadioMsgSentEvent
from net.tinyos.sim.event import SimEventBus
from net.tinyos.sim.event import TossimInitEvent
from net.tinyos.oscope import oscilloscope
from net.tinyos.oscope import OscopeMsg
from os import listdir
from os.path import isfile, isdir
from time import sleep
from re import search
import parseInput
#### Global Variables ####
ADCport = 1 # default PIR Sensor Port
out = None # file handle for output file
init_event = None # used to start the next simulation
# for batchs on directories
firstExecution = True
batchOutname = "simPIRDetect_messages" # for noting skipped files, etc.
# not strictly necessary to list here, but for reader reference
batchOut = None
filename = ""
outFilename = ""
dirname = ""
fileList = []
## Called for each separate simulation for setup
## 1) registers event handlers with simulator
## 2) calls function to parse input data and store in
## (data,nodeState,minSeqNo,maxSeqNo)
## 3) sets up remaining global variables (see global declaration)
## 4) Checks that TOSSIM simulation has enough nodes for script
## aborts/proceeds to next file if not
## 5) opens output file for writing
## - Note that some commands (like registering event handlers)
## must happen only once for all file simulations.
## - Aborting a simulation involves not registering the
## init_event handler and processing the next file
def setupSim():
global data, nodeState, minSeqNo, maxSeqNo, currentSeqNo
global numNodes, totalFinished, out, firstExecution, init_event
# only execute once for all input files
if (firstExecution):
# uart_event = interp.addEventHandler(printUART, UARTMsgSentEvent)
radio_event = interp.addEventHandler(printRadio, RadioMsgSentEvent)
dbg_event = interp.addEventHandler(printDBG, DebugMsgEvent)
adc_event = interp.addEventHandler(ADCread, ADCDataReadyEvent)
firstExecution = False
print("Preparing to simulate using " + filename)
dataList = parseInput.parseFile("xsm",filename)
data = dataList[0]
nodeState = dataList[1]
minSeqNo = dataList[2]
maxSeqNo = dataList[3]
currentSeqNo = {}
for i in nodeState.keys():
currentSeqNo[i] = 0
totalFinished = 0
numNodes = len(nodeState)
sleep(2) # wait for simulator to catch up and update 'motes'
# Check that we have more simulated motes than the largest moteID
# otherwise, the script will never end. (use 'motes' from simcore)
if (max(nodeState.keys()) >= len(motes)):
batchOut.write("Skipping file %s.\n" %filename)
batchOut.write("\t File has largest mote ID = %d. The simulator is only simulating %d nodes.\n"
%(max(nodeState.keys()), len(motes)))
batchOut.write("\t The simulator needs to simulate 1 node more than the largest mote ID.\n")
print("Skipping file %s." %filename)
print("\t File has largest mote ID = %d. The simulator is only simulating %d nodes."
%(max(nodeState.keys()), len(motes)))
print("\t The simulator needs to simulate 1 node more than the largest mote ID.")
nextFile()
else:
out = open(outFilename,'w')
print("Outputting to %s" %outFilename)
init_event = interp.addEventHandler(handleInit, TossimInitEvent)
sim.resume() # in case the init event was signaled before scheduling the handler
print "Simulation with " + str(numNodes) + " nodes"
##### Event Handlers Start #####
## Reads ADC values from 'data' and injects it to the ADCport of the
## appropriate mote.
## Also includes cleanup code to finish one simulation
def ADCread(event):
global nodeState, currentSeqNo, totalFinished, out
nodeID = event.getMoteID()
thisTime = sim.__driver.getTossimTime()
print nodeState
if not(nodeState.has_key(nodeID)):
comm.setADCValue(nodeID,thisTime+1,ADCport,0)
print("ADCread: nodeID %d has no input data, set ADCport to 0"
%(nodeID))
return
seqNo = currentSeqNo[nodeID] + minSeqNo[nodeID]
currentSeqNo[nodeID] += 1
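# readings are keyed "<nodeID>_<seqNo>" in the data dictionary built by
# parseInput.parseFile()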
key = str(nodeID) + '_' + str(seqNo)
#print("seqNo: " + str(seqNo))
#print("maxSeqNo[%d]: %d" %(nodeID, maxSeqNo[nodeID]))
#print("key: " + str(key))
#print("data: " + str(data))
if data.has_key(key):
value = data[key]
comm.setADCValue(nodeID,thisTime+1,ADCport,value)
else:
comm.setADCValue(nodeID,thisTime+1,ADCport,0)
if (seqNo >= maxSeqNo[nodeID]):
if (nodeState[nodeID] == 1):
print("Finished nodeID: " + str(nodeID))
nodeState[nodeID] = 2
totalFinished += 1
if (totalFinished == numNodes): ##SIMULATION FINISH CLEANUP CODE
print("Ran out of ADC readings for all nodes in " + filename)
print("Closing " + outFilename)
out.close()
interp.removeEventHandler(init_event) # for next simulation
sim.stop()
sim.reset() #unfortunately, resets all motes to 0 again
nextFile()
else: # we lost some data (data was set to 0)
print("ADCread: input data for node %d at time %.2f, sequence number %d missing"
%(nodeID, (thisTime+1), seqNo))
### Helper function for printing
# make sure get two digits for each byte string
# dataHex is a 'hex number' with length 3 or 4
# (has '0x' before each string)
def twoHexDigits(dataHex):
if (len(dataHex) == 4):
return str(dataHex[2:])
elif (len(dataHex) == 3):
return "0" + str(dataHex[2:])
else:
print "2-digit Hex Conversion Error, wrong length input: " + dataHex
return "00"
#def printRadio(event):
#time = sim.__driver.getTossimTime()
#secTime = (time / 4000.0) / 1000.0
#out.write('time: ' + str(secTime) + "\n")
#out.write(event.toString() + "\n")
#print 'time: %.2f' %(secTime)
#print event
def printRadio(event):
print event
msg = event.getMessage().dataGet()
s = ""
for i in range(0,len(msg)):
s += twoHexDigits(hex(0xff & msg[i])) + " "
s+= "\n"
out.write(s)
#out.flush()
def printDBG(event):
thisTime = sim.__driver.getTossimTime()
secTime = (thisTime / 4000.0) / 1000.0
s = event.toString()
if ((s.find(' - ') != -1) and
((s.find('TestPIRDetectM') != -1) or
(s.find('OscopeM') != -1))):
#out.write('time: ' + str(secTime) + "\n")
#out.write(event.toString() + "\n")
print('time: %.2f' %(secTime))
print event
def handleInit(event):
sim.resume()
##### Event Handlers Stop #####
## called by the main program or ADCread after
## finishing simulating with one file to call setupSim()
## for next simulation with proper filenames, or to finish
## the batch of simulations.
def nextFile():
global filename, outFilename, fileList
fileFlag = 0
while (len(fileList) > 0) and (fileFlag == 0):
f = fileList.pop(0)
f = dirname + '/' + f
if (isfile(f) and (search('_output$',f) == None)):
fileFlag = 1
filename = f
outFilename = filename + "_output"
setupSim()
# finishes immediately if last entries of fileList are directories
if ((len(fileList) == 0) and (fileFlag == 0)):
print "Finished simulation with file/all files in directory."
batchOut.close()
sim.exit()
### Main Program ###
## Currently, it is best to start up TOSSIM as a separate process
## I have not successfully gotten sim.exec() to work, even with multitest.py
## in tools/java/net/tinyos/sim/pyscripts
#sim.exec("build/pc/main.exe",numNodes,"-b=1")
batchOut = open(batchOutname,'w') # for writing important, short output messages
inputArg = sim.__driver.getScriptArgs()
if isfile(inputArg):
dirname = "."
fileList = [inputArg]
nextFile()
elif isdir(inputArg): #directory looping
dirname = inputArg
fileList = listdir(inputArg)
nextFile()
| {
"repo_name": "ekiwi/tinyos-1.x",
"path": "contrib/nestfe/nesc/apps/TestPIRDetectNoReg/simPIRDetectBatch.py",
"copies": "2",
"size": "9918",
"license": "bsd-3-clause",
"hash": 7946766743356371000,
"line_mean": 35.5977859779,
"line_max": 104,
"alpha_frac": 0.6593063118,
"autogenerated": false,
"ratio": 3.5207667731629395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01586732545693107,
"num_lines": 271
} |
#$Id: simPIRDetect.py,v 1.2 2005/07/19 17:47:46 phoebusc Exp $
# First, run in a separate window:
# build/pc/main.exe -b=1 -gui 5 # assuming 5 nodes in your file
# Then, run like this at TestPIRDetectNoReg directory:
# java net.tinyos.sim.SimDriver -nosf -script "simPIRDetect.py" -scriptargs "filename simLogName"
#
# This script will:
# 1) Figure out the number of nodes that have data in the "filename" argument.
# 2) Feed in adc readings from "filename" to the parallel simulation
# 3) Collect simulation output in "filename"_output
#
# INPUT:
# - See parseInput.py for Input file format for parsing... you will need to
# edit the arguments used in the script to call parseInput.parseFile()
#
# OUTPUT:
# - Dumps Raw Oscope Messages in "filename"_output
# - Alternately, if you can also use tossim-radio on the SerialForwarder
# and Listen.java to dump Oscope output to a file
# - Appends to simLog file with important messages (ex. file skipped)
#
# NOTE:
# - Look at ADCread() for how to cleanup and end simulation
# - Does not actually process last piece of data injected by ADC,
# because resets simulator. Not perceived to be a big problem.
from simcore import *
from net.tinyos.sim.event import DebugMsgEvent
from net.tinyos.sim.event import ADCDataReadyEvent
from net.tinyos.sim.event import UARTMsgSentEvent
from net.tinyos.sim.event import RadioMsgSentEvent
from net.tinyos.sim.event import SimEventBus
from net.tinyos.sim.event import TossimInitEvent
from net.tinyos.oscope import oscilloscope
from net.tinyos.oscope import OscopeMsg
from os.path import isfile
from time import sleep
import parseInput
#### Global Variables ####
ADCport = 1 # default PIR Sensor Port
out = "" # file handle for output file
simLogname = "simPIRDetect_messages" # for noting skipped files, etc.
# not strictly necessary to list here, but for reader reference
simLog = None
filename = ""
outFilename = ""
## 1) calls function to parse input data and store in
## (data,nodeState,minSeqNo,maxSeqNo)
## 2) sets up remaining global variables (see global declaration)
## 3) Checks that TOSSIM simulation has enough nodes for script
## aborts/proceeds to next file if not
## 4) opens output file for writing
## 5) registers event handlers with simulator
def setupSim():
global data, nodeState, minSeqNo, maxSeqNo, currentSeqNo
global numNodes, totalFinished, out
print("Preparing to simulate using " + filename)
dataList = parseInput.parseFile("xsm",filename)
data = dataList[0]
nodeState = dataList[1]
minSeqNo = dataList[2]
maxSeqNo = dataList[3]
currentSeqNo = {}
for i in nodeState.keys():
currentSeqNo[i] = 0
totalFinished = 0
numNodes = len(nodeState)
sleep(1) #make sure that the simulator has time to update 'motes'
# Check that we have more simulated motes than the largest moteID
# otherwise, the script will never end. (use 'motes' from simcore)
if (max(nodeState.keys()) >= len(motes)):
simLog.write("Skipping file %s.\n" %filename)
simLog.write("\t File has largest mote ID = %d. The simulator is only simulating %d nodes.\n"
%(max(nodeState.keys()), len(motes)))
simLog.write("\t The simulator needs to simulate 1 node more than the largest mote ID.\n")
print("Skipping file %s." %filename)
print("\t File has largest mote ID = %d. The simulator is only simulating %d nodes."
%(max(nodeState.keys()), len(motes)))
print("\t The simulator needs to simulate 1 node more than the largest mote ID.")
else:
out = open(outFilename,'w')
print("Outputting to %s" %outFilename)
print "Simulation with " + str(numNodes) + " nodes"
#uart_event = interp.addEventHandler(printUART, UARTMsgSentEvent)
radio_event = interp.addEventHandler(printRadio, RadioMsgSentEvent)
dbg_event = interp.addEventHandler(printDBG, DebugMsgEvent)
adc_event = interp.addEventHandler(ADCread, ADCDataReadyEvent)
init_event = interp.addEventHandler(handleInit, TossimInitEvent)
sim.resume() #in case we miss an init_event signaled earlier
##### Event Handlers Start #####
## Reads ADC values from 'data' and injects it to the ADCport of the
## appropriate mote.
## Also includes cleanup code to finish one simulation
def ADCread(event):
global nodeState, currentSeqNo, totalFinished, out
nodeID = event.getMoteID()
thisTime = sim.__driver.getTossimTime()
if not(nodeState.has_key(nodeID)):
comm.setADCValue(nodeID,thisTime+1,ADCport,0)
print("ADCread: nodeID %d has no input data, set ADCport to 0"
%(nodeID))
return
seqNo = currentSeqNo[nodeID] + minSeqNo[nodeID]
currentSeqNo[nodeID] += 1
key = str(nodeID) + '_' + str(seqNo)
print("seqNo: " + str(seqNo))
print("maxSeqNo[%d]: %d" %(nodeID, maxSeqNo[nodeID]))
#print("key: " + str(key))
#print("data: " + str(data))
if data.has_key(key):
value = data[key]
comm.setADCValue(nodeID,thisTime+1,ADCport,value)
else:
comm.setADCValue(nodeID,thisTime+1,ADCport,0)
if (seqNo >= maxSeqNo[nodeID]):
if (nodeState[nodeID] == 1):
print("Finished nodeID: " + str(nodeID))
nodeState[nodeID] = 2
totalFinished += 1
if (totalFinished == numNodes): ##SIMULATION FINISH CLEANUP CODE
print("Ran out of ADC readings for all nodes in " + filename)
print("Closing " + outFilename)
finishSim()
else: # we lost some data (data was set to 0)
print("ADCread: input data for node %d at time %.2f, sequence number %d missing"
%(nodeID, (thisTime+1), seqNo))
### Helper function for printing
# make sure get two digits for each byte string
# dataHex is a 'hex number' with length 3 or 4
# (has '0x' before each string)
def twoHexDigits(dataHex):
if (len(dataHex) == 4):
return str(dataHex[2:])
elif (len(dataHex) == 3):
return "0" + str(dataHex[2:])
else:
print "2-digit Hex Conversion Error, wrong length input: " + dataHex
return "00"
#def printRadio(event):
#time = sim.__driver.getTossimTime()
#secTime = (time / 4000.0) / 1000.0
#out.write('time: ' + str(secTime) + "\n")
#out.write(event.toString() + "\n")
#print 'time: %.2f' %(secTime)
#print event
def printRadio(event):
print event
msg = event.getMessage().dataGet()
s = ""
for i in range(0,len(msg)):
s += twoHexDigits(hex(0xff & msg[i])) + " "
s+= "\n"
out.write(s)
#out.flush()
def printDBG(event):
thisTime = sim.__driver.getTossimTime()
secTime = (thisTime / 4000.0) / 1000.0
s = event.toString()
if ((s.find(' - ') != -1) and
((s.find('TestPIRDetectM') != -1) or
(s.find('OscopeM') != -1))):
#out.write('time: ' + str(secTime) + "\n")
#out.write(event.toString() + "\n")
print('time: %.2f' %(secTime))
print event
def handleInit(event):
sim.resume()
##### Event Handlers Stop #####
## called by the main program or ADCread after
## to finish the simulation
def finishSim():
simLog.write("Finished simulation on file %s\n" %filename)
simLog.close()
out.close()
sim.stop()
sim.exit()
print("Finished simulation on file %s" %filename)
### Main Program ###
## remember, must start up TOSSIM as a separate process before starting
## this script
inputArg = sim.__driver.getScriptArgs()
argsList = inputArg.split()
if len(argsList) > 1:
simLogname = argsList[1]
simLog = open(simLogname,'a') # for writing important, short output messages
filename = argsList[0]
if isfile(filename):
outFilename = filename + "_output"
setupSim()
else:
print("%s is not a valid filename" %inputArg)
| {
"repo_name": "fresskarma/tinyos-1.x",
"path": "contrib/nestfe/nesc/apps/TestPIRDetectNoReg/simPIRDetect.py",
"copies": "2",
"size": "7929",
"license": "bsd-3-clause",
"hash": -6831259501337374000,
"line_mean": 33.9295154185,
"line_max": 102,
"alpha_frac": 0.6570815992,
"autogenerated": false,
"ratio": 3.3986283754822115,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9945826438823959,
"avg_score": 0.021976707171650508,
"num_lines": 227
} |
""" Some trivial classes for testing to serialize structures out
to different forms of XML. """
import types
class XMLLanguageSerializer(object):
""" Translates into similarly structured XML or HTML given
tags at init time."""
def __init__(self, header, footer, listStart, listEnd,
listItem, dictStart, dictEnd, dictItem,
dataItemStart, dataItemEnd):
self.TR_FUNCS = {types.ListType: self._tr_list,
types.TupleType: self._tr_list,
types.DictType: self._tr_dict,
types.StringType: self._tr_string,
types.UnicodeType: self._tr_unicode,
types.FloatType: self._tr_float,
types.IntType: self._tr_int,
types.LongType: self._tr_int,
types.BooleanType: self._tr_boolean,
types.NoneType: self._tr_none}
self.header = header
self.footer = footer
self.listStart = listStart
self.listEnd = listEnd
self.listItem = listItem
self.dictStart = dictStart
self.dictEnd = dictEnd
self.dictItem = dictItem
self.dataItemStart = dataItemStart
self.dataItemEnd = dataItemEnd
def write(self, obj):
try:
s = self._format(obj)
return "%s\n%s\n%s" % (self.header, s, self.footer)
except KeyError, ke:
return "Cannot serialize %s with type %s" \
% (str(obj), str(type(obj)))
def _format(self, obj, solo=True):
""" Takes object obj and translates into a JSON string. If
obj or a component of obj is not serializable, Exception
is raised. If solo==True the item is surrounded in dataItemstart and
dataItemEnd tags."""
s = self.TR_FUNCS[type(obj)](obj)
if solo and type(obj) not in \
[types.ListType, types.TupleType, types.DictType]:
return "%s%s%s" % (self.dataItemStart, s, self.dataItemEnd)
else:
return s
def _tr_list(self, o):
tokens =[]
for i in o:
tokens.append(self.listItem % \
{'value': self._format(i, False)})
return "%s\n%s\n%s" % (self.listStart, "\n".join(tokens), self.listEnd)
def _tr_dict(self, o):
tokens=[]
for k,v in o.items():
key = self._format(k, False)
val = self._format(v, False)
tokens.append(self.dictItem % \
{'key': key, 'value': val})
return "%s\n%s\n%s" % (self.dictStart, "\n".join(tokens), self.dictEnd)
def _tr_string(self, o):
substitutions = [('\\', r'\\'),  # escape backslashes first so later
('/', r'\/'),   # replacements are not double-escaped
('"', r'\"'),
('\b', r'\b'),
('\f', r'\f'),
('\n', r'\n'),
('\r', r'\r'),
('\t', r'\t'),
]
for s, r in substitutions:
o = o.replace(s, r)
return u'"%s"' % o
def _tr_unicode(self, o):
return self._tr_string(o)
def _tr_float(self, o):
return u"%f" % o
def _tr_int(self, o):
return u"%d" % o
def _tr_boolean(self, o):
if o:
return u"true"
else:
return u"false"
def _tr_none(self, o):
return u"null"
class XMLFormatter(XMLLanguageSerializer):
""" Noddy serialiser takes Python struct and makes XML.
Returns the serialized string."""
def __init__(self):
XMLLanguageSerializer.__init__(self,
'<?xml version="1.0"?>\n<result>',
"</result>",
"<list>",
"</list>",
"<item>%(value)s</item>",
"<dict>",
"</dict>",
"<item id=%(key)s>%(value)s</item>",
"<data>",
"</data>")
def format(self, v):
s = XMLLanguageSerializer.write(self, v)
return s
class HTMLFormatter(XMLLanguageSerializer):
""" Noddy serialiser takes Python struct and makes HTML.
Returns the serialized string."""
def __init__(self):
header = """<html>
<head>
<title>Peloton result</title>
</head>
<style>
body {
font-family: verdana,arial,sans-serif;
font-size:80%;
color: #444;
background-color:#668;
}
#main {
width: 80%;
border: 2px solid #444;
padding-left: 20px;
padding-right: 20px;
background-color: white;
margin-left: auto;
margin-right: auto;
}
#titleBar {
font-size: 200%;
font-weight: bold;
color: #335;
}
#titleBarTable {
padding: 0px;
margin: 0px;
width: 100%;
}
td.tbar {
padding: 0px;
margin: 0px;
font-size: 180%;
font-weight: bold;
color: #335;
}
td.smallHead {
font-size: 120%;
}
#subTitleBar {
font-size: 80%;
color: #88a;
border-bottom: solid 1px #da7c0d;
text-align: right;
}
</style>
<body>
<div id='main'>
<div id='titleBar'>
<table id='titleBarTable'>
<tr>
<td class='tbar smallHead'>Response</td>
<td class='tbar' align='right'>Peloton</td>
</tr>
</table>
</div>
<div id='subTitleBar'>
grid computing, batteries included
</div>
"""
XMLLanguageSerializer.__init__(self,
header,
"</div></body></html>",
"<ol>",
"</ol>",
"<li>%(value)s</li>",
"<ul>",
"</ul>",
"<li>%(key)s = %(value)s</li>",
"<p>",
"</p>")
def format(self,v):
""" Returns content-type, content. """
s = XMLLanguageSerializer.write(self, v)
return s
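# A minimal usage sketch (output abridged):
#
#   f = XMLFormatter()
#   print f.format({'name': 'widget', 'sizes': [1, 2]})
#   # <?xml version="1.0"?>
#   # <result>
#   # <dict>
#   # <item id="name">"widget"</item>
#   # <item id="sizes"><list> ... </list></item>
#   # </dict>
#   # </result>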
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/simplexml.py",
"copies": "1",
"size": "7217",
"license": "bsd-3-clause",
"hash": 5331411732904994000,
"line_mean": 34.0339805825,
"line_max": 79,
"alpha_frac": 0.4187335458,
"autogenerated": false,
"ratio": 4.397928092626447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015138942551451135,
"num_lines": 206
} |
# $Id: sip.py 48 2008-05-27 17:31:15Z yardley $
# -*- coding: utf-8 -*-
"""Session Initiation Protocol."""
from __future__ import absolute_import
from . import http
class Request(http.Request):
"""SIP request.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SIP request.
TODO.
"""
__hdr_defaults__ = {
'method': 'INVITE',
'uri': 'sip:user@example.com',
'version': '2.0',
'headers': {'To': '', 'From': '', 'Call-ID': '', 'CSeq': '', 'Contact': ''}
}
__methods = dict.fromkeys((
'ACK', 'BYE', 'CANCEL', 'INFO', 'INVITE', 'MESSAGE', 'NOTIFY',
'OPTIONS', 'PRACK', 'PUBLISH', 'REFER', 'REGISTER', 'SUBSCRIBE',
'UPDATE'
))
__proto = 'SIP'
class Response(http.Response):
"""SIP response.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SIP response.
TODO.
"""
__hdr_defaults__ = {
'version': '2.0',
'status': '200',
'reason': 'OK',
'headers': {'To': '', 'From': '', 'Call-ID': '', 'CSeq': '', 'Contact': ''}
}
__proto = 'SIP'
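# A minimal usage sketch (hypothetical message; parsing is inherited from
# http.Request, which stores header names lower-cased):
#
#   raw = (b'INVITE sip:bob@example.com SIP/2.0\r\n'
#          b'To: <sip:bob@example.com>\r\n'
#          b'From: <sip:alice@example.com>\r\n'
#          b'Call-ID: 1234@host\r\nCSeq: 1 INVITE\r\n'
#          b'Contact: <sip:alice@host>\r\nContent-Length: 0\r\n\r\n')
#   req = Request(raw)
#   req.method, req.uri, req.headers['call-id']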
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/sip.py",
"copies": "3",
"size": "1173",
"license": "apache-2.0",
"hash": -5386231364248902000,
"line_mean": 22.46,
"line_max": 83,
"alpha_frac": 0.494458653,
"autogenerated": false,
"ratio": 3.3706896551724137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5365148308172414,
"avg_score": null,
"num_lines": null
} |
# $Id: SiteHierarchy.py,v 1.1 2001/10/11 03:25:54 tavis_rudd Exp $
"""Create menus and crumbs from a site hierarchy.
You define the site hierarchy as lists/tuples. Each location in the hierarchy
is a (url, description) tuple. Each list has the base URL/text in the 0
position, and all the children coming after it. Any child can be a list,
representing further depth to the hierarchy. See the end of the file for an
example hierarchy.
Use Hierarchy(contents, currentURL), where contents is this hierarchy, and
currentURL is the position you are currently in. The menubar and crumbs methods
give you the HTML output.
There are methods you can override to customize the HTML output.
"""
##################################################
## DEPENDENCIES
import string
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
##################################################
## CLASSES
class Hierarchy:
def __init__(self, hierarchy, currentURL, prefix='', menuCSSClass=None,
crumbCSSClass=None):
"""
hierarchy is described above, currentURL should be somewhere in
the hierarchy. prefix will be added before all of the URLs (to
help mitigate the problems with absolute URLs), and if given,
cssClass will be used for both links *and* nonlinks.
"""
self._contents = hierarchy
self._currentURL = currentURL
if menuCSSClass:
self._menuCSSClass = ' class="%s"' % menuCSSClass
else:
self._menuCSSClass = ''
if crumbCSSClass:
self._crumbCSSClass = ' class="%s"' % crumbCSSClass
else:
self._crumbCSSClass = ''
self._prefix=prefix
## Main output methods
def menuList(self, menuCSSClass=None):
"""An indented menu list"""
if menuCSSClass:
self._menuCSSClass = ' class="%s"' % menuCSSClass
stream = StringIO()
for item in self._contents[1:]:
self._menubarRecurse(item, 0, stream)
return stream.getvalue()
def crumbs(self, crumbCSSClass=None):
"""The home>where>you>are crumbs"""
if crumbCSSClass:
self._crumbCSSClass = ' class="%s"' % crumbCSSClass
path = []
pos = self._contents
while True:
## This is not the fastest algorithm, I'm afraid.
## But it probably won't be for a huge hierarchy anyway.
foundAny = False
path.append(pos[0])
for item in pos[1:]:
if self._inContents(item):
if isinstance(item, tuple):
path.append(item)
break
else:
pos = item
foundAny = True
break
if not foundAny:
break
if len(path) == 1:
return self.emptyCrumb()
return string.join(map(lambda x, self=self: self.crumbLink(x[0], x[1]),
path), self.crumbSeperator()) + \
self.crumbTerminator()
## Methods to control the Aesthetics
# - override these methods for your own look
def menuLink(self, url, text, indent):
if url == self._currentURL or self._prefix + url == self._currentURL:
return '%s<B%s>%s</B> <BR>\n' % (' '*2*indent,
self._menuCSSClass, text)
else:
return '%s<A HREF="%s%s"%s>%s</A> <BR>\n' % \
(' '*2*indent, self._prefix, url,
self._menuCSSClass, text)
def crumbLink(self, url, text):
if url == self._currentURL or self._prefix + url == self._currentURL:
return '<B%s>%s</B>' % (self._crumbCSSClass, text)
else:
return '<A HREF="%s%s"%s>%s</A>' % \
(self._prefix, url, self._crumbCSSClass, text)
def crumbSeperator(self):
return ' > '
def crumbTerminator(self):
return ''
def emptyCrumb(self):
"""When you are at the homepage"""
return ''
## internal methods
def _menubarRecurse(self, contents, indent, stream):
if isinstance(contents, tuple):
url, text = contents
rest = []
else:
url, text = contents[0]
rest = contents[1:]
stream.write(self.menuLink(url, text, indent))
if self._inContents(contents):
for item in rest:
self._menubarRecurse(item, indent+1, stream)
def _inContents(self, contents):
if isinstance(contents, tuple):
return self._currentURL == contents[0]
for item in contents:
if self._inContents(item):
return True
return False
##################################################
## from the command line
if __name__ == '__main__':
hierarchy = [('/', 'home'),
('/about', 'About Us'),
[('/services', 'Services'),
[('/services/products', 'Products'),
('/services/products/widget', 'The Widget'),
('/services/products/wedge', 'The Wedge'),
('/services/products/thimble', 'The Thimble'),
],
('/services/prices', 'Prices'),
],
('/contact', 'Contact Us'),
]
for url in ['/', '/services', '/services/products/widget', '/contact']:
print('<p>', '='*50)
print('<br> %s: <br>\n' % url)
n = Hierarchy(hierarchy, url, menuCSSClass='menu', crumbCSSClass='crumb',
prefix='/here')
print(n.menuList())
print('<p>', '-'*50)
print(n.crumbs())
| {
"repo_name": "dragondjf/QMarkdowner",
"path": "Cheetah/Tools/SiteHierarchy.py",
"copies": "16",
"size": "5871",
"license": "mit",
"hash": 7941337664225572000,
"line_mean": 34.3674698795,
"line_max": 81,
"alpha_frac": 0.5244421734,
"autogenerated": false,
"ratio": 4.2512671976828385,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013565074052198684,
"num_lines": 166
} |
"""$Id: skipDays.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipDays element
#
class skipDays(validatorBase):
def __init__(self):
self.days = []
validatorBase.__init__(self)
def validate(self):
if "day" not in self.children:
self.log(MissingElement({"parent":self.name, "element":"day"}))
if len(self.children) > 7:
self.log(EightDaysAWeek({}))
def do_day(self):
return day()
class day(text):
def validate(self):
if self.value not in ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'):
self.log(InvalidDay({"parent":self.parent.name, "element":self.name, "value":self.value}))
elif self.value in self.parent.days:
self.log(DuplicateValue({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.parent.days.append(self.value)
self.log(ValidDay({"parent":self.parent.name, "element":self.name, "value":self.value}))
| {
"repo_name": "canwe/NewsBlur",
"path": "vendor/feedvalidator/skipDays.py",
"copies": "16",
"size": "1294",
"license": "mit",
"hash": 4532562503811512000,
"line_mean": 33.0526315789,
"line_max": 104,
"alpha_frac": 0.6537867079,
"autogenerated": false,
"ratio": 3.0736342042755345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""$Id: skipHours.py 699 2006-09-25 02:01:18Z rubys $"""
__author__ = "Sam Ruby <http://intertwingly.net/> and Mark Pilgrim <http://diveintomark.org/>"
__version__ = "$Revision: 699 $"
__date__ = "$Date: 2006-09-25 02:01:18 +0000 (Mon, 25 Sep 2006) $"
__copyright__ = "Copyright (c) 2002 Sam Ruby and Mark Pilgrim"
from base import validatorBase
from validators import text
from logging import *
#
# skipHours element
#
class skipHours(validatorBase):
def __init__(self):
self.hours = []
validatorBase.__init__(self)
def validate(self):
if "hour" not in self.children:
self.log(MissingElement({"parent":self.name, "element":"hour"}))
if len(self.children) > 24:
self.log(NotEnoughHoursInTheDay({}))
def do_hour(self):
return hour()
class hour(text):
def validate(self):
try:
h = int(self.value)
if (h < 0) or (h > 24):
raise ValueError
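      # hours 0 and 24 both denote midnight, so listing both is a duplicate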
elif h in self.parent.hours or (h in [0,24] and 24-h in self.parent.hours):
self.log(DuplicateValue({"parent":self.parent.name, "element":self.name, "value":self.value}))
else:
self.parent.hours.append(h)
self.log(ValidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
except ValueError:
self.log(InvalidHour({"parent":self.parent.name, "element":self.name, "value":self.value}))
| {
"repo_name": "dosiecki/NewsBlur",
"path": "vendor/feedvalidator/skipHours.py",
"copies": "16",
"size": "1363",
"license": "mit",
"hash": -6129460164247129000,
"line_mean": 31.4523809524,
"line_max": 102,
"alpha_frac": 0.6404988995,
"autogenerated": false,
"ratio": 3.1697674418604653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02069545589380986,
"num_lines": 42
} |
# $Id: smb.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Server Message Block."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
# https://msdn.microsoft.com/en-us/library/ee441774.aspx
SMB_FLAGS_LOCK_AND_READ_OK = 0x01
SMB_FLAGS_BUF_AVAIL = 0x02
SMB_FLAGS_CASE_INSENSITIVE = 0x08
SMB_FLAGS_CANONICALIZED_PATHS = 0x10
SMB_FLAGS_OPLOCK = 0x20
SMB_FLAGS_OPBATCH = 0x40
SMB_FLAGS_REPLY = 0x80
SMB_FLAGS2_LONG_NAMES = 0x0001
SMB_FLAGS2_EXTENDED_ATTRIBUTES = 0x0002
SMB_FLAGS2_SECURITY_SIGNATURES = 0x0004
SMB_FLAGS2_COMPRESSED = 0x0008
SMB_FLAGS2_SECURITY_SIGNATURES_REQUIRED = 0x0010
SMB_FLAGS2_IS_LONG_NAME = 0x0040
SMB_FLAGS2_REVERSE_PATH = 0x0400
SMB_FLAGS2_EXTENDED_SECURITY = 0x0800
SMB_FLAGS2_DFS = 0x1000
SMB_FLAGS2_PAGING_IO = 0x2000
SMB_FLAGS2_NT_STATUS = 0x4000
SMB_FLAGS2_UNICODE = 0x8000
SMB_STATUS_SUCCESS = 0x00000000
class SMB(dpkt.Packet):
"""Server Message Block.
TODO: Longer class information....
Attributes:
__hdr__ = [
('proto', '4s', b'\xffSMB'),
('cmd', 'B', 0),
('status', 'I', SMB_STATUS_SUCCESS),
('flags', 'B', 0),
('flags2', 'H', 0),
('_pidhi', 'H', 0),
('security', '8s', b''),
('rsvd', 'H', 0),
('tid', 'H', 0),
('_pidlo', 'H', 0),
('uid', 'H', 0),
('mid', 'H', 0)
]
"""
__byte_order__ = '<'
__hdr__ = [
('proto', '4s', b'\xffSMB'),
('cmd', 'B', 0),
('status', 'I', SMB_STATUS_SUCCESS),
('flags', 'B', 0),
('flags2', 'H', 0),
('_pidhi', 'H', 0),
('security', '8s', b''),
('rsvd', 'H', 0),
('tid', 'H', 0),
('_pidlo', 'H', 0),
('uid', 'H', 0),
('mid', 'H', 0)
]
@property
def pid(self):
return (self._pidhi << 16) | self._pidlo
@pid.setter
def pid(self, v):
self._pidhi = v >> 16
self._pidlo = v & 0xffff
def test_smb():
buf = b'\xffSMB\xa0\x00\x00\x00\x00\x08\x03\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xfa\x7a\x00\x08\x53\x02'
smb = SMB(buf)
assert smb.flags == SMB_FLAGS_CASE_INSENSITIVE
assert smb.flags2 == SMB_FLAGS2_UNICODE | SMB_FLAGS2_NT_STATUS | SMB_FLAGS2_EXTENDED_SECURITY | SMB_FLAGS2_EXTENDED_ATTRIBUTES | SMB_FLAGS2_LONG_NAMES
assert smb.pid == 31482
assert smb.uid == 2048
assert smb.mid == 595
print(repr(smb))
smb = SMB()
smb.pid = 0x00081020
smb.uid = 0x800
assert str(smb) == str(b'\xffSMB\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x10\x00\x08\x00\x00')
if __name__ == '__main__':
test_smb()
print('Tests Successful...')
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/smb.py",
"copies": "3",
"size": "2804",
"license": "apache-2.0",
"hash": -8795794963718097000,
"line_mean": 26.2233009709,
"line_max": 154,
"alpha_frac": 0.5602710414,
"autogenerated": false,
"ratio": 2.6353383458646618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4695609387264662,
"avg_score": null,
"num_lines": null
} |
"""
smtps.py - A Python SMTP Server. Listens on a socket for RFC821
messages. As each message is processed, methods on the class
SMTPServerInterface are called. Applications should sub-class this and
specialize the methods to suit. The default implementation does
nothing.
The usage pattern is to subclass SMTPServerInterface, overriding
methods as appropriate to the application. An instance of this
subclass should be passed to the SMTPServer object, and then the
SMTPServer.serve method should be called. This blocks forever, serving
the given port. See the __main__ code below for an example.
The SMTPServerInterface subclass should keep state information such as
the FROM: and RCPT TO: addresses. The 'SMTPServerInterface.data' method is
called when the complete RFC821 data message has been received. The
application can then do what it likes with the message.
A couple of helper functions are defined that manipulate from & to
addresses.
"""
import sys, socket, string
#
# Your applications should specialize this.
#
class SMTPServerInterface:
"""
A base class for the implementation of an application-specific SMTP
Server. Applications should subclass this and override these
methods, which by default do nothing.
A method is defined for each RFC821 command. For each of these
methods, 'args' is the complete command received from the
client. The 'data' method is called after all of the client DATA
is received.
If a method returns 'None', then a '250 OK' message is
automatically sent to the client. If a subclass returns a non-null
string then it is returned instead.
"""
def helo(self, args):
return None
def mailFrom(self, args):
return None
def rcptTo(self, args):
return None
def data(self, args):
return None
def quit(self, args):
return None
def reset(self, args):
return None
#
# Some helper functions for manipulating from & to addresses etc.
#
def stripAddress(address):
"""
Strip the leading & trailing <> from an address. Handy for
getting FROM: addresses.
"""
start = string.index(address, '<') + 1
end = string.index(address, '>')
return address[start:end]
def splitTo(address):
"""
Return 'address' as undressed (host, fulladdress) tuple.
Handy for use with TO: addresses.
"""
start = string.index(address, '<') + 1
sep = string.index(address, '@') + 1
end = string.index(address, '>')
return (address[sep:end], address[start:end],)
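# Examples (sketch):
#   stripAddress('<alice@example.com>') -> 'alice@example.com'
#   splitTo('<bob@example.org>')        -> ('example.org', 'bob@example.org')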
#
# A specialization of SMTPServerInterface for debug, that just prints its args.
#
class SMTPServerInterfaceDebug(SMTPServerInterface):
"""
A debug instance of a SMTPServerInterface that just prints its
args and returns.
"""
def helo(self, args):
print 'Received "helo"', args
def mailFrom(self, args):
print 'Received "MAIL FROM:"', args
def rcptTo(self, args):
print 'Received "RCPT TO"', args
def data(self, args):
print 'Received "DATA"', args
def quit(self, args):
print 'Received "QUIT"', args
def reset(self, args):
print 'Received "RSET"', args
#
# This drives the state for a single RFC821 message.
#
class SMTPServerEngine:
"""
Server engine that calls methods on the SMTPServerInterface object
passed at construction time. It is constructed with a bound socket
connection to a client. The 'chug' method drives the state,
returning when the client RFC821 transaction is complete.
"""
ST_INIT = 0
ST_HELO = 1
ST_MAIL = 2
ST_RCPT = 3
ST_DATA = 4
ST_QUIT = 5
def __init__(self, socket, impl):
self.impl = impl;
self.socket = socket;
self.state = SMTPServerEngine.ST_INIT
def chug(self):
"""
Chug the engine, till QUIT is received from the client. As
each RFC821 message is received, calls are made on the
SMTPServerInterface methods on the object passed at
construction time.
"""
self.socket.send("220 Welcome to Python smtps.py. $Id: smtps.py,v 1.10 2003/12/01 22:12:23 lsmithso Exp $\r\n")
while 1:
            data = ''
            keep = 1  # doData() never sets 'keep'; default to keeping the connection open
completeLine = 0
# Make sure an entire line is received before handing off
# to the state engine. Thanks to John Hall for pointing
# this out.
while not completeLine:
lump = self.socket.recv(1024);
if len(lump):
data += lump
if (len(data) >= 2) and data[-2:] == '\r\n':
completeLine = 1
if self.state != SMTPServerEngine.ST_DATA:
rsp, keep = self.doCommand(data)
else:
rsp = self.doData(data)
if rsp == None:
continue
self.socket.send(rsp + "\r\n")
if keep == 0:
self.socket.close()
return
else:
# EOF
return
return
def doCommand(self, data):
"""Process a single SMTP Command"""
cmd = data[0:4]
cmd = string.upper(cmd)
keep = 1
rv = None
if cmd == "HELO":
self.state = SMTPServerEngine.ST_HELO
rv = self.impl.helo(data)
elif cmd == "RSET":
rv = self.impl.reset(data)
self.dataAccum = ""
self.state = SMTPServerEngine.ST_INIT
elif cmd == "NOOP":
pass
elif cmd == "QUIT":
rv = self.impl.quit(data)
keep = 0
elif cmd == "MAIL":
if self.state != SMTPServerEngine.ST_HELO:
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_MAIL
rv = self.impl.mailFrom(data)
elif cmd == "RCPT":
if (self.state != SMTPServerEngine.ST_MAIL) and (self.state != SMTPServerEngine.ST_RCPT):
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_RCPT
rv = self.impl.rcptTo(data)
elif cmd == "DATA":
if self.state != SMTPServerEngine.ST_RCPT:
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_DATA
self.dataAccum = ""
return ("354 OK, Enter data, terminated with a \\r\\n.\\r\\n", 1)
else:
return ("505 Eh? WTF was that?", 1)
if rv:
return (rv, keep)
else:
return("250 OK", keep)
def doData(self, data):
"""
Process SMTP Data. Accumulates client DATA until the
terminator is found.
"""
self.dataAccum = self.dataAccum + data
if len(self.dataAccum) > 4 and self.dataAccum[-5:] == '\r\n.\r\n':
self.dataAccum = self.dataAccum[:-5]
rv = self.impl.data(self.dataAccum)
self.state = SMTPServerEngine.ST_HELO
if rv:
return rv
else:
return "250 OK - Data and terminator. found"
else:
return None
class SMTPServer:
"""
A single threaded SMTP Server connection manager. Listens for
incoming SMTP connections on a given port. For each connection,
the SMTPServerEngine is chugged, passing the given instance of
SMTPServerInterface.
"""
def __init__(self, port):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("", port))
self._socket.listen(5)
def serve(self, impl = None):
while 1:
nsd = self._socket.accept()
if impl == None:
impl = SMTPServerInterfaceDebug()
engine = SMTPServerEngine(nsd[0], impl)
engine.chug()
def Usage():
print """Usage: python smtps.py [port].
Where 'port' is SMTP port number, 25 by default. """
sys.exit(1)
if __name__ == '__main__':
if len(sys.argv) > 2:
Usage()
if len(sys.argv) == 2:
if sys.argv[1] in ('-h', '-help', '--help', '?', '-?'):
Usage()
port = int(sys.argv[1])
else:
port = 25
s = SMTPServer(port)
s.serve()
| {
"repo_name": "clashboom/carol",
"path": "app/smtps.py",
"copies": "1",
"size": "9059",
"license": "apache-2.0",
"hash": -2053761207242857500,
"line_mean": 30.5644599303,
"line_max": 119,
"alpha_frac": 0.582183464,
"autogenerated": false,
"ratio": 4.046002679767754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5128186143767753,
"avg_score": null,
"num_lines": null
} |
""" $Id: SOAP.py,v 1.2 2000/03/13 22:32:35 kmacleod Exp $
SOAP.py implements the Simple Object Access Protocol
<http://develop.com/SOAP/>
"""
from StringIO import StringIO
from ScarabMarshal import *
from xml.sax import saxlib, saxexts
from types import *
import string
import base64
# just constants
DICT = "dict"
ARRAY = "array"
CHAR = "char"
class SOAPMarshaler(Marshaler):
def __init__(self, stream):
self.written_stream_header = 0
self.write = stream.write
self.flush = stream.flush
def encode_call(self, method, map):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
self._marshal({ method : map }, dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def encode_response(self, method, map):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
self._marshal({ method + "Response" : map }, dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def encode_fault(self, faultcode, message, runcode, detail = None):
dict = { 'id':0 }
self.m_init(dict)
self.write('''<SOAP:Envelope
xmlns:SOAP="urn:schemas-xmlsoap-org:soap.v1"
xmlns:tbd="ToBeDetermined">
<SOAP:Body>
''')
if detail == None:
self._marshal({ "SOAP:Fault" : { 'faultcode' : faultcode,
'faultstring' : message,
'runcode' : runcode } },
dict)
else:
self._marshal({ "SOAP:Fault" : { 'faultcode' : faultcode,
'faultstring' : message,
'runcode' : runcode,
'detail' : detail } },
dict)
self.write(''' </SOAP:Body>
</SOAP:Envelope>
''')
self.m_finish(dict)
def m_init(self, dict):
self.write('<?xml version="1.0"?>')
def m_finish(self, dict):
self.write("\n\f\n")
def persistent_id(self, object):
return None
def m_reference(self, object, dict):
""" already done """
def m_None(self, object, dict):
""" already done """
def m_int(self, object, dict):
self.write(str(object))
def m_long(self, object, dict):
self.write(str(object))
def m_float(self, object, dict):
self.write(str(object))
def m_complex(self, object, dict):
self.write(str(object))
def m_string(self, object, dict):
self.write(str(object))
def m_list(self, object, dict):
# FIXME missing ref ids
n = len(object)
for k in range(n):
type = self._type(object[k])
self.write('<tbd:urtype' + type + '>')
self._marshal(object[k], dict)
self.write('</tbd:urtype>')
def m_tuple(self, object, dict):
# FIXME missing ref ids
# FIXME set type to tuple
n = len(object)
for k in range(n):
type = self._type(object[k])
self.write('<tbd:urtype' + type + '>')
self._marshal(object[k], dict)
self.write('</tbd:urtype>')
def m_dictionary(self, object, dict):
# FIXME missing ref ids
items = object.items()
n = len(items)
for k in range(n):
key, value = items[k]
type = self._type(value)
self.write('<' + key + type + '>')
self._marshal(value, dict)
self.write('</' + key + '>')
def m_instance(self, object, dict):
# FIXME missing ref ids
cls = object.__class__
module = whichmodule(cls)
name = cls.__name__
try:
getstate = object.__getstate__
except AttributeError:
stuff = object.__dict__
else:
stuff = getstate()
items = stuff.items()
n = len(items)
for k in range(n):
key, value = items[k]
type = self._type(value)
self.write('<' + key + type + '>')
self._marshal(value, dict)
self.write('</' + key + '>')
def _type(self, object):
t = type(object)
if t == ListType:
return ' SOAP:arrayType="tbd:urtype[]"'
elif t == TupleType:
return ' SOAP:arrayType="tbd:urtype[]"'
elif t == NoneType:
return ' xsi:null="1"'
return ''
class SOAPUnmarshaler(Unmarshaler, saxlib.DocumentHandler):
def __init__(self, stream):
self.memo = {}
self.stream = stream
def _unmarshal(self):
self.parse_value_stack = [ {} ]
self.parse_utype_stack = [ DICT ]
self.parse_type_stack = [ ]
self.parser = saxexts.make_parser()
self.parser.setDocumentHandler(self)
self.parser.setErrorHandler(self)
lines = []
stream = self.stream
# FIXME SAX parsers should support this on streams
line = stream.readline()
while (line != "\f\n") and (line != ""):
lines.append(line)
line = stream.readline()
if len(lines) == 0:
raise EOFError
stream = StringIO(string.join(lines))
self.parser.parseFile(stream)
o = self.parse_value_stack[0]
delattr(self, 'parse_value_stack')
self.parser.close()
return o
def startElement(self, name, attrs):
self.chars = ""
xsi_type = None
if attrs.has_key('xsi:type'):
xsi_type = attrs['xsi:type']
elif (attrs.has_key('xsi:null')
and attrs['xsi:null'] == '1'):
xsi_type = "None"
self.parse_type_stack.append(xsi_type)
# FIXME 'list' is a temp hack for a specific user
if (attrs.has_key('SOAP:arrayType')
or (attrs.has_key('type')
and attrs['type'] == 'list')):
self.parse_utype_stack.append(ARRAY)
self.parse_value_stack.append( [ ] )
else:
# will be set to DICT if a sub-element is found
self.parse_utype_stack.append(CHAR)
def endElement(self, name):
# FIXME do something with types
xsi_type = self.parse_type_stack.pop()
utype = self.parse_utype_stack.pop()
if utype is CHAR:
if xsi_type == "None":
value = None
else:
value = self.chars
else:
value = self.parse_value_stack.pop()
# if we're in an element, and our parent element was defaulted
# to CHAR, then we're in a struct and we need to create that
# dictionary.
if self.parse_utype_stack[-1] is CHAR:
self.parse_value_stack.append( { } )
self.parse_utype_stack[-1] = DICT
if self.parse_utype_stack[-1] is DICT:
self.parse_value_stack[-1][name] = value
else:
self.parse_value_stack[-1].append(value)
def characters(self, ch, start, length):
self.chars = self.chars + ch[start:start + length]
def fatalError(self, exc):
raise exc
# Shorthands (credits to and most copied from pickle.py)
def encode_call(file, method, object):
SOAPMarshaler(file).encode_call(method, object)
def encode_calls(method, object):
file = StringIO()
SOAPMarshaler(file).encode_call(method, object)
return file.getvalue()
def encode_response(file, method, object):
SOAPMarshaler(file).encode_response(method, object)
def encode_responses(method, object):
file = StringIO()
SOAPMarshaler(file).encode_response(method, object)
return file.getvalue()
def encode_fault(file, faultcode, message, runcode, detail = None):
SOAPMarshaler(file).encode_fault(faultcode, message, runcode, detail)
def encode_faults(faultcode, message, runcode, detail = None):
file = StringIO()
SOAPMarshaler(file).encode_fault(faultcode, message, runcode, detail)
return file.getvalue()
def dump(object, file):
SOAPMarshaler(file).dump(object)
def dumps(object):
file = StringIO()
SOAPMarshaler(file).dump(object)
return file.getvalue()
def decode(file):
return SOAPUnmarshaler(file).decode()
def decodes(str):
file = StringIO(str)
return SOAPUnmarshaler(file).decode()
def load(file):
return SOAPUnmarshaler(file).load()
def loads(str):
file = StringIO(str)
return SOAPUnmarshaler(file).load()
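# Shorthand sketch (output abbreviated; the full envelope also carries the
# xmlns declarations and the trailing "\n\f\n" terminator written above):
#
#   s = encode_calls('getPrice', {'symbol': 'XYZ'})
#   # ...<SOAP:Body>\n<getPrice><symbol>XYZ</symbol></getPrice>...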
if __name__ == '__main__':
runtests(load, loads, dump, dumps)
| {
"repo_name": "mworks/mworks",
"path": "supporting_libs/scarab/Scarab-0.1.00d19/python/SOAP.py",
"copies": "1",
"size": "8690",
"license": "mit",
"hash": -8138549652893928000,
"line_mean": 27.3061889251,
"line_max": 73,
"alpha_frac": 0.5602991945,
"autogenerated": false,
"ratio": 3.519643580396922,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.951997491686714,
"avg_score": 0.011993571605956285,
"num_lines": 307
} |
# $Id: sshmanhole.py 105 2008-04-02 19:39:49Z mp $
# This code was modified from source originally found at
# http://buildbot.net/repos/trunk/buildbot/manhole.py
#
"""
This code was modified from source originally found at
http://buildbot.net/repos/trunk/buildbot/manhole.py
"""
import os.path
import binascii, base64
from twisted.python import log
from twisted.application import service, strports
from twisted.cred import checkers, portal
from twisted.conch import manhole, manhole_ssh, checkers as conchc
from twisted.conch.insults import insults
from twisted.internet import protocol
class chainedProtocolFactory:
# this curries the 'namespace' argument into a later call to
# chainedProtocolFactory()
def __init__(self, namespace):
self.namespace = namespace
def __call__(self):
return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
"""Accept connections using SSH keys from a given file.
SSHPublicKeyDatabase takes the username that the prospective client has
requested and attempts to get a ~/.ssh/authorized_keys file for that
username. This requires root access, so it isn't as useful as you'd
like.
Instead, this subclass looks for keys in a single file, given as an
argument. This file is typically kept in the buildmaster's basedir. The
file should have 'ssh-dss ....' lines in it, just like authorized_keys.
"""
def __init__(self, authorized_keys_file):
self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
def checkKey(self, credentials):
f = open(self.authorized_keys_file)
for l in f.readlines():
l2 = l.split()
if len(l2) < 2:
continue
try:
if base64.decodestring(l2[1]) == credentials.blob:
return 1
except binascii.Error:
continue
return 0
class _BaseManhole(service.MultiService):
def __init__(self, port, checker, using_ssh=True, namespace={}):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@type checker: an object providing the
L{twisted.cred.checkers.ICredentialsChecker} interface
@param checker: if provided, this checker is used to authenticate the
client instead of using the username/password scheme. You must either
provide a username/password or a Checker. Some useful values are::
import twisted.cred.checkers as credc
import twisted.conch.checkers as conchc
c = credc.AllowAnonymousAccess # completely open
c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
@type using_ssh: bool
@param using_ssh: If True, accept SSH connections. If False, accept
regular unencrypted telnet connections.
"""
# unfortunately, these don't work unless we're running as root
#c = credc.PluggableAuthenticationModulesChecker: PAM
#c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
# and I can't get UNIXPasswordDatabase to work
service.MultiService.__init__(self)
if type(port) is int:
port = "tcp:%d" % port
self.port = port # for comparison later
self.checker = checker # to maybe compare later
def makeProtocol():
p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
return p
self.using_ssh = using_ssh
if using_ssh:
r = manhole_ssh.TerminalRealm()
r.chainedProtocolFactory = makeProtocol
p = portal.Portal(r, [self.checker])
f = manhole_ssh.ConchFactory(p)
else:
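            # NOTE: _TelnetRealm and makeTelnetProtocol come from the original
            # buildbot manhole module and are not defined in this file, so the
            # telnet (using_ssh=False) branch raises NameError as shipped.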
r = _TelnetRealm(makeNamespace)
p = portal.Portal(r, [self.checker])
f = protocol.ServerFactory()
f.protocol = makeTelnetProtocol(p)
s = strports.service(self.port, f)
s.setServiceParent(self)
def startService(self):
service.MultiService.startService(self)
if self.using_ssh:
via = "via SSH"
else:
via = "via telnet"
log.msg("Manhole listening %s on port %s" % (via, self.port))
class PasswordManhole(_BaseManhole):
"""This Manhole accepts encrypted (ssh) connections, and requires a
username and password to authorize access.
"""
compare_attrs = ["port", "username", "password"]
def __init__(self, port, username, password, namespace={}):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param username:
@param password: username= and password= form a pair of strings to
use when authenticating the remote user.
"""
self.username = username
self.password = password
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
c.addUser(username, password)
_BaseManhole.__init__(self, port, c, namespace=namespace)
class AuthorizedKeysManhole(_BaseManhole):
"""This Manhole accepts ssh connections, and requires that the
prospective client have an ssh private key that matches one of the public
keys in our authorized_keys file. It is created with the name of a file
that contains the public keys that we will accept."""
compare_attrs = ["port", "keyfile"]
def __init__(self, port, keyfile):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param keyfile: the name of a file (relative to the buildmaster's
basedir) that contains SSH public keys of authorized
users, one per line. This is the exact same format
as used by sshd in ~/.ssh/authorized_keys .
"""
self.keyfile = keyfile
c = AuthorizedKeysChecker(keyfile)
_BaseManhole.__init__(self, port, c)
class ArbitraryCheckerManhole(_BaseManhole):
"""This Manhole accepts ssh connections, but uses an arbitrary
user-supplied 'checker' object to perform authentication."""
compare_attrs = ["port", "checker"]
def __init__(self, port, checker):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param checker: an instance of a twisted.cred 'checker' which will
perform authentication
"""
_BaseManhole.__init__(self, port, checker)
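# Usage sketch (a twisted .tac file; 'application' is the conventional
# twistd service name, the port and credentials are illustrative):
#
#   from twisted.application import service
#   application = service.Application('peloton')
#   m = PasswordManhole(2222, 'admin', 'secret', namespace={'app': application})
#   m.setServiceParent(application)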
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/plugins/support/sshmanhole.py",
"copies": "1",
"size": "7363",
"license": "bsd-3-clause",
"hash": 7821584598727583000,
"line_mean": 37.1502590674,
"line_max": 79,
"alpha_frac": 0.6413146815,
"autogenerated": false,
"ratio": 4.224325874928285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0025970783369370473,
"num_lines": 193
} |
# $Id: ssl.py 46 2008-05-27 02:08:12Z jon.oberheide $
"""Secure Sockets Layer / Transport Layer Security."""
import dpkt
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
('msg', 's', ''),
('pad', 's', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
n = self.len = self.len & 0x3FFF
padlen = ord(self.data[0])
self.msg = self.data[1:1+n]
self.pad = self.data[1+n:1+n+padlen]
self.data = self.data[1+n+padlen:]
# SSLv3/TLS version
SSL3_VERSION = 0x0300
TLS1_VERSION = 0x0301
# Record type
SSL3_RT_CHANGE_CIPHER_SPEC = 20
SSL3_RT_ALERT = 21
SSL3_RT_HANDSHAKE = 22
SSL3_RT_APPLICATION_DATA = 23
# Handshake message type
SSL3_MT_HELLO_REQUEST = 0
SSL3_MT_CLIENT_HELLO = 1
SSL3_MT_SERVER_HELLO = 2
SSL3_MT_CERTIFICATE = 11
SSL3_MT_SERVER_KEY_EXCHANGE = 12
SSL3_MT_CERTIFICATE_REQUEST = 13
SSL3_MT_SERVER_DONE = 14
SSL3_MT_CERTIFICATE_VERIFY = 15
SSL3_MT_CLIENT_KEY_EXCHANGE = 16
SSL3_MT_FINISHED = 20
class SSL3(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('len', 'H', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.len <= len(self.data):
self.msg, self.data = self.data[:self.len], self.data[self.len:]
"""
Byte 0 = SSL record type = 22 (SSL3_RT_HANDSHAKE)
Bytes 1-2 = SSL version (major/minor)
Bytes 3-4 = Length of data in the record (excluding the header itself).
Byte 5 = Handshake type
Bytes 6-8 = Length of data to follow in this record
Bytes 9-n = Command-specific data
"""
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
if v == '\x03\x01' or v == '\x03\x00':
return SSL3(buf)
return SSL2(buf)
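# Dispatch sketch: the two bytes after the record type select the parser
# ('\x16' = handshake record; the 4-byte payload here is arbitrary):
#
#   rec = SSLFactory('\x16\x03\x01\x00\x04abcd')
#   # isinstance(rec, SSL3) and rec.type == 22 and rec.len == 4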
| {
"repo_name": "ashumeow/pcaphar",
"path": "src/third_party/dpkt/dpkt/ssl.py",
"copies": "10",
"size": "2100",
"license": "apache-2.0",
"hash": 7132666103386646000,
"line_mean": 27.7671232877,
"line_max": 76,
"alpha_frac": 0.539047619,
"autogenerated": false,
"ratio": 2.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8480224089588236,
"avg_score": null,
"num_lines": null
} |
"""Secure Sockets Layer / Transport Layer Security."""
import dpkt
import ssl_ciphersuites
import struct
import binascii
import traceback
import datetime
#
# Note from April 2011: cde...@gmail.com added code that parses SSL3/TLS messages more in depth.
#
# Jul 2012: afleenor@google.com modified and extended SSL support further.
#
class SSL2(dpkt.Packet):
__hdr__ = (
('len', 'H', 0),
('msg', 's', ''),
('pad', 's', ''),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
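        # High bit of the length word set: 2-byte record header, no padding.
        # Clear: 3-byte header where the next byte gives the pad length (SSLv2 framing).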
if self.len & 0x8000:
n = self.len = self.len & 0x7FFF
self.msg, self.data = self.data[:n], self.data[n:]
else:
n = self.len = self.len & 0x3FFF
padlen = ord(self.data[0])
self.msg = self.data[1:1+n]
self.pad = self.data[1+n:1+n+padlen]
self.data = self.data[1+n+padlen:]
# SSLv3/TLS versions
SSL3_V = 0x0300
TLS1_V = 0x0301
TLS11_V = 0x0302
TLS12_V = 0x0303
ssl3_versions_str = {
SSL3_V: 'SSL3',
TLS1_V: 'TLS 1.0',
TLS11_V: 'TLS 1.1',
TLS12_V: 'TLS 1.2'
}
SSL3_VERSION_BYTES = set(('\x03\x00', '\x03\x01', '\x03\x02', '\x03\x03'))
# Alert levels
SSL3_AD_WARNING = 1
SSL3_AD_FATAL = 2
alert_level_str = {
SSL3_AD_WARNING: 'SSL3_AD_WARNING',
SSL3_AD_FATAL: 'SSL3_AD_FATAL'
}
# SSL3 alert descriptions
SSL3_AD_CLOSE_NOTIFY = 0
SSL3_AD_UNEXPECTED_MESSAGE = 10 # fatal
SSL3_AD_BAD_RECORD_MAC = 20 # fatal
SSL3_AD_DECOMPRESSION_FAILURE = 30 # fatal
SSL3_AD_HANDSHAKE_FAILURE = 40 # fatal
SSL3_AD_NO_CERTIFICATE = 41
SSL3_AD_BAD_CERTIFICATE = 42
SSL3_AD_UNSUPPORTED_CERTIFICATE = 43
SSL3_AD_CERTIFICATE_REVOKED = 44
SSL3_AD_CERTIFICATE_EXPIRED = 45
SSL3_AD_CERTIFICATE_UNKNOWN = 46
SSL3_AD_ILLEGAL_PARAMETER = 47 # fatal
# TLS1 alert descriptions
TLS1_AD_DECRYPTION_FAILED = 21
TLS1_AD_RECORD_OVERFLOW = 22
TLS1_AD_UNKNOWN_CA = 48 # fatal
TLS1_AD_ACCESS_DENIED = 49 # fatal
TLS1_AD_DECODE_ERROR = 50 # fatal
TLS1_AD_DECRYPT_ERROR = 51
TLS1_AD_EXPORT_RESTRICTION = 60 # fatal
TLS1_AD_PROTOCOL_VERSION = 70 # fatal
TLS1_AD_INSUFFICIENT_SECURITY = 71 # fatal
TLS1_AD_INTERNAL_ERROR = 80 # fatal
TLS1_AD_USER_CANCELLED = 90
TLS1_AD_NO_RENEGOTIATION = 100
#/* codes 110-114 are from RFC3546 */
TLS1_AD_UNSUPPORTED_EXTENSION = 110
TLS1_AD_CERTIFICATE_UNOBTAINABLE = 111
TLS1_AD_UNRECOGNIZED_NAME = 112
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE = 113
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE = 114
TLS1_AD_UNKNOWN_PSK_IDENTITY = 115 # fatal
# Mapping alert types to strings
alert_description_str = {
SSL3_AD_CLOSE_NOTIFY: 'SSL3_AD_CLOSE_NOTIFY',
SSL3_AD_UNEXPECTED_MESSAGE: 'SSL3_AD_UNEXPECTED_MESSAGE',
SSL3_AD_BAD_RECORD_MAC: 'SSL3_AD_BAD_RECORD_MAC',
SSL3_AD_DECOMPRESSION_FAILURE: 'SSL3_AD_DECOMPRESSION_FAILURE',
SSL3_AD_HANDSHAKE_FAILURE: 'SSL3_AD_HANDSHAKE_FAILURE',
SSL3_AD_NO_CERTIFICATE: 'SSL3_AD_NO_CERTIFICATE',
SSL3_AD_BAD_CERTIFICATE: 'SSL3_AD_BAD_CERTIFICATE',
SSL3_AD_UNSUPPORTED_CERTIFICATE: 'SSL3_AD_UNSUPPORTED_CERTIFICATE',
SSL3_AD_CERTIFICATE_REVOKED: 'SSL3_AD_CERTIFICATE_REVOKED',
SSL3_AD_CERTIFICATE_EXPIRED: 'SSL3_AD_CERTIFICATE_EXPIRED',
SSL3_AD_CERTIFICATE_UNKNOWN: 'SSL3_AD_CERTIFICATE_UNKNOWN',
SSL3_AD_ILLEGAL_PARAMETER: 'SSL3_AD_ILLEGAL_PARAMETER',
TLS1_AD_DECRYPTION_FAILED: 'TLS1_AD_DECRYPTION_FAILED',
TLS1_AD_RECORD_OVERFLOW: 'TLS1_AD_RECORD_OVERFLOW',
TLS1_AD_UNKNOWN_CA: 'TLS1_AD_UNKNOWN_CA',
TLS1_AD_ACCESS_DENIED: 'TLS1_AD_ACCESS_DENIED',
TLS1_AD_DECODE_ERROR: 'TLS1_AD_DECODE_ERROR',
TLS1_AD_DECRYPT_ERROR: 'TLS1_AD_DECRYPT_ERROR',
TLS1_AD_EXPORT_RESTRICTION: 'TLS1_AD_EXPORT_RESTRICTION',
TLS1_AD_PROTOCOL_VERSION: 'TLS1_AD_PROTOCOL_VERSION',
TLS1_AD_INSUFFICIENT_SECURITY: 'TLS1_AD_INSUFFICIENT_SECURITY',
TLS1_AD_INTERNAL_ERROR: 'TLS1_AD_INTERNAL_ERROR',
TLS1_AD_USER_CANCELLED: 'TLS1_AD_USER_CANCELLED',
TLS1_AD_NO_RENEGOTIATION: 'TLS1_AD_NO_RENEGOTIATION',
TLS1_AD_UNSUPPORTED_EXTENSION: 'TLS1_AD_UNSUPPORTED_EXTENSION',
TLS1_AD_CERTIFICATE_UNOBTAINABLE: 'TLS1_AD_CERTIFICATE_UNOBTAINABLE',
TLS1_AD_UNRECOGNIZED_NAME: 'TLS1_AD_UNRECOGNIZED_NAME',
TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE: 'TLS1_AD_BAD_CERTIFICATE_STATUS_RESPONSE',
TLS1_AD_BAD_CERTIFICATE_HASH_VALUE: 'TLS1_AD_BAD_CERTIFICATE_HASH_VALUE',
TLS1_AD_UNKNOWN_PSK_IDENTITY: 'TLS1_AD_UNKNOWN_PSK_IDENTITY'
}
# struct format strings for parsing buffer lengths
# don't forget, you have to pad a 3-byte value with \x00
_SIZE_FORMATS = ['!B', '!H', '!I', '!I']
def parse_variable_array(buf, lenbytes):
"""
Parse an array described using the 'Type name<x..y>' syntax from the spec
Read a length at the start of buf, and returns that many bytes
after, in a tuple with the TOTAL bytes consumed (including the size). This
does not check that the array is the right length for any given datatype.
"""
# first have to figure out how to parse length
assert lenbytes <= 4 # pretty sure 4 is impossible, too
size_format = _SIZE_FORMATS[lenbytes - 1]
padding = '\x00' if lenbytes == 3 else ''
# read off the length
size = struct.unpack(size_format, padding + buf[:lenbytes])[0]
# read the actual data
data = buf[lenbytes:lenbytes + size]
# if len(data) != size: insufficient data
return data, size + lenbytes
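# Example (sketch): a 1-byte length prefix followed by three data bytes,
# with trailing bytes left unconsumed:
#   parse_variable_array('\x03abcXXX', 1) -> ('abc', 4)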
class SSL3Exception(Exception):
pass
class TLSRecord(dpkt.Packet):
"""
SSLv3 or TLSv1+ packet.
In addition to the fields specified in the header, there are
compressed and decrypted fields, indicating whether, in the language
of the spec, this is a TLSPlaintext, TLSCompressed, or
TLSCiphertext. The application will have to figure out when it's
appropriate to change these values.
"""
__hdr__ = (
('type', 'B', 0),
('version', 'H', 0),
('length', 'H', 0),
)
def __init__(self, *args, **kwargs):
# assume plaintext unless specified otherwise in arguments
self.compressed = kwargs.pop('compressed', False)
self.encrypted = kwargs.pop('encrypted', False)
# parent constructor
dpkt.Packet.__init__(self, *args, **kwargs)
# make sure length and data are consistent
self.length = len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
header_length = self.__hdr_len__
self.data = buf[header_length:header_length+self.length]
# make sure buffer was long enough
if len(self.data) != self.length:
raise dpkt.NeedData('TLSRecord data was too short.')
# assume compressed and encrypted when it's been parsed from
# raw data
self.compressed = True
self.encrypted = True
class TLSChangeCipherSpec(dpkt.Packet):
"""
ChangeCipherSpec message is just a single byte with value 1
"""
__hdr__ = (('type', 'B', 1),)
class TLSAppData(str):
"""
As far as TLSRecord is concerned, AppData is just an opaque blob.
"""
pass
class TLSAlert(dpkt.Packet):
__hdr__ = (
('level', 'B', 1),
('description', 'B', 0),
)
class TLSHelloRequest(dpkt.Packet):
__hdr__ = tuple()
TLSExtensionTypes = {
0: 'server_name',
1: 'max_fragment_length',
2: 'client_certificate_url',
3: 'trusted_ca_keys',
4: 'truncated_hmac',
5: 'status_request',
6: 'user_mapping',
7: 'client_authz',
8: 'server_authz',
9: 'cert_type',
10: 'elliptic_curves',
11: 'ec_point_formats',
12: 'srp',
13: 'signature_algorithms',
14: 'use_srtp',
15: 'heartbeat',
35: 'session_tickets',
13172: 'next_protocol_negotiation',
65281: 'renegotiation_info',
}
class TLSExtension(object):
def __init__(self, extNumber, data):
self.data = data
self.value = extNumber
@property
def name(self):
return TLSExtensionTypes.get(self.value, 'unknown')
class TLSClientHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # the rest is variable-length and has to be done manually
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# now session, cipher suites, extensions are in self.data
self.session_id, pointer = parse_variable_array(self.data, 1)
# print 'pointer',pointer
# handle ciphersuites
ciphersuites, parsed = parse_variable_array(self.data[pointer:], 2)
pointer += parsed
self.num_ciphersuites = len(ciphersuites) / 2
# check len(ciphersuites) % 2 == 0 ?
# compression methods
compression_methods, parsed = parse_variable_array(
self.data[pointer:], 1)
pointer += parsed
self.num_compression_methods = parsed - 1
self.compression_methods = map(ord, compression_methods)
self.extensions = []
if len(self.data[pointer:]) <= 0:
return
# skip total extensions length
pointer += 2
while len(self.data[pointer:]) > 0:
# extensions
extType = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
extension, extensionLength = parse_variable_array(self.data[pointer:], 2)
pointer += extensionLength
self.extensions.append(TLSExtension(extType, extension))
class TLSServerHello(dpkt.Packet):
__hdr__ = (
('version', 'H', 0x0301),
('random', '32s', '\x00'*32),
) # session is variable, forcing rest to be manual
def unpack(self, buf):
try:
dpkt.Packet.unpack(self, buf)
self.session_id, pointer = parse_variable_array(self.data, 1)
# single cipher suite
self.cipher_suite = struct.unpack('!H', self.data[pointer:pointer+2])[0]
pointer += 2
# single compression method
self.compression = struct.unpack('!B', self.data[pointer:pointer+1])[0]
pointer += 1
# ignore extensions for now
except struct.error:
# probably data too short
raise dpkt.NeedData
class TLSUnknownHandshake(dpkt.Packet):
__hdr__ = tuple()
TLSCertificate = TLSUnknownHandshake
TLSServerKeyExchange = TLSUnknownHandshake
TLSCertificateRequest = TLSUnknownHandshake
TLSServerHelloDone = TLSUnknownHandshake
TLSCertificateVerify = TLSUnknownHandshake
TLSClientKeyExchange = TLSUnknownHandshake
TLSFinished = TLSUnknownHandshake
# mapping of handshake type ids to their names
# and the classes that implement them
HANDSHAKE_TYPES = {
0: ('HelloRequest', TLSHelloRequest),
1: ('ClientHello', TLSClientHello),
2: ('ServerHello', TLSServerHello),
11: ('Certificate', TLSCertificate),
12: ('ServerKeyExchange', TLSServerKeyExchange),
13: ('CertificateRequest', TLSCertificateRequest),
14: ('ServerHelloDone', TLSServerHelloDone),
15: ('CertificateVerify', TLSCertificateVerify),
16: ('ClientKeyExchange', TLSClientKeyExchange),
20: ('Finished', TLSFinished),
}
class TLSHandshake(dpkt.Packet):
'''
A TLS Handshake message
This goes for all messages encapsulated in the Record layer, but especially
important for handshakes and app data: A message may be spread across a
number of TLSRecords, in addition to the possibility of there being more
than one in a given Record. You have to put together the contents of
TLSRecord's yourself.
'''
# struct.unpack can't handle the 3-byte int, so we parse it as bytes
# (and store it as bytes so dpkt doesn't get confused), and turn it into
# an int in a user-facing property
__hdr__ = (
('type', 'B', 0),
('length_bytes', '3s', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Wait, might there be more than one message of self.type?
embedded_type = HANDSHAKE_TYPES.get(self.type, None)
if embedded_type is None:
raise SSL3Exception('Unknown or invalid handshake type %d' %
self.type)
# only take the right number of bytes
self.data = self.data[:self.length]
if len(self.data) != self.length:
raise dpkt.NeedData
# get class out of embedded_type tuple
self.data = embedded_type[1](self.data)
@property
def length(self):
return struct.unpack('!I', '\x00' + self.length_bytes)[0]
RECORD_TYPES = {
20: TLSChangeCipherSpec,
21: TLSAlert,
22: TLSHandshake,
23: TLSAppData,
}
class SSLFactory(object):
def __new__(cls, buf):
v = buf[1:3]
        if v in SSL3_VERSION_BYTES:
            # 'SSL3' is not defined in this module; TLSRecord is the
            # SSLv3/TLS record class here, so dispatch to it instead.
            return TLSRecord(buf)
# SSL2 has no characteristic header or magic bytes, so we just assume
# that the msg is an SSL2 msg if it is not detected as SSL3+
return SSL2(buf)
def TLSMultiFactory(buf):
'''
Attempt to parse one or more TLSRecord's out of buf
Args:
buf: string containing SSL/TLS messages. May have an incomplete record
on the end
Returns:
[TLSRecord]
int, total bytes consumed, != len(buf) if an incomplete record was left at
the end.
Raises ...?
'''
if not buf:
return [], 0
v = buf[1:3]
if v in SSL3_VERSION_BYTES:
try:
msg = TLSRecord(buf)
parsed_bytes = len(msg) # len fn includes header length
except dpkt.NeedData:
return [], 0 # tell caller we parsed nothing
else:
raise SSL3Exception('Bad TLS version in buf: %r' % buf[:5])
later_messages, later_bytes = TLSMultiFactory(buf[len(msg):])
return [msg] + later_messages, parsed_bytes + later_bytes
import unittest
_hexdecode = binascii.a2b_hex
class TLSRecordTest(unittest.TestCase):
"""
Test basic TLSRecord functionality
For this test, the contents of the record doesn't matter, since we're not
parsing the next layer.
"""
def setUp(self):
# add some extra data, to make sure length is parsed correctly
self.p = TLSRecord('\x17\x03\x01\x00\x08abcdefghzzzzzzzzzzz')
def testContentType(self):
self.assertEqual(self.p.type, 23)
def testVersion(self):
self.assertEqual(self.p.version, 0x0301)
def testLength(self):
self.assertEqual(self.p.length, 8)
def testData(self):
self.assertEqual(self.p.data, 'abcdefgh')
def testInitialFlags(self):
self.assertTrue(self.p.compressed)
self.assertTrue(self.p.encrypted)
def testRepack(self):
p2 = TLSRecord(type=23, version=0x0301, data='abcdefgh')
self.assertEqual(p2.type, 23)
self.assertEqual(p2.version, 0x0301)
self.assertEqual(p2.length, 8)
self.assertEqual(p2.data, 'abcdefgh')
self.assertEqual(p2.pack(), self.p.pack())
def testTotalLength(self):
# that len(p) includes header
self.assertEqual(len(self.p), 13)
def testRaisesNeedDataWhenBufIsShort(self):
self.assertRaises(
dpkt.NeedData,
TLSRecord,
'\x16\x03\x01\x00\x10abc')
class TLSChangeCipherSpecTest(unittest.TestCase):
"It's just a byte. This will be quick, I promise"
def setUp(self):
self.p = TLSChangeCipherSpec('\x01')
def testParses(self):
self.assertEqual(self.p.type, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 1)
class TLSAppDataTest(unittest.TestCase):
"AppData is basically just a string"
def testValue(self):
d = TLSAppData('abcdefgh')
self.assertEqual(d, 'abcdefgh')
class TLSHandshakeTest(unittest.TestCase):
def setUp(self):
self.h = TLSHandshake('\x00\x00\x00\x01\xff')
def testCreatedInsideMessage(self):
self.assertTrue(isinstance(self.h.data, TLSHelloRequest))
def testLength(self):
self.assertEqual(self.h.length, 0x01)
def testRaisesNeedData(self):
self.assertRaises(dpkt.NeedData, TLSHandshake, '\x00\x00\x01\x01')
class ClientHelloTest(unittest.TestCase):
'This data is extracted from and verified by Wireshark'
def setUp(self):
self.data = _hexdecode(
"01000199" # handshake header
"0301" # version
"5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d" # rand
"2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1" # session id
# cipher suites
"005400ffc00ac0140088008700390038c00fc00500840035c007c009c011c0130045004400330032c00cc00ec002c0040096004100050004002fc008c01200160013c00dc003feff000ac006c010c00bc00100020001"
"0100" # compresssion methods
# extensions
"00fc0000000e000c0000096c6f63616c686f7374000a00080006001700180019000b00020100002300d0a50b2e9f618a9ea9bf493ef49b421835cd2f6b05bbe1179d8edf70d58c33d656e8696d36d7e7e0b9d3ecc0e4de339552fa06c64c0fcb550a334bc43944e2739ca342d15a9ebbe981ac87a0d38160507d47af09bdc16c5f0ee4cdceea551539382333226048a026d3a90a0535f4a64236467db8fee22b041af986ad0f253bc369137cd8d8cd061925461d7f4d7895ca9a4181ab554dad50360ac31860e971483877c9335ac1300c5e78f3e56f3b8e0fc16358fcaceefd5c8d8aaae7b35be116f8832856ca61144fcdd95e071b94d0cf7233740000"
"FFFFFFFFFFFFFFFF") # random garbage
self.p = TLSHandshake(self.data)
def testClientHelloConstructed(self):
'Make sure the correct class was constructed'
#print self.p
self.assertTrue(isinstance(self.p.data, TLSClientHello))
# def testClientDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testClientRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220ce5e0e78b6891afe204498c9363feffbe03235a2d9e05b7d990eb708d'))
def testCipherSuiteLength(self):
# we won't bother testing the identity of each cipher suite in the list.
self.assertEqual(self.p.data.num_ciphersuites, 42)
#self.assertEqual(len(self.p.ciphersuites), 42)
def testSessionId(self):
self.assertEqual(self.p.data.session_id,
_hexdecode('09bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed1'))
def testCompressionMethods(self):
self.assertEqual(self.p.data.num_compression_methods, 1)
def testTotalLength(self):
self.assertEqual(len(self.p), 413)
class ServerHelloTest(unittest.TestCase):
'Again, from Wireshark'
def setUp(self):
self.data = _hexdecode('0200004d03015008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd2009bc0192e008e6fa8fe47998fca91311ba30ddde14a9587dc674b11c3d3e5ed10002000005ff01000100')
self.p = TLSHandshake(self.data)
def testConstructed(self):
self.assertTrue(isinstance(self.p.data, TLSServerHello))
# def testDateCorrect(self):
# self.assertEqual(self.p.random_unixtime, 1342710284)
def testRandomCorrect(self):
self.assertEqual(self.p.data.random,
_hexdecode('5008220c8ec43c5462315a7c99f5d5b6bff009ad285b51dc18485f352e9fdecd'))
def testCipherSuite(self):
self.assertEqual(self.p.data.cipher_suite.name, 'TLS_RSA_WITH_NULL_SHA')
def testTotalLength(self):
self.assertEqual(len(self.p), 81)
class TLSMultiFactoryTest(unittest.TestCase):
"Made up test data"
def setUp(self):
self.data = _hexdecode('1703010010' # header 1
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA' # data 1
'1703010010' # header 2
'BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB' # data 2
'1703010010' # header 3
'CCCCCCCC') # data 3 (incomplete)
self.msgs, self.bytes_parsed = TLSMultiFactory(self.data)
def testNumMessages(self):
# only complete messages should be parsed, incomplete ones left
# in buffer
self.assertEqual(len(self.msgs), 2)
def testBytesParsed(self):
self.assertEqual(self.bytes_parsed, (5 + 16) * 2)
def testFirstMsgData(self):
self.assertEqual(self.msgs[0].data, _hexdecode('AA' * 16))
def testSecondMsgData(self):
self.assertEqual(self.msgs[1].data, _hexdecode('BB' * 16))
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "pquerna/tls-client-hello-stats",
"path": "third_party/dpkt/dpkt/ssl.py",
"copies": "1",
"size": "20961",
"license": "apache-2.0",
"hash": 4592996065288656000,
"line_mean": 33.935,
"line_max": 518,
"alpha_frac": 0.64348075,
"autogenerated": false,
"ratio": 3.276692199468501,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9402393423291073,
"avg_score": 0.0035559052354855155,
"num_lines": 600
} |
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
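# Usage sketch: this reader is docutils' default, and each settings_spec
# entry above surfaces as a settings name, e.g.
#
#   from docutils.core import publish_string
#   html = publish_string('Title\n=====\n\nBody.', writer_name='html',
#                         settings_overrides={'doctitle_xform': False})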
| {
"repo_name": "clumsy/intellij-community",
"path": "python/helpers/py2only/docutils/readers/standalone.py",
"copies": "49",
"size": "2328",
"license": "apache-2.0",
"hash": 7674145993452743000,
"line_mean": 35.375,
"line_max": 78,
"alpha_frac": 0.616838488,
"autogenerated": false,
"ratio": 4.434285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
| {
"repo_name": "neumerance/cloudloon2",
"path": ".venv/lib/python2.7/site-packages/docutils/readers/standalone.py",
"copies": "197",
"size": "2340",
"license": "apache-2.0",
"hash": 6805392908729219000,
"line_mean": 34.4545454545,
"line_max": 78,
"alpha_frac": 0.6175213675,
"autogenerated": false,
"ratio": 4.431818181818182,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 66
} |
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import frontend, readers
from docutils.transforms import frontmatter, references, misc
class Reader(readers.Reader):
supported = ('standalone',)
"""Contexts this reader supports."""
document = None
"""A single document tree."""
settings_spec = (
'Standalone Reader',
None,
(('Disable the promotion of a lone top-level section title to '
'document title (and subsequent section title to document '
'subtitle promotion; enabled by default).',
['--no-doc-title'],
{'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Disable the bibliographic field list transform (enabled by '
'default).',
['--no-doc-info'],
{'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1,
'validator': frontend.validate_boolean}),
('Activate the promotion of lone subsection titles to '
'section subtitles (disabled by default).',
['--section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0,
'validator': frontend.validate_boolean}),
('Deactivate the promotion of lone subsection titles.',
['--no-section-subtitles'],
{'dest': 'sectsubtitle_xform', 'action': 'store_false'}),
))
config_section = 'standalone reader'
config_section_dependencies = ('readers',)
def get_transforms(self):
return readers.Reader.get_transforms(self) + [
references.Substitutions,
references.PropagateTargets,
frontmatter.DocTitle,
frontmatter.SectionSubTitle,
frontmatter.DocInfo,
references.AnonymousHyperlinks,
references.IndirectHyperlinks,
references.Footnotes,
references.ExternalTargets,
references.InternalTargets,
references.DanglingReferences,
misc.Transitions,
]
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/readers/standalone.py",
"copies": "2",
"size": "2406",
"license": "bsd-3-clause",
"hash": -5714764106682946000,
"line_mean": 34.4545454545,
"line_max": 78,
"alpha_frac": 0.6005818786,
"autogenerated": false,
"ratio": 4.50561797752809,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.610619985612809,
"avg_score": null,
"num_lines": null
} |
# $Id: startup.py,v 1.1 2011/03/22 02:43:52 paus Exp $
import FWCore.ParameterSet.Config as cms
process = cms.Process('FILEFI')
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration.StandardSequences.GeometryDB_cff')
process.load('Configuration/StandardSequences/MagneticField_38T_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('Mit_020'),
annotation = cms.untracked.string('AODSIM'),
name = cms.untracked.string('BambuProduction')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
fileMode = cms.untracked.string('NOMERGE'),
)
# input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:/data/blue/bendavid/392aodsim/62FE6208-38E8-DF11-A912-0018F3D09648.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*", "drop L1GlobalTriggerObjectMapRecord_hltL1GtObjectMap__HLT")
# other statements
process.GlobalTag.globaltag = 'START311_V2::All'
process.add_(cms.Service("ObjectService"))
process.load("MitProd.BAMBUSequences.BambuFillAODSIM_cfi")
process.MitTreeFiller.TreeWriter.fileName = 'XX-MITDATASET-XX'
process.MitTreeFiller.TreeWriter.maxSize = cms.untracked.uint32(1790)
process.bambu_step = cms.Path(process.BambuFillAODSIM)
# schedule definition
process.schedule = cms.Schedule(process.bambu_step)
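# Typical invocation (standard CMSSW workflow; the 'XX-MITDATASET-XX' output
# name above is a placeholder to substitute before running):
#     cmsRun startup.py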
| {
"repo_name": "cpausmit/Kraken",
"path": "filefi/020/startup.py",
"copies": "1",
"size": "1764",
"license": "mit",
"hash": 8202309571245573000,
"line_mean": 35,
"line_max": 154,
"alpha_frac": 0.7783446712,
"autogenerated": false,
"ratio": 2.9747048903878586,
"config_test": true,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.42530495615878583,
"avg_score": null,
"num_lines": null
} |
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
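# Illustrative sketch of the docstring's "How To Use This Module" steps 2-6
# (not part of the original module; the state, pattern, and input names are
# invented for demonstration).  Call _usage_example() after import.
def _usage_example():
    class ShoutState(State):
        """Upper-case lines starting with '!'; pass other lines through."""
        patterns = {'shout': r'!(?P<rest>.*)',
                    'text': r'.*'}
        initial_transitions = ['shout', 'text']
        def shout(self, match, context, next_state):
            return context, next_state, [match.group('rest').upper()]
        def text(self, match, context, next_state):
            return context, next_state, [match.string]
    sm = StateMachine(state_classes=[ShoutState], initial_state='ShoutState')
    results = sm.run(string2lines('!hello\nworld'))   # steps 4-5
    sm.unlink()                                       # step 6
    return results                                    # ['HELLO', 'world']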
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>sys.stderr, \
('\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def insert_input(self, input_lines, source):
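        """Insert `input_lines` after the current line, surrounded by
        blank "internal padding" lines."""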
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error.args
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>sys.stderr, '%s: %s' % (type, value)
print >>sys.stderr, 'input line %s' % (self.abs_line_number())
print >>sys.stderr, ('module %s, line %s, function %s'
% (module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
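# Small sketch (names invented): observers receive the (source, offset) pair
# that notify_observers() unpacks from input_lines.info() on each line change.
def _observer_example(state_machine):
    def trace(source, offset):
        sys.stderr.write('line from %r, offset %r\n' % (source, offset))
    state_machine.attach_observer(trace)
    return trace   # pass to detach_observer() to stop tracing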
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
        - `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
        Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
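# Illustrative sketch of automatic indent handling (invented names): StateWS
# adds the 'blank' and 'indent' transitions ahead of the subclass's own, and
# indent() recursively runs a machine of the same class over each indented
# block, so nested lines come back dedented.
def _indent_example():
    class Plain(StateWS):
        patterns = {'text': r'\S.*'}
        initial_transitions = ['text']
        def text(self, match, context, next_state):
            return context, next_state, [match.string.strip()]
    sm = StateMachineWS(state_classes=[Plain], initial_state='Plain')
    results = sm.run(string2lines('top\n  one\n  two\nend'))
    sm.unlink()
    return results   # ['top', 'one', 'two', 'end']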
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since native list objects start supporting
# them directly in Python 2.3 (no exception is raised when
# indexing a list with a slice object; they just work).
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
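# Quick sketch of the parent/child "view" semantics described above (demo
# values only): writing through a child slice updates the parent in place.
def _viewlist_example():
    parent = ViewList(['a', 'b', 'c', 'd'], source='<demo>')
    child = parent[1:3]    # linked view of ['b', 'c']
    child[0] = 'B'         # propagates to the parent list
    return parent.data     # ['a', 'B', 'c', 'd']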
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
          - a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
            return # unicodedata.east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
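# Sketch of StringList.get_indented() (demo values only): collect the
# indented block after line 0, strip the common 3-space indent, and report
# whether the block ended with a blank line (here it ran into 'after', so
# blank_finish is false).
def _get_indented_example():
    lines = StringList(['directive::', '   one', '   two', 'after'],
                       source='<demo>')
    block, indent, blank_finish = lines.get_indented(start=1)
    return block.data, indent, blank_finish   # (['one', 'two'], 3, False)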
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
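# Quick check of string2lines() (demo input): the tab expands to the next
# multiple of tab_width and trailing whitespace disappears; with
# convert_whitespace true, the form feed becomes a plain space.
def _string2lines_example():
    return string2lines('one\ttwo  \n\fthree', convert_whitespace=1)
    # -> ['one     two', ' three']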
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "umitproject/tease-o-matic",
"path": "docutils/statemachine.py",
"copies": "8",
"size": "55215",
"license": "bsd-3-clause",
"hash": -2549302872578249000,
"line_mean": 36.2319622387,
"line_max": 80,
"alpha_frac": 0.5871230644,
"autogenerated": false,
"ratio": 4.394349383207322,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8981472447607323,
"avg_score": null,
"num_lines": null
} |
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>sys.stderr, \
('\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def insert_input(self, input_lines, source):
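        """
        Insert `input_lines` (from `source`) directly after the current
        line, wrapped in blank "internal padding" lines, so that the new
        lines are processed next.
        """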
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding')
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error.args
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>sys.stderr, '%s: %s' % (type, value)
print >>sys.stderr, 'input line %s' % (self.abs_line_number())
print >>sys.stderr, ('module %s, line %s, function %s'
% (module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
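    # Illustrative sketch (not part of the original module): observers
    # receive the (source, offset) info of each new current line. The
    # names `sm`, `positions`, and `record` are hypothetical.
    #
    #     positions = []
    #     def record(source, offset):
    #         positions.append((source, offset))
    #     sm.attach_observer(record)    # invoked on every line change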
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
        - `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
        except (KeyError, ValueError):
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
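    # Illustrative sketch of the accepted `name_list` entries (hypothetical
    # names; the state must define matching patterns & methods): a bare
    # string keeps the current state, a 2-tuple names the next state.
    #
    #     names, transitions = state.make_transitions(
    #         ['blank', ('indent', 'Indent')])
    #     # names == ['blank', 'indent']; each value in `transitions` is a
    #     # (compiled_pattern, bound_method, next_state_name) 3-tuple.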
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
        Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since native list objects start supporting
# them directly in Python 2.3 (no exception is raised when
# indexing a list with a slice object; they just work).
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
        if isinstance(other, ViewList):
            self.data += other.data
            self.items += other.items
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
          - a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
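    # Illustrative sketch: extracting the indented block starting at index 1
    # of a hypothetical StringList (trailing blank lines stay in the block).
    #
    #     lines = StringList(['para:', '   one', '   two', '', 'next'],
    #                        source='test')
    #     block, indent, blank_finish = lines.get_indented(start=1)
    #     # block.data == ['one', 'two', ''], indent == 3, blank_finish true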
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
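        """
        Return a rectangular slice: lines `top` up to (but not including)
        `bottom`, columns `left` to `right`, right-stripped, with common
        leading indent removed if `strip_indent` is true.
        """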
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
            return  # unicodedata.east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
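    # Illustrative sketch (assumed values): in u'a\u6f22b' the character
    # u'\u6f22' is 'W'ide, so pad_double_width(u'\x00') rewrites the line
    # as u'a\u6f22\x00b', making display columns and string items line up.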
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
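# Illustrative sketch: a tab after 'one' (3 columns) expands to the next
# multiple of `tab_width`, and trailing whitespace is stripped.
#
#     >>> string2lines('one\ttwo\n\nthree  \n')
#     ['one     two', '', 'three']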
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
# $Id: statemachine.py 6388 2010-08-13 12:24:34Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For centralfitestoque, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>sys.stderr, ('\nStateMachine.run: bof transition')
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>sys.stderr, (
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>sys.stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>sys.stderr, \
('\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to count for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except (TypeError):
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except (IndexError): # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error.args
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>sys.stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>sys.stderr, '%s: %s' % (type, value)
print >>sys.stderr, 'input line %s' % (self.abs_line_number())
print >>sys.stderr, ('module %s, line %s, function %s'
% (module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the retured context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
- `statemachine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
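# Illustrative sketch (not part of docutils): a minimal `State` subclass
# driven by a `StateMachine`.  The state, patterns, and sample input are
# invented for demonstration only.
def _example_run_state_machine():
    """Collect '- ' list items; pass other lines through unchanged."""
    class ItemState(State):
        patterns = {'item': r'- ', 'other': r''}
        initial_transitions = ['item', 'other']
        def item(self, match, context, next_state):
            # Strip the bullet; returning next_state keeps the same state.
            return context, next_state, [self.state_machine.line[match.end():]]
        def other(self, match, context, next_state):
            return context, next_state, [self.state_machine.line]
    sm = StateMachine(state_classes=[ItemState], initial_state='ItemState')
    results = sm.run(['- one', 'plain', '- two'])
    sm.unlink()
    return results  # ['one', 'plain', 'two']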
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
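# Illustrative sketch (not part of docutils): the three extraction methods
# above delegate to `StringList.get_indented()` (defined later in this
# module); the calls below show the unknown-indent and `block_indent`
# variants on invented sample lines.
def _example_indent_extraction():
    lines = StringList(['  a', '  b', 'c'], source='<sample>')
    # Unknown indent: the common indent (2) is measured and stripped.
    block, indent, blank_finish = lines.get_indented()
    assert list(block) == ['a', 'b'] and indent == 2
    # Known indent for all lines: pass block_indent instead.
    block, indent, blank_finish = lines.get_indented(block_indent=2)
    assert list(block) == ['a', 'b']
    return block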
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
        Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
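# Illustrative sketch (not part of docutils): `StateWS` plus
# `StateMachineWS`, showing the automatic 'indent' transition spawning a
# nested state machine over the indented block.  All names are invented.
def _example_indented_blocks():
    class Plain(StateWS):
        patterns = {'text': r''}
        initial_transitions = ['text']
        def text(self, match, context, next_state):
            return context, next_state, ['text: ' + self.state_machine.line]
    sm = StateMachineWS(state_classes=[Plain], initial_state='Plain')
    results = sm.run(['top', '    nested', 'after'])
    sm.unlink()
    return results  # ['text: top', 'text: nested', 'text: after']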
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
    Changes regular expression matching from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
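# Illustrative sketch (not part of docutils): `_SearchOverride.match()`
# delegates to `re.search()`, so a pattern can hit anywhere in the current
# line.  The pattern and line are invented; `match()` is exercised
# directly here rather than through a full `run()`.
def _example_search_override():
    sm = SearchStateMachine(state_classes=[], initial_state='none')
    sm.line = 'hay needle hay'
    return sm.match(re.compile('needle'))  # match object starting at column 4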
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print "%s:%d:%s" % line
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
        - a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
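        """
        Return a 2D block: lines `top` to `bottom`, cut to columns `left`
        to `right` and right-stripped. Common indentation within the
        column window is removed unless `strip_indent` is false.
        """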
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
            return  # unicodedata.east_asian_width() is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
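# Illustrative sketch (not part of docutils): `get_text_block()` collects
# lines up to the first blank line; with `flush_left` set, an indented
# line raises `UnexpectedIndentationError` (defined just below).  Sample
# data is invented.
def _example_text_block():
    lines = StringList(['one', 'two', '', 'three'], source='<sample>')
    block = lines.get_text_block(0)
    assert list(block) == ['one', 'two']
    try:
        StringList(['one', '  two'], source='<s>').get_text_block(
            0, flush_left=1)
    except UnexpectedIndentationError, error:
        block, source, offset = error.args
    return block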
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
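# Illustrative sketch (not part of docutils): tabs expand to the next
# multiple of `tab_width` and trailing whitespace is stripped.
def _example_string2lines():
    lines = string2lines('ab\tc\nd  \n', tab_width=4)
    assert lines == ['ab  c', 'd']
    return lines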
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "akiokio/centralfitestoque",
"path": "src/.pycharm_helpers/docutils/statemachine.py",
"copies": "1",
"size": "57006",
"license": "bsd-2-clause",
"hash": 8387044142694240000,
"line_mean": 36.3809836066,
"line_max": 80,
"alpha_frac": 0.5857453601,
"autogenerated": false,
"ratio": 4.385076923076923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004301106649088642,
"num_lines": 1525
} |
# $Id: statemachine.py 7037 2011-05-19 08:56:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
         and/or `StateWS.first_known_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
        line changes. Observers are called with two arguments, the source
        and offset of the current line. Cleared at the end of `run()`."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>self._stderr, (
u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, u'\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>self._stderr, '\nStateMachine.run: bof transition'
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>self._stderr, (
u'\nStateMachine.run: line (source=%r, '
u'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
        StringList instance to account for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
        except TypeError:
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
        except IndexError:  # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error.args
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>self._stderr, u'%s: %s' % (type, value)
print >>self._stderr, 'input line %s' % (self.abs_line_number())
print >>self._stderr, (u'module %s, line %s, function %s' %
(module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
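# Illustrative sketch (not part of docutils): observers receive the
# (source, offset) of the current line whenever it changes; note that
# `run()` clears the observer list when it finishes.  `sm` is assumed to
# be an already-constructed `StateMachine` instance.
def _attach_line_logger(sm):
    def observer(source, offset):
        print >>sys.stderr, 'now at %s:%s' % (source, offset)
    sm.attach_observer(observer)
    return observer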
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
        - `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
        except (KeyError, ValueError):
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
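# Illustrative sketch (not part of docutils): the usage steps from the
# module docstring in one place, including the implicit `bof()`/`eof()`
# transitions.  All names are invented; input would normally come from
# `string2lines()`.
def _example_usage():
    class Echo(State):
        patterns = {'any': r''}
        initial_transitions = ['any']
        def bof(self, context):
            return context, ['<begin>']  # implicit beginning-of-file
        def eof(self, context):
            return ['<end>']             # implicit end-of-file
        def any(self, match, context, next_state):
            return context, next_state, [self.state_machine.line]
    sm = StateMachine(state_classes=[Echo], initial_state='Echo')
    results = sm.run(['one', 'two'])
    sm.unlink()
    return results  # ['<begin>', 'one', 'two', '<end>']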
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
        Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
    Changes regular expression matching from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print "%s:%d:%s" % line
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
          - a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
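        """
        Return a 2D block of text: lines `top` to `bottom`, each cut to
        the column interval `left` to `right` and right-stripped. If
        `strip_indent` is true, also remove any indentation (within the
        cut columns) common to all non-blank lines of the block.
        """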
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
            return  # east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
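# --- Illustrative sketch (hypothetical helper, not part of the docutils
# API): `StringList.get_indented()` collects the indented block starting
# at `start`, strips the common indent, and reports whether the block
# ended with a blank line (here the blank line before 'flush left').
def _get_indented_demo():
    lines = StringList(['Term:',
                        '    indented one',
                        '      indented two',
                        '',
                        'flush left'], source='demo')
    block, indent, blank_finish = lines.get_indented(start=1)
    assert block.data == ['indented one', '  indented two', '']
    assert indent == 4 and blank_finish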
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
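# --- Illustrative sketch: `string2lines()` expands each tab to the next
# multiple of `tab_width` and strips trailing whitespace per line.
def _string2lines_demo():
    assert string2lines('col1\tcol2\nrow  \n') == ['col1    col2', 'row']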
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "johngian/remo",
"path": "vendor-local/lib/python/docutils/statemachine.py",
"copies": "6",
"size": "57172",
"license": "bsd-3-clause",
"hash": -4942795082218577000,
"line_mean": 36.3917593198,
"line_max": 80,
"alpha_frac": 0.5861260757,
"autogenerated": false,
"ratio": 4.386374098511585,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004914148524791128,
"num_lines": 1529
} |
# $Id: statemachine.py 7037 2011-05-19 08:56:27Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
     and/or `StateWS.first_known_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=0):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>self._stderr, (
u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, u'\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>self._stderr, '\nStateMachine.run: bof transition'
context, result = state.bof(context)
results.extend(result)
while 1:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>self._stderr, (
u'\nStateMachine.run: line (source=%r, '
u'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to count for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except (TypeError):
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except (IndexError): # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
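        """
        Insert `input_lines` after the current line, framed by two blank
        "internal padding" lines, so the inserted text (e.g. an included
        file) stays separate from the surrounding source.
        """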
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, error:
block, source, lineno = error.args
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>self._stderr, u'%s: %s' % (type, value)
print >>self._stderr, 'input line %s' % (self.abs_line_number())
print >>self._stderr, (u'module %s, line %s, function %s' %
(module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
      machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=0):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
        - `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=0, strip_indent=1):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip common leading indent if true (1,
default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=0, strip_indent=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=0, strip_indent=1,
strip_top=1):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true
(1).
- `strip_indent`: Strip `indent` characters of indentation if true
(1, default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `first_known_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=0):
"""
        Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
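# --- Illustrative sketch (hypothetical names, not part of the docutils
# API): the automatic 'indent' transition spawns a nested state machine
# (by default the same machine and state classes) for the indented block;
# here the overriding transition method wraps the nested results.
def _indent_example():
    class _Outer(StateWS):
        patterns = {'text': '.'}
        initial_transitions = ['text']
        def text(self, match, context, next_state):
            return context, next_state, ['line: ' + self.state_machine.line]
        def indent(self, match, context, next_state):
            context, next_state, results = StateWS.indent(
                self, match, context, next_state)
            return context, next_state, [('indented', results)]
    sm = StateMachineWS(state_classes=[_Outer], initial_state='_Outer')
    results = sm.run(string2lines('top\n  nested\nend'))
    sm.unlink()
    assert results == ['line: top', ('indented', ['line: nested']),
                       'line: end']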
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print "%s:%d:%s" % line
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=0):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=0, strip_indent=1,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
          - a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=1):
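        """
        Return a 2D block of text: lines `top` to `bottom`, each cut to
        the column interval `left` to `right` and right-stripped. If
        `strip_indent` is true, also remove any indentation (within the
        cut columns) common to all non-blank lines of the block.
        """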
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
            return  # east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=0,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
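# Illustrative sketch (not part of docutils): `string2lines()` expands each
# tab to the next multiple of `tab_width` and strips trailing whitespace.
_demo_lines = string2lines('one\ttab  \nplain', tab_width=8)
# -> ['one     tab', 'plain']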
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "cuongthai/cuongthai-s-blog",
"path": "docutils/statemachine.py",
"copies": "2",
"size": "58701",
"license": "bsd-3-clause",
"hash": -5418455314101009000,
"line_mean": 36.3917593198,
"line_max": 80,
"alpha_frac": 0.5708590995,
"autogenerated": false,
"ratio": 4.4751848745902265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6046043974090226,
"avg_score": null,
"num_lines": null
} |
# $Id: statemachine.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module defines a finite state machine specialized for
regular-expression-based text filters. It provides the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils import utils
from docutils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=False):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes. Observers are called with one argument, ``self``.
Cleared at the end of `run()`."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in self.states.values():
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print >>self._stderr, (
u'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, u'\n| '.join(self.input_lines)))
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print >>self._stderr, '\nStateMachine.run: bof transition'
context, result = state.bof(context)
results.extend(result)
while True:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print >>self._stderr, (
u'\nStateMachine.run: line (source=%r, '
u'offset=%r):\n| %s'
% (source, offset, self.line))
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection, exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0]))
continue
except StateCorrection, exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print >>self._stderr, (
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0]))
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print >>self._stderr, (
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number()))
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to account for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except TypeError:
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except IndexError: # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
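"""Insert `input_lines` after the current line, bracketed by blank
'internal padding' marker lines that record the source and offsets."""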
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError, err:
block = err.args[0]
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions))
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__))
return method(match, context, next_state)
else:
if self.debug:
print >>self._stderr, (
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in self.states.values():
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print >>self._stderr, u'%s: %s' % (type, value)
print >>self._stderr, 'input line %s' % (self.abs_line_number())
print >>self._stderr, (u'module %s, line %s, function %s' %
(module, line, function))
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
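# Illustrative sketch (not part of docutils): an observer is called with the
# (source, offset) of each new current line.  `_trace` is an example name:
#
#     def _trace(source, offset):
#         print 'now at %s:%s' % (source, offset)
#     sm.attach_observer(_trace)    # `sm` is any StateMachine instance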
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=False):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
- `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true.
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except:
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
stringtype = type('')
names = []
transitions = {}
for namestate in name_list:
if type(namestate) is stringtype:
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
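# Illustrative sketch (not part of docutils): a minimal one-state machine
# that upper-cases each word line.  `_ShoutState` is an example name only.
class _ShoutState(State):
    patterns = {'word': r'\w+', 'blank': r' *$'}
    initial_transitions = ['word', 'blank']
    def word(self, match, context, next_state):
        # emit the upper-cased line as this transition's result
        return context, next_state, [match.string.upper()]
    def blank(self, match, context, next_state):
        return context, next_state, []
# _sm = StateMachine(state_classes=[_ShoutState], initial_state='_ShoutState')
# _sm.run(['hello', '', 'world'])  # -> ['HELLO', 'WORLD']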
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=False, strip_indent=True):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=False, strip_indent=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true
(default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=False,
strip_indent=True, strip_top=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true (default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
The methods `known_indent()` and `firstknown_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=False):
"""
Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
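# Illustrative sketch (not part of docutils): a typical StateWS transition
# hands its indented block to `get_first_known_indented()` and parses it with
# a nested state machine (`_Body` and `parse_item` are hypothetical names):
#
#     class _Body(StateWS):
#         patterns = {'bullet': r'[-*] '}
#         initial_transitions = ['bullet']
#         def bullet(self, match, context, next_state):
#             indented, indent, offset, blank_finish = \
#                 self.state_machine.get_first_known_indented(match.end())
#             return context, next_state, self.parse_item(indented, offset)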
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
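# Illustrative sketch (not part of docutils): the Search* variants fire when
# a pattern occurs anywhere in the line, not only at its start.
_todo = re.compile('TODO')
# _todo.match('x = 1  # TODO')   -> None (anchored at the line start)
# _todo.search('x = 1  # TODO')  -> a match object (found at column 9)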
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, types.SliceType):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = zip(self.data, self.items)
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print "%s:%d:%s" % line
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxint):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
def get_indented(self, start=0, until_blank=False, strip_indent=True,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
- a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
# Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
def get_2D_block(self, top, left, bottom, right, strip_indent=True):
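"""Return the rectangle bounded by rows `top`:`bottom` and columns
`left`:`right` of `self` (column indices adjusted for combining
characters), rstripped, with common indent removed unless
`strip_indent` is false."""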
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
# get slice from line, care for combining characters; use local copies
# so one line's adjustment cannot skew the next line's columns
ci = utils.column_indices(block.data[i])
try:
left_ix = ci[left]
except IndexError:
left_ix = left + len(block.data[i]) - len(ci)
try:
right_ix = ci[right]
except IndexError:
right_ix = right + len(block.data[i]) - len(ci)
block.data[i] = line = block.data[i][left_ix:right_ix].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # unicodedata.east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, unicode):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
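# Illustrative sketch (not part of docutils): extracting an indented block
# with `get_indented()`.  `_src` and friends are example names only.
_src = StringList(['  a', '    b', '', 'flush left'], source='demo')
_blk, _indent, _blank_finish = _src.get_indented()
# -> _blk == ['a', '  b', ''] (common 2-space indent stripped, trailing
#    blank kept), _indent == 2, and _blank_finish is true.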
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=False,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "neumerance/deploy",
"path": ".venv/lib/python2.7/site-packages/docutils/statemachine.py",
"copies": "4",
"size": "57566",
"license": "apache-2.0",
"hash": 150045733607720580,
"line_mean": 36.4291287386,
"line_max": 80,
"alpha_frac": 0.5861967133,
"autogenerated": false,
"ratio": 4.390329469188529,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6976526182488529,
"avg_score": null,
"num_lines": null
} |
# $Id: statemachine.py 7464 2012-06-25 13:16:03Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This module defines a finite state machine specialized for
regular-expression-based text filters. It provides the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import re
import sys
import unicodedata
from docutils import utils
from docutils.utils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=False):
"""
Initialize a `StateMachine` object; add state objects.
Parameters:
- `state_classes`: a list of `State` (sub)classes.
- `initial_state`: a string, the class name of the initial state.
- `debug`: a boolean; produce verbose output if true (nonzero).
"""
self.input_lines = None
"""`StringList` of input lines (without newlines).
Filled by `self.run()`."""
self.input_offset = 0
"""Offset of `self.input_lines` from the beginning of the file."""
self.line = None
"""Current input line."""
self.line_offset = -1
"""Current input line offset from beginning of `self.input_lines`."""
self.debug = debug
"""Debugging mode on/off."""
self.initial_state = initial_state
"""The name of the initial state (key to `self.states`)."""
self.current_state = initial_state
"""The name of the current state (key to `self.states`)."""
self.states = {}
"""Mapping of {state_name: State_object}."""
self.add_states(state_classes)
self.observers = []
"""List of bound methods or functions to call whenever the current
line changes, with two arguments: the source and offset of the current
line (see `notify_observers()`).
Cleared at the end of `run()`."""
self._stderr = ErrorOutput()
"""Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
"""Remove circular references to objects no longer required."""
for state in list(self.states.values()):
state.unlink()
self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print((
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines))), file=self._stderr)
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print('\nStateMachine.run: bof transition', file=self._stderr)
context, result = state.bof(context)
results.extend(result)
while True:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print((
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line)), file=self._stderr)
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print((
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__), file=self._stderr)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection as exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print((
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0])), file=self._stderr)
continue
except StateCorrection as exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print((
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0])), file=self._stderr)
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print((
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number())), file=self._stderr)
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to account for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except TypeError:
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except IndexError: # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
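"""Insert `input_lines` after the current line, bracketed by blank
"internal padding" lines; `source` names the inserted text's origin."""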
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError as err:
block = err.args[0]
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print((
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions)), file=self._stderr)
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print((
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__)), file=self._stderr)
return method(match, context, next_state)
else:
if self.debug:
print((
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__), file=self._stderr)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in list(self.states.values()):
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print('%s: %s' % (type, value), file=self._stderr)
print('input line %s' % (self.abs_line_number()), file=self._stderr)
print(('module %s, line %s, function %s' %
(module, line, function)), file=self._stderr)
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
"""
State superclass. Contains a list of transitions, and transition methods.
Transition methods all have the same signature. They take 3 parameters:
- An `re` match object. ``match.string`` contains the matched input line,
``match.start()`` gives the start index of the match, and
``match.end()`` gives the end index.
- A context object, whose meaning is application-defined (initial value
``None``). It can be used to store any information required by the state
machine, and the returned context is passed on to the next transition
method unchanged.
- The name of the next state, a string, taken from the transitions list;
normally it is returned unchanged, but it may be altered by the
transition method if necessary.
Transition methods all return a 3-tuple:
- A context object, as (potentially) modified by the transition method.
- The next state name (a return value of ``None`` means no state change).
- The processing result, a list, which is accumulated by the state
machine.
Transition methods may raise an `EOFError` to cut processing short.
There are two implicit transitions, and corresponding transition methods
are defined: `bof()` handles the beginning-of-file, and `eof()` handles
the end-of-file. These methods have non-standard signatures and return
values. `bof()` returns the initial context and results, and may be used
to return a header string, or do any other processing needed. `eof()`
should handle any remaining context and wrap things up; it returns the
final processing result.
Typical applications need only subclass `State` (or a subclass), set the
`patterns` and `initial_transitions` class attributes, and provide
corresponding transition methods. The default object initialization will
take care of constructing the list of transitions.
"""
patterns = None
"""
{Name: pattern} mapping, used by `make_transition()`. Each pattern may
be a string or a compiled `re` pattern. Override in subclasses.
"""
initial_transitions = None
"""
A list of transitions to initialize when a `State` is instantiated.
Each entry is either a transition name string, or a (transition name, next
state name) pair. See `make_transitions()`. Override in subclasses.
"""
nested_sm = None
"""
The `StateMachine` class for handling nested processing.
If left as ``None``, `nested_sm` defaults to the class of the state's
controlling state machine. Override it in subclasses to avoid the default.
"""
nested_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `nested_sm` constructor.
Two keys must have entries in the dictionary:
- Key 'state_classes' must be set to a list of `State` classes.
- Key 'initial_state' must be set to the name of the initial state class.
If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
class of the current state, and 'initial_state' defaults to the name of
the class of the current state. Override in subclasses to avoid the
defaults.
"""
def __init__(self, state_machine, debug=False):
"""
Initialize a `State` object; make & add initial transitions.
Parameters:
- `state_machine`: the controlling `StateMachine` object.
- `debug`: a boolean; produce verbose output if true.
"""
self.transition_order = []
"""A list of transition names in search order."""
self.transitions = {}
"""
A mapping of transition names to 3-tuples containing
(compiled_pattern, transition_method, next_state_name). Initialized as
an instance attribute dynamically (instead of as a class attribute)
because it may make forward references to patterns and methods in this
or other classes.
"""
self.add_initial_transitions()
self.state_machine = state_machine
"""A reference to the controlling `StateMachine` object."""
self.debug = debug
"""Debugging mode on/off."""
if self.nested_sm is None:
self.nested_sm = self.state_machine.__class__
if self.nested_sm_kwargs is None:
self.nested_sm_kwargs = {'state_classes': [self.__class__],
'initial_state': self.__class__.__name__}
def runtime_init(self):
"""
Initialize this `State` before running the state machine; called from
`self.state_machine.run()`.
"""
pass
def unlink(self):
"""Remove circular references to objects no longer required."""
self.state_machine = None
def add_initial_transitions(self):
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
"""
Add a list of transitions to the start of the transition list.
Parameters:
- `names`: a list of transition names.
- `transitions`: a mapping of names to transition tuples.
Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
"""
for name in names:
if name in self.transitions:
raise DuplicateTransitionError(name)
if name not in transitions:
raise UnknownTransitionError(name)
self.transition_order[:0] = names
self.transitions.update(transitions)
def add_transition(self, name, transition):
"""
Add a transition to the start of the transition list.
Parameter `transition`: a ready-made transition 3-tuple.
Exception: `DuplicateTransitionError`.
"""
if name in self.transitions:
raise DuplicateTransitionError(name)
self.transition_order[:0] = [name]
self.transitions[name] = transition
def remove_transition(self, name):
"""
Remove a transition by `name`.
Exception: `UnknownTransitionError`.
"""
try:
del self.transitions[name]
self.transition_order.remove(name)
except (KeyError, ValueError):
raise UnknownTransitionError(name)
def make_transition(self, name, next_state=None):
"""
Make & return a transition tuple based on `name`.
This is a convenience function to simplify transition creation.
Parameters:
- `name`: a string, the name of the transition pattern & method. This
`State` object must have a method called '`name`', and a dictionary
`self.patterns` containing a key '`name`'.
- `next_state`: a string, the name of the next `State` object for this
transition. A value of ``None`` (or absent) implies no state change
(i.e., continue with the same state).
Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
"""
if next_state is None:
next_state = self.__class__.__name__
try:
pattern = self.patterns[name]
if not hasattr(pattern, 'match'):
pattern = re.compile(pattern)
except KeyError:
raise TransitionPatternNotFound(
'%s.patterns[%r]' % (self.__class__.__name__, name))
try:
method = getattr(self, name)
except AttributeError:
raise TransitionMethodNotFound(
'%s.%s' % (self.__class__.__name__, name))
return (pattern, method, next_state)
def make_transitions(self, name_list):
"""
Return a list of transition names and a transition mapping.
Parameter `name_list`: a list, where each entry is either a transition
name string, or a 1- or 2-tuple (transition name, optional next state
name).
"""
names = []
transitions = {}
for namestate in name_list:
if isinstance(namestate, str):
transitions[namestate] = self.make_transition(namestate)
names.append(namestate)
else:
transitions[namestate[0]] = self.make_transition(*namestate)
names.append(namestate[0])
return names, transitions
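# Example sketch (hypothetical transition names): mixed string and tuple
# entries:
#
#     names, transitions = state.make_transitions(['blank', ('indent', 'Body')])
#     # names == ['blank', 'indent']; 'indent' switches to state 'Body',
#     # while 'blank' stays in the current state.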
def no_match(self, context, transitions):
"""
Called when there is no match from `StateMachine.check_line()`.
Return the same values returned by transition methods:
- context: unchanged;
- next state name: ``None``;
- empty result list.
Override in subclasses to catch this event.
"""
return context, None, []
def bof(self, context):
"""
Handle beginning-of-file. Return unchanged `context`, empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return context, []
def eof(self, context):
"""
Handle end-of-file. Return empty result.
Override in subclasses.
Parameter `context`: application-defined storage.
"""
return []
def nop(self, match, context, next_state):
"""
A "do nothing" transition method.
Return unchanged `context` & `next_state`, empty result. Useful for
simple state changes (actionless transitions).
"""
return context, next_state, []
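# Example: a minimal sketch of a complete state machine built from the
# pieces above. `EchoState` and the input lines are hypothetical; the
# machine echoes every non-blank line:
#
#     class EchoState(State):
#         patterns = {'word': r'\S.*', 'blank': ' *$'}
#         initial_transitions = ['word', 'blank']
#         def word(self, match, context, next_state):
#             return context, next_state, [match.string]
#         def blank(self, match, context, next_state):
#             return context, next_state, []
#
#     sm = StateMachine(state_classes=[EchoState], initial_state='EchoState')
#     sm.run(['hello', '', 'world'])   # -> ['hello', 'world']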
class StateMachineWS(StateMachine):
"""
`StateMachine` subclass specialized for whitespace recognition.
There are three methods provided for extracting indented text blocks:
- `get_indented()`: use when the indent is unknown.
- `get_known_indented()`: use when the indent is known for all lines.
- `get_first_known_indented()`: use when only the first line's indent is
known.
"""
def get_indented(self, until_blank=False, strip_indent=True):
"""
Return a block of indented lines of text, and info.
Extract an indented block where the indent is unknown for all lines.
:Parameters:
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
:Return:
- the indented block (a list of lines of text),
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent)
if indented:
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
def get_known_indented(self, indent, until_blank=False, strip_indent=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for all lines.
Starting with the current line, extract the entire text block with at
least `indent` indentation (which must be whitespace, except for the
first line).
:Parameters:
- `indent`: The number of indent columns/characters.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true
(default).
:Return:
- the indented block,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
block_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, offset, blank_finish
def get_first_known_indented(self, indent, until_blank=False,
strip_indent=True, strip_top=True):
"""
Return an indented block and info.
Extract an indented block where the indent is known for the first line
and unknown for all other lines.
:Parameters:
- `indent`: The first line's indent (# of columns/characters).
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip `indent` characters of indentation if true
(default).
- `strip_top`: Strip blank lines from the beginning of the block.
:Return:
- the indented block,
- its indent,
- its first line offset from BOF, and
- whether or not it finished with a blank line.
"""
offset = self.abs_line_offset()
indented, indent, blank_finish = self.input_lines.get_indented(
self.line_offset, until_blank, strip_indent,
first_indent=indent)
self.next_line(len(indented) - 1) # advance to last indented line
if strip_top:
while indented and not indented[0].strip():
indented.trim_start()
offset += 1
return indented, indent, offset, blank_finish
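# Example sketch (hypothetical `sm`, a running StateMachineWS positioned
# on the first line of an indented block):
#
#     block, indent, offset, blank_finish = sm.get_indented()
#     block, offset, blank_finish = sm.get_known_indented(indent=2)
#     block, indent, offset, blank_finish = sm.get_first_known_indented(indent=2)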
class StateWS(State):
"""
State superclass specialized for whitespace (blank lines & indents).
Use this class with `StateMachineWS`. The transitions 'blank' (for blank
lines) and 'indent' (for indented text blocks) are added automatically,
before any other transitions. The transition method `blank()` handles
blank lines and `indent()` handles nested indented blocks. Indented
blocks trigger a new state machine to be created by `indent()` and run.
The class of the state machine to be created is in `indent_sm`, and the
constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
The methods `known_indent()` and `firstknown_indent()` are provided for
indented blocks where the indent (all lines' and first line's only,
respectively) is known to the transition method, along with the attributes
`known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
is triggered automatically.
"""
indent_sm = None
"""
The `StateMachine` class handling indented text blocks.
If left as ``None``, `indent_sm` defaults to the value of
`State.nested_sm`. Override it in subclasses to avoid the default.
"""
indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `indent_sm` constructor.
If left as ``None``, `indent_sm_kwargs` defaults to the value of
`State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
"""
known_indent_sm = None
"""
The `StateMachine` class handling known-indented text blocks.
If left as ``None``, `known_indent_sm` defaults to the value of
`indent_sm`. Override it in subclasses to avoid the default.
"""
known_indent_sm_kwargs = None
"""
Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
`indent_sm_kwargs`. Override it in subclasses to avoid the default.
"""
ws_patterns = {'blank': ' *$',
'indent': ' +'}
"""Patterns for default whitespace transitions. May be overridden in
subclasses."""
ws_initial_transitions = ('blank', 'indent')
"""Default initial whitespace transitions, added before those listed in
`State.initial_transitions`. May be overridden in subclasses."""
def __init__(self, state_machine, debug=False):
"""
Initialize a `StateWS` object; extends `State.__init__()`.
Check for indent state machine attributes, set defaults if not set.
"""
State.__init__(self, state_machine, debug)
if self.indent_sm is None:
self.indent_sm = self.nested_sm
if self.indent_sm_kwargs is None:
self.indent_sm_kwargs = self.nested_sm_kwargs
if self.known_indent_sm is None:
self.known_indent_sm = self.indent_sm
if self.known_indent_sm_kwargs is None:
self.known_indent_sm_kwargs = self.indent_sm_kwargs
def add_initial_transitions(self):
"""
Add whitespace-specific transitions before those defined in subclass.
Extends `State.add_initial_transitions()`.
"""
State.add_initial_transitions(self)
if self.patterns is None:
self.patterns = {}
self.patterns.update(self.ws_patterns)
names, transitions = self.make_transitions(
self.ws_initial_transitions)
self.add_transitions(names, transitions)
def blank(self, match, context, next_state):
"""Handle blank lines. Does nothing. Override in subclasses."""
return self.nop(match, context, next_state)
def indent(self, match, context, next_state):
"""
Handle an indented text block. Extend or override in subclasses.
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def known_indent(self, match, context, next_state):
"""
Handle a known-indent text block. Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
def first_known_indent(self, match, context, next_state):
"""
Handle an indented text block (first line's indent known).
Extend or override in subclasses.
Recursively run the registered state machine for known-indent indented
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
indented, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
**self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
class _SearchOverride:
"""
Mix-in class to override `StateMachine` regular expression behavior.
Changes regular expression matching, from the default `re.match()`
(succeeds only if the pattern matches at the start of `self.line`) to
`re.search()` (succeeds if the pattern matches anywhere in `self.line`).
When subclassing a `StateMachine`, list this class **first** in the
inheritance list of the class definition.
"""
def match(self, pattern):
"""
Return the result of a regular expression search.
Overrides `StateMachine.match()`.
Parameter `pattern`: `re` compiled regular expression.
"""
return pattern.search(self.line)
class SearchStateMachine(_SearchOverride, StateMachine):
"""`StateMachine` which uses `re.search()` instead of `re.match()`."""
pass
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
"""`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
pass
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, slice):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, slice):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
def extend(self, other):
if not isinstance(other, ViewList):
raise TypeError('extending a ViewList with a non-ViewList')
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, other)
self.data.extend(other.data)
self.items.extend(other.items)
def append(self, item, source=None, offset=0):
if source is None:
self.extend(item)
else:
if self.parent:
self.parent.insert(len(self.data) + self.parent_offset, item,
source, offset)
self.data.append(item)
self.items.append((source, offset))
def insert(self, i, item, source=None, offset=0):
if source is None:
if not isinstance(item, ViewList):
raise TypeError('inserting non-ViewList with no source given')
self.data[i:i] = item.data
self.items[i:i] = item.items
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item)
else:
self.data.insert(i, item)
self.items.insert(i, (source, offset))
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.insert(index + self.parent_offset, item,
source, offset)
def pop(self, i=-1):
if self.parent:
index = (len(self.data) + i) % len(self.data)
self.parent.pop(index + self.parent_offset)
self.items.pop(i)
return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = list(zip(self.data, self.items))
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
def info(self, i):
"""Return source & offset for index `i`."""
try:
return self.items[i]
except IndexError:
if i == len(self.data): # Just past the end
return self.items[i - 1][0], None
else:
raise
def source(self, i):
"""Return source for index `i`."""
return self.info(i)[0]
def offset(self, i):
"""Return offset for index `i`."""
return self.info(i)[1]
def disconnect(self):
"""Break link between this list and parent list."""
self.parent = None
def xitems(self):
"""Return iterator yielding (source, offset, value) tuples."""
for (value, (source, offset)) in zip(self.data, self.items):
yield (source, offset, value)
def pprint(self):
"""Print the list in `grep` format (`source:offset:value` lines)"""
for line in self.xitems():
print("%s:%d:%s" % line)
class StringList(ViewList):
"""A `ViewList` with string-specific methods."""
def trim_left(self, length, start=0, end=sys.maxsize):
"""
Trim `length` characters off the beginning of each item, in-place,
from index `start` to `end`. No whitespace-checking is done on the
trimmed text. Does not affect slice parent.
"""
self.data[start:end] = [line[length:]
for line in self.data[start:end]]
def get_text_block(self, start, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
end = start
last = len(self.data)
while end < last:
line = self.data[end]
if not line.strip():
break
if flush_left and (line[0] == ' '):
source, offset = self.info(end)
raise UnexpectedIndentationError(self[start:end], source,
offset + 1)
end += 1
return self[start:end]
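# Example (hypothetical input): a text block stops at the first blank line:
#
#     >>> sl = StringList(['one', 'two', '', 'three'], source='<test>')
#     >>> list(sl.get_text_block(0))
#     ['one', 'two']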
def get_indented(self, start=0, until_blank=False, strip_indent=True,
block_indent=None, first_indent=None):
"""
Extract and return a StringList of indented lines of text.
Collect all lines with indentation, determine the minimum indentation,
remove the minimum indentation from all indented lines (unless
`strip_indent` is false), and return them. All lines up to but not
including the first unindented line will be returned.
:Parameters:
- `start`: The index of the first line to examine.
- `until_blank`: Stop collecting at the first blank line if true.
- `strip_indent`: Strip common leading indent if true (default).
- `block_indent`: The indent of the entire block, if known.
- `first_indent`: The indent of the first line, if known.
:Return:
- a StringList of indented lines with minimum indent removed;
- the amount of the indent;
- a boolean: did the indented block finish with a blank line or EOF?
"""
indent = block_indent # start with None if unknown
end = start
if block_indent is not None and first_indent is None:
first_indent = block_indent
if first_indent is not None:
end += 1
last = len(self.data)
while end < last:
line = self.data[end]
if line and (line[0] != ' '
or (block_indent is not None
and line[:block_indent].strip())):
# Line not indented or insufficiently indented.
# Block finished properly iff the last indented line is blank:
blank_finish = ((end > start)
and not self.data[end - 1].strip())
break
stripped = line.lstrip()
if not stripped: # blank line
if until_blank:
blank_finish = 1
break
elif block_indent is None:
line_indent = len(line) - len(stripped)
if indent is None:
indent = line_indent
else:
indent = min(indent, line_indent)
end += 1
else:
blank_finish = 1 # block ends at end of lines
block = self[start:end]
if first_indent is not None and block:
block.data[0] = block.data[0][first_indent:]
if indent and strip_indent:
block.trim_left(indent, start=(first_indent is not None))
return block, indent or 0, blank_finish
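# Example (hypothetical input): collect indented lines and strip the
# common indent:
#
#     >>> sl = StringList(['  one', '   two', 'flush'], source='<test>')
#     >>> block, indent, blank_finish = sl.get_indented()
#     >>> list(block), indent, blank_finish
#     (['one', ' two'], 2, False)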
def get_2D_block(self, top, left, bottom, right, strip_indent=True):
block = self[top:bottom]
indent = right
for i in range(len(block.data)):
# get slice from line, care for combining characters
ci = utils.column_indices(block.data[i])
try:
left = ci[left]
except IndexError:
left += len(block.data[i]) - len(ci)
try:
right = ci[right]
except IndexError:
right += len(block.data[i]) - len(ci)
block.data[i] = line = block.data[i][left:right].rstrip()
if line:
indent = min(indent, len(line) - len(line.lstrip()))
if strip_indent and 0 < indent < right:
block.data = [line[indent:] for line in block.data]
return block
def pad_double_width(self, pad_char):
"""
Pad all double-width characters in self by appending `pad_char` to each.
For East Asian language support.
"""
if hasattr(unicodedata, 'east_asian_width'):
east_asian_width = unicodedata.east_asian_width
else:
return # unicodedata.east_asian_width is new in Python 2.4
for i in range(len(self.data)):
line = self.data[i]
if isinstance(line, str):
new = []
for char in line:
new.append(char)
if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
new.append(pad_char)
self.data[i] = ''.join(new)
def replace(self, old, new):
"""Replace all occurrences of substring `old` with `new`."""
for i in range(len(self.data)):
self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception): pass
class UnknownStateError(StateMachineError): pass
class DuplicateStateError(StateMachineError): pass
class UnknownTransitionError(StateMachineError): pass
class DuplicateTransitionError(StateMachineError): pass
class TransitionPatternNotFound(StateMachineError): pass
class TransitionMethodNotFound(StateMachineError): pass
class UnexpectedIndentationError(StateMachineError): pass
class TransitionCorrection(Exception):
"""
Raise from within a transition method to switch to another transition.
Raise with one argument, the new transition name.
"""
class StateCorrection(Exception):
"""
Raise from within a transition method to switch to another state.
Raise with one or two arguments: new state name, and an optional new
transition name.
"""
def string2lines(astring, tab_width=8, convert_whitespace=False,
whitespace=re.compile('[\v\f]')):
"""
Return a list of one-line strings with tabs expanded, no newlines, and
trailing whitespace stripped.
Each tab is expanded with between 1 and `tab_width` spaces, so that the
next character's index becomes a multiple of `tab_width` (8 by default).
Parameters:
- `astring`: a multi-line string.
- `tab_width`: the number of columns between tab stops.
- `convert_whitespace`: convert form feeds and vertical tabs to spaces?
"""
if convert_whitespace:
astring = whitespace.sub(' ', astring)
return [s.expandtabs(tab_width).rstrip() for s in astring.splitlines()]
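# Example (doctest-style):
#
#     >>> string2lines('one\ttwo\n\fthree', tab_width=4, convert_whitespace=True)
#     ['one two', ' three']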
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| {
"repo_name": "xfournet/intellij-community",
"path": "python/helpers/py3only/docutils/statemachine.py",
"copies": "44",
"size": "57608",
"license": "apache-2.0",
"hash": 8160966043027777000,
"line_mean": 36.4564369311,
"line_max": 95,
"alpha_frac": 0.5864116095,
"autogenerated": false,
"ratio": 4.391188352770791,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005199755741031342,
"num_lines": 1538
} |
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
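# Example (hypothetical fields): Struct is a plain attribute bag:
#
#     >>> memo = Struct(section_level=0, title_styles=[])
#     >>> memo.section_level
#     0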
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
line=self.state_machine.abs_line_number())
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
def title_inconsistent(self, sourcetext, lineno):
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
line=lineno)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.line = lineno
return [p] + messages, literalnext
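# Examples of the trailing '::' handling above (hypothetical inputs):
#
#     'Paragraph::'  -> paragraph text 'Paragraph:'; literal block expected
#     'Paragraph ::' -> paragraph text 'Paragraph'; literal block expected
#     '::'           -> no paragraph; literal block expected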
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
return self.reporter.warning(
'%s ends without a blank line; unexpected unindent.' % node_name,
line=(self.state_machine.abs_line_number() + 1))
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
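# Example sketch (hypothetical definition): join parts into a named
# or-group:
#
#     >>> pattern = build_regexp(('num', '', '!', ['one', 'two']))
#     >>> pattern.match('two!').group('num')
#     'two'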
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
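# Illustrative behaviour of quoted_start (assumed examples, shown for an
# emphasis start-string "*"):
#
#     "*"    -> quoted: '"' and '"' share an index in openers/closers,
#               so the asterisk cannot start inline markup;
#     (*)    -> quoted likewise, via the paired '(' and ')';
#     x*y    -> not quoted ('x' raises ValueError in openers.index),
#               so markup may start here.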
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
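# Sketch of adjust_uri in use (doctest style; assumes an ``Inliner``
# instance can be created bare, which should hold here since ``patterns``
# is defined at class level):
#
#     >>> inliner = Inliner()
#     >>> inliner.adjust_uri('user@example.org')
#     'mailto:user@example.org'
#     >>> inliner.adjust_uri('http://example.org')
#     'http://example.org'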
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
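# Reference forms dispatched to footnote_reference (from the
# 'footnotelabel' alternatives in the ``parts`` definition above):
#
#     [1]_        manually numbered footnote reference
#     [#]_        auto-numbered
#     [#label]_   auto-numbered with a label
#     [*]_        auto-symbol
#     [CIT2002]_  citation reference (matches 'citationlabel')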
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
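# Illustrative expansion (the base URL is an assumption about the default
# ``rfc_base_url`` setting, not something this module defines): for the
# text 'RFC 2822', ``rfcnum`` is 2822, ``self.rfc_url % rfcnum`` yields
# 'rfc2822.html', and the reference target becomes e.g.
# 'http://www.faqs.org/rfcs/rfc2822.html'.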
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
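# Rough sketch of the recursion above: for the text
# 'see http://example.org now', the ``uri`` pattern matches in the
# middle, so the result is approximately
#
#     [Text('see '), reference(refuri='http://example.org'), Text(' now')]
#
# with the flanking substrings handled by the recursive calls.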
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
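# Minimal doctest-style sketch of the ordinal converters above
# (``roman`` is the support module imported by this file):
#
#     >>> _loweralpha_to_int('c')
#     3
#     >>> _upperalpha_to_int('B')
#     2
#     >>> _lowerroman_to_int('iv')
#     4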
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, attribution lines,
attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
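# Illustrative reST input for the attribution logic above (an assumed
# example, not taken from the test suite)::
#
#     This is a block quote.
#
#     -- An Attribution
#
# The dash line following the blank line is split off and parsed as the
# attribution; everything before it remains block quote content.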
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), line=self.state_machine.abs_line_number())
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, the text is checked for the Roman numeral 1 ('i' or 'I'). This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% expected_sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
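# Sketch of the values produced by make_enumerator (illustrative; only
# the class-level ``enum`` data is consulted, so any instance works):
#
#     make_enumerator(2, 'loweralpha', 'parens')   ->  ('(b) ', '(#) ')
#     make_enumerator(4, 'upperroman', 'period')   ->  ('IV. ', '#. ')
#     make_enumerator(27, 'loweralpha', 'period')  ->  None  # out of range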
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.line = lineno
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
# This shouldn't happen; pattern won't match.
msg = self.reporter.error(
'Invalid option list marker: %s' % message, line=lineno)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring),
self.state_machine.abs_line_number() + 1)
return optlist
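# Option marker forms handled by parse_option_marker (illustrative):
#
#     -a             short option
#     -b file        short option with argument (delimiter ' ')
#     --long         long option
#     --long=arg     "--opt=value" form (delimiter '=')
#     -ovalue        "-ovalue" form, split into '-o' + 'value' (delimiter '')
#     -o <v1 v2>     angle-bracketed values, joined into one argument token
#     /V             DOS/VMS-style option (matched by the 'longopt' pattern)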
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
line=(self.state_machine.abs_line_number() + 1))
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
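# Illustrative nesting: for line indents [0, 2, 2, 0], the segment above
# is rewritten (in place) roughly as
#
#     line_block
#         line            (indent 0)
#         line_block      (nested; holds the two indent-2 lines)
#             line
#             line
#         line            (indent 0)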
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
msg = self.reporter.warning(
'Blank line required after table.',
line=self.state_machine.abs_line_number() + 1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=source, line=lineno))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
lineno = self.state_machine.abs_line_number() - len(block) + 1
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
line=lineno)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
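# Shape of ``tabledata`` consumed by build_table / build_table_row, as
# produced by the table parsers (a sketch; widths and offsets invented):
#
#     ([24, 12],                              # column widths
#      [[(0, 0, 1, cellblock), ...]],         # header rows (may be empty)
#      [[(morerows, morecols, offset, cellblock), None, ...], ...])
#
# ``None`` cells mark positions covered by a span; each ``cellblock`` is
# a block of lines parsed recursively into its ``entry`` node.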
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.line = lineno
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.line = lineno
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.', lineno)
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.',
lineno)
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.line = lineno
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
line=lineno)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.'
% subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, directive_error:
msg_node = self.reporter.system_message(directive_error.level,
directive_error.message)
msg_node += nodes.literal_block(block_text, block_text)
msg_node['line'] = lineno
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i in range(len(indented)):
if not indented[i].strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
raise MarkupError('no arguments permitted; blank line '
'required before content block')
else:
options = {}
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
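# Illustrative directive block layout for parse_directive_block (an
# assumed example)::
#
#     .. name:: argument one
#        :option: value
#
#        Directive content starts after the first blank line.
#
# Everything up to the first blank line is the argument/option block;
# the rest is content, with ``content_offset`` adjusted to match.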
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), line=lineno)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-numbered (with label) footnote
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message, lineno = error.args
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
line=lineno)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext), line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=source, line=lineno)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
definitionlistitem.line = lineno
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.', line=line_offset+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
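    # Example (added comment): for the term line
    # 'term : classifier one : classifier two', the Text node is split on
    # `classifier_delimiter`; the first part joins the term node and two
    # classifier nodes follow it in the returned node list.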
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
lineno = self.state_machine.abs_line_number() - 1
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.line = lineno
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source), line=lineno)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.", line=lineno)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.line = self.initial_lineno
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
"""
This is the ``docutils.parsers.rst.states`` module, the core of the
reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items.
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (e.g., `Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` into the given `node` of an existing
`docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
line=self.state_machine.abs_line_number())
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
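    # Worked example (added comment): with memo.title_styles == ['=', '-']
    # and memo.section_level == 2, a new '=' underline yields level 1 <= 2;
    # the section level is reset to 1, the state machine backs up
    # len(style) + 1 == 2 lines, and EOFError bubbles up until the
    # top-level state machine re-parses the title as a sibling section.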
def title_inconsistent(self, sourcetext, lineno):
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
line=lineno)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.line = lineno
return [p] + messages, literalnext
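    # Worked examples (added comment):
    #   'Paragraph::'  -> text 'Paragraph:', literalnext 1 (one colon kept)
    #   'Paragraph ::' -> text 'Paragraph',  literalnext 1 (marker removed)
    #   '::'           -> no paragraph,      literalnext 1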
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
return self.reporter.warning(
'%s ends without a blank line; unexpected unindent.' % node_name,
line=(self.state_machine.abs_line_number() + 1))
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
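# Example (added comment): a flat definition, returned uncompiled:
#
#   build_regexp(('marker', '^', r'\s', [r'\*\*', r'\*(?!\*)']), compile=0)
#   # -> r'^(?P<marker>\*\*|\*(?!\*))\s'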
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
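    # Sketch of one loop pass (added comment): for 'plain *emph* end',
    # `pattern_search` finds the '*' start-string, dispatch['*'] is
    # `emphasis`, and `processed` becomes [Text('plain '), <emphasis>,
    # Text(' end')] with no system messages.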
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
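    # Example (added comment): in the text '("*")' the '*' is preceded by
    # '(' and followed by ')', whose indices in `openers` and `closers`
    # match, so the start-string is "quoted" and no emphasis begins there.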
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
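            # Note (added comment): the local name `nodes` below shadows
            # the `docutils.nodes` module for the rest of this method.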
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
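    # e.g. enum.converters['loweralpha']('c') == 3 and
    # enum.converters['lowerroman']('iv') == 4 (via roman.fromRoman('IV')).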
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, attribution lines,
attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
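    # Worked example (added comment; `indented` is really a docutils
    # StringList): for ['Quoted text.', '', '-- Anon'], the dash line
    # follows a blank and matches `attribution_pattern`, so the method
    # returns the content lines ['Quoted text.', ''], the attribution
    # lines ['Anon'] (marker and indent trimmed off), the attribution
    # offset 2, the remaining lines, and the new absolute line offset.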
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
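        # Pre-set `i` so the ``for ... else`` clause below cannot raise
        # NameError when the attribution is the last indented line
        # (i.e. when the range is empty):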
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), line=self.state_machine.abs_line_number())
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
            except KeyError: # shouldn't happen
                raise ParserError('unknown enumerator sequence: %s'
                                  % expected_sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
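    # Hedged examples of the return value (enumerator text assumed for
    # illustration): a match on "3." yields ('period', 'arabic', '3', 3);
    # "(ii)" yields ('parens', 'lowerroman', 'ii', 2); "#)" yields
    # ('rparen', '#', '#', 1). Text that fails Roman-numeral conversion
    # yields an ordinal of None.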
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
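    # Illustration (assumed context): "1. first" followed by a blank or
    # indented line is accepted, and so is "1. first" followed directly
    # by the expected "2. ..." or the auto-enumerator "#. ...", which is
    # what lets enumerated list items omit the blank lines between them.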
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
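    # Minimal sketch of the outputs (values assumed for illustration):
    # make_enumerator(2, 'loweralpha', 'parens') returns
    # ('(b) ', '(#) '); out-of-range ordinals, such as 27 for an
    # alphabetic sequence, return None.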
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.line = lineno
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
# This shouldn't happen; pattern won't match.
msg = self.reporter.error(
'Invalid option list marker: %s' % message, line=lineno)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring),
self.state_machine.abs_line_number() + 1)
return optlist
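    # Hedged example (hypothetical marker text): "-f FILE, --file=FILE"
    # produces two option nodes, one with option_string "-f" and
    # option_argument "FILE" (delimiter ' '), the other with
    # option_string "--file" and option_argument "FILE" (delimiter '=').
    # A marker like "-o <value1 value2>" keeps the angle-bracketed
    # values together as a single argument token.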
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
line=(self.state_machine.abs_line_number() + 1))
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
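    # Illustration (indents assumed): for sibling line elements with
    # indents [0, 2, 2, 0], the two lines indented by 2 are wrapped in a
    # nested line_block, producing
    # [line(0), line_block[line(2), line(2)], line(0)].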
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
msg = self.reporter.warning(
'Blank line required after table.',
line=self.state_machine.abs_line_number() + 1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=source, line=lineno))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
lineno = self.state_machine.abs_line_number() - len(block) + 1
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
line=lineno)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.line = lineno
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.line = lineno
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.', lineno)
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
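    # Sketch of the two outcomes (inputs assumed): a block whose text is
    # "other-target_" returns ('refname', 'other-target'), while a block
    # such as ['http://example.org/'] returns
    # ('refuri', 'http://example.org/').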
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.',
lineno)
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.line = lineno
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
line=lineno)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.'
% subname,
nodes.literal_block(blocktext, blocktext), line=lineno)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
source=error.source, line=error.line)
msg_node += nodes.literal_block(block_text, block_text)
msg_node['line'] = lineno
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i in range(len(indented)):
if not indented[i].strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
raise MarkupError('no arguments permitted; blank line '
'required before content block')
else:
options = {}
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
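    # Sketch of how a directive block is split (hypothetical input): for
    # a directive with one argument, an "alt" option, and content, the
    # indented block ['target.png', ':alt: text', '', 'a caption'] is
    # divided at the first blank line into arg_block
    # ['target.png', ':alt: text'] and content ['a caption']; the option
    # lines are then peeled off arg_block, giving arguments
    # ['target.png'] and options {'alt': 'text'}.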
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
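    # Hedged usage sketch (option_spec assumed): with
    # option_spec == {'alt': directives.unchanged} and
    # datalines == [':alt: some text'], a clean parse returns
    # (1, {'alt': u'some text'}); an unknown field name instead returns
    # (0, 'unknown option: "..."').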
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), line=lineno)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
                          \#%s # auto-numbered footnote label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message, lineno = error.args
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError, (message, lineno):
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
line=lineno)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext), line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, source, lineno = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=source, line=lineno)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
definitionlistitem.line = lineno
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.', line=line_offset+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
lineno = self.state_machine.abs_line_number() - 1
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.line = lineno
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source), line=lineno)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source), line=lineno)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext), line=lineno)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.", line=lineno)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.line = self.initial_lineno
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items.
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
# enable the reporter to determine source and source-line
if not hasattr(self.reporter, 'locator'):
self.reporter.locator = self.state_machine.get_source_and_line
# print "adding locator to reporter", self.state_machine.input_offset
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
src, srcline = self.state_machine.get_source_and_line()
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
source=src, line=srcline)
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
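# For instance, with title_styles == ['=', '-'] and section_level == 2,
# a new '=' underline has level 1 <= 2: the line count is backed up two
# lines (title + underline) and EOFError is raised so the enclosing
# level-1 state machine re-examines the title.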
def title_inconsistent(self, sourcetext, lineno):
src, srcline = self.state_machine.get_source_and_line(lineno)
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
source=src, line=srcline)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.source, p.line = self.state_machine.get_source_and_line(lineno)
return [p] + messages, literalnext
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
# the actual problem is one line below the current line
src, srcline = self.state_machine.get_source_and_line()
return self.reporter.warning('%s ends without a blank line; '
'unexpected unindent.' % node_name,
source=src, line=srcline+1)
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
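# Illustrative only: given the 4-tuple convention above, a definition like
# ('enum', '(', ')', ['[0-9]+', '[a-z]']) produces an or-group wrapped in
# the prefix/suffix, returned uncompiled when `compile` is false:
#
#     >>> build_regexp(('enum', '(', ')', ['[0-9]+', '[a-z]']), compile=0)
#     '((?P<enum>[0-9]+|[a-z]))'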
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
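# Illustrative result (given a suitable `memo` and `parent`): parsing the
# string 'plain *emphasized* text' returns a node list equivalent to
# [Text('plain '), emphasis('*emphasized*', 'emphasized'), Text(' text')]
# and an empty system-message list.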
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
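# Example: in '"*" marks emphasis', the '*' start-string is immediately
# preceded by the opener '"' and followed by the matching closer '"', so
# quoted_start() returns 1 and no emphasis construct is started.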
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
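# E.g. adjust_uri('someone@example.org') returns
# 'mailto:someone@example.org', while adjust_uri('http://example.org/')
# is returned unchanged (the email pattern does not match it).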
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
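# The dispatch keys are the start-strings and whole-construct end-strings
# ('refend' and 'fnend' groups) captured by `patterns.initial`; `parse()`
# looks up the handler and calls it as method(self, match, lineno).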
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
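# Illustrative values for the ordinal converters defined above:
# _loweralpha_to_int('c') == 3, _upperalpha_to_int('D') == 4, and
# _lowerroman_to_int('iv') == 4 (via roman.fromRoman('IV')).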
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, attribution lines,
attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
# report with source and source-line results in
# ``IndexError: list index out of range``
# node.source, node.line = self.state_machine.get_source_and_line(lineno)
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), source=src, line=srcline)
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
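# For instance, a match on the enumerator '(ii)' yields
# ('parens', 'lowerroman', 'ii', 2) and one on '3.' yields
# ('period', 'arabic', '3', 3); invalid Roman numeral text yields a
# ``None`` ordinal.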
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
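# E.g. make_enumerator(4, 'loweralpha', 'parens') returns ('(d) ', '(#) '),
# and make_enumerator(27, 'loweralpha', 'period') returns None (out of
# range for a single alphabetic enumerator).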
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
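# E.g. for a field marker match on ':author: ', parse_field_marker()
# returns 'author' (the leading and trailing colons are stripped).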
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, error:
# This shouldn't happen; pattern won't match.
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.error('Invalid option list marker: %s' %
str(error), source=src, line=srcline)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist
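# E.g. an option marker '-o FILE, --output=FILE' yields two option nodes:
# one with option_string '-o' and option_argument 'FILE' (delimiter ' '),
# and one with option_string '--output' and option_argument 'FILE'
# (delimiter '=').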
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
source=src, line=srcline+1)
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.warning(
'Blank line required after table.',
source=src, line=srcline+1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=src, line=srcline))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
startline = self.state_machine.abs_line_number() - len(block) + 1
src, srcline = self.state_machine.get_source_and_line(startline)
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
source=src, line=srcline)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
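# `tabledata` is the 3-tuple produced by the table parsers:
# (column widths, header rows, body rows), where each row is a list of
# cells and each cell is either None (a spanned-out cell) or a 4-tuple
# (morerows, morecols, line offset, cell text block), as unpacked in
# build_table_row() below.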
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.source = src
footnote.line = srcline
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.')
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
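# A sketch of the distinction made above, with hypothetical target blocks as
# this method receives them (lists of already-unindented lines):
#   ['other-target_']        -> ('refname', 'other-target')  # indirect
#   ['http://example.org/']  -> ('refuri', 'http://example.org/')
#   ['']                     -> ('refuri', '')                # empty target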
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.')
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.source = src
substitution_node.line = srcline
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text),
source=src, line=srcline)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
source=src, line=srcline)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
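# The first blank line separates the argument/option block from the
# content; the for/else below bumps `i` past the last line when no
# blank line is found, so `arg_block` takes the whole input.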
for i in range(len(indented)):
if not indented[i].strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
raise MarkupError('no arguments permitted; blank line '
'required before content block')
else:
options = {}
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
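# For example (a sketch, assuming the stock `directives.unchanged` converter):
#   self.parse_extension_options({'alt': directives.unchanged},
#                                [':alt: some text'])
#   -> (1, {'alt': 'some text'})
# Unknown names or failed conversions yield (0, <error string>) instead.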
def unknown_directive(self, type_name):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), source=src, line=srcline)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-numbered footnote with a label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message = ' '.join(error.args)
src, srcline = self.state_machine.get_source_and_line()
errors.append(self.reporter.warning(
message, source=src, line=srcline))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
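# The pattern accepts any run of printable ASCII except space and ':' as a
# field name, so e.g. 'From: someone' or 'Subject:' match at line start.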
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError:
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=src, line=srcline)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
definitionlistitem.source = src
definitionlistitem.line = srcline - 1
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.',
source=src, line=srcline)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
src, srcline = self.state_machine.get_source_and_line()
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.source = src
transition.line = srcline - 1
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
src, srcline = self.state_machine.get_source_and_line(lineno)
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
src, srcline = self.state_machine.get_source_and_line(
self.initial_lineno)
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.source = src
literal_block.line = srcline
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
# src not available, because statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
src, srcline = self.state_machine.get_source_and_line()
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
source=src, line=srcline))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
{
"repo_name": "akiokio/centralfitestoque",
"path": "src/.pycharm_helpers/docutils/parsers/rst/states.py",
"copies": "1",
"size": "129130",
"license": "bsd-2-clause",
"hash": 7157528635440201000,
"line_mean": 41.2822527832,
"line_max": 86,
"alpha_frac": 0.5473786107,
"autogenerated": false,
"ratio": 4.370769022474953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00040502090925308577,
"num_lines": 3054
}
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
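# e.g. Struct(a=1, b=2).b == 2; used below both for the parse-global `memo`
# record and for grouping pattern constants (see `Inliner.patterns`).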
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
# enable the reporter to determine source and source-line
if not hasattr(self.reporter, 'locator'):
self.reporter.locator = self.state_machine.get_source_and_line
# print "adding locator to reporter", self.state_machine.input_offset
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
src, srcline = self.state_machine.get_source_and_line()
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
source=src, line=srcline)
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
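# `blank_finish` records whether the construct ended with a blank line; it is
# threaded through the nested state (via `blank_finish_state`) so callers can
# emit an "ends without a blank line" warning when it comes back false.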
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
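# Illustration of the title-style bookkeeping above, with hypothetical
# styles, title_styles == ['=', '-'] and section_level == 2:
#   style '-' -> level 2 == mylevel: sibling; back up and raise EOFError
#   style '~' -> new style appended at level 3: immediate subsection, OK
#   style '=' -> level 1 <  mylevel: supersection; bubble up via EOFError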
def title_inconsistent(self, sourcetext, lineno):
src, srcline = self.state_machine.get_source_and_line(lineno)
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
source=src, line=srcline)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.source, p.line = self.state_machine.get_source_and_line(lineno)
return [p] + messages, literalnext
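# Examples of the '::' handling above (input -> paragraph text, literalnext):
#   'Text::'  -> ('Text:', 1)   # one colon kept
#   'Text ::' -> ('Text', 1)    # expansion marker removed entirely
#   '::'      -> (no paragraph, 1)
#   'Text'    -> ('Text', 0)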
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
# the actual problem is one line below the current line
src, srcline = self.state_machine.get_source_and_line()
return self.reporter.warning('%s ends without a blank line; '
'unexpected unindent.' % node_name,
source=src, line=srcline+1)
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
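# For example (a sketch with illustrative names only):
#
#   >>> build_regexp(('outer', '^', '$',
#   ...               ['foo', ('inner', '', '', ['a', 'b'])]), compile=0)
#   '^(?P<outer>foo|(?P<inner>a|b))$'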
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_unescaped_whitespace_escape_before = r'(?<!(?<!\x00)[ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
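# e.g. 'a-b.c' and 'a_b' are valid simplenames; '-a', 'a--b', 'a__b' are not.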
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_unescaped_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(\.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
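# Example (illustrative): in the input '"*"' the candidate '*' start-string
# is preceded by '"' and followed by '"', whose indices in `openers` and
# `closers` coincide, so it is reported as quoted and not treated as markup.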
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
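# `inline_obj` returns a 5-tuple: (text before the start-string, a list
# holding the new node or a `problematic` node, the remaining text, a list
# of system messages, and the matched end-string or '' on failure).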
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
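# Illustrative behaviour (assuming the `email` pattern matches as expected):
#   adjust_uri('someone@example.org')     -> 'mailto:someone@example.org'
#   adjust_uri('http://example.org/page') -> 'http://example.org/page'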
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
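# Illustrative labels handled above: '[1]_' (manually numbered footnote),
# '[#]_' and '[#label]_' (auto-numbered), '[*]_' (auto-symbol), and
# '[CIT2002]_' (citation reference).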
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
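# Illustrative result with the default `rfc_url` template: the text
# 'RFC 2822' yields a reference with
# refuri = settings.rfc_base_url + 'rfc2822.html'.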
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
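# Illustrative run (patterns come from `self.implicit_dispatch`, set up in
# the Inliner's constructor): for "see http://example.org here", the URI
# pattern matches, `standalone_uri` builds a reference node, and "see " and
# " here" are recursively re-scanned and returned as Text nodes.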
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
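# Quick sanity examples (illustrative):
#   _loweralpha_to_int('c')  -> 3
#   _upperalpha_to_int('B')  -> 2
#   _lowerroman_to_int('iv') -> 4  (via the `roman` module)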
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, attribution lines,
attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
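# Illustrative input for the attribution logic above:
#
#   This is the block quote body.
#
#   -- An Attribution
#
# The "--" line follows a blank line and matches `attribution_pattern`, so
# the body and the attribution are split apart and returned separately.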
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
# Reporting with source and source-line here results in
# ``IndexError: list index out of range``, so the call stays disabled:
# node.source, node.line = self.state_machine.get_source_and_line(lineno)
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), source=src, line=srcline)
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
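# Illustrative results (default sequences, no `expected_sequence`):
#   '3.'   -> ('period', 'arabic',     '3',  3)
#   '(b)'  -> ('parens', 'loweralpha', 'b',  2)
#   'ii)'  -> ('rparen', 'lowerroman', 'ii', 2)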
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
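# Illustrative results:
#   make_enumerator(2, 'loweralpha', 'parens')  -> ('(b) ', '(#) ')
#   make_enumerator(4, 'upperroman', 'period')  -> ('IV. ', '#. ')
#   make_enumerator(27, 'loweralpha', 'period') -> None  (out of range)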
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
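# For example, a match on ':Author: some text' yields the field name
# 'Author' (the trailing colon and whitespace are stripped).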
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, error:
# This shouldn't happen; pattern won't match.
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.error(u'Invalid option list marker: %s' %
error, source=src, line=srcline)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist
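# Illustrative parses of the recognized option forms:
#   '-o file'       -> option '-o', argument 'file', delimiter ' '
#   '--output=file' -> option '--output', argument 'file', delimiter '='
#   '-ofile'        -> option '-o', argument 'file', delimiter ''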
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
source=src, line=srcline+1)
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.warning(
'Blank line required after table.',
source=src, line=srcline+1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=src, line=srcline))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
startline = self.state_machine.abs_line_number() - len(block) + 1
src, srcline = self.state_machine.get_source_and_line(startline)
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
source=src, line=srcline)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
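# `tabledata` is the 3-tuple produced by the table parsers:
# (column widths, header rows, body rows); each row is a list of cells and
# each cell is (morerows, morecols, line offset, cell block), or None for a
# cell spanned over by a neighbor.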
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.source = src
footnote.line = srcline
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.')
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
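# Illustrative classification:
#   ['other-target_']      -> ('refname', 'other-target')   (indirect)
#   ['http://example.org'] -> ('refuri', 'http://example.org')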
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.')
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.source = src
substitution_node.line = srcline
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text),
source=src, line=srcline)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
source=src, line=srcline)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i, line in enumerate(indented):
if not line.strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
else:
options = {}
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
content = arg_block + indented[i:]
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
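# Illustrative call (hypothetical `option_spec`; conversion functions from
# the `directives` module):
#   option_spec = {'width': directives.positive_int}
#   parse_extension_options(option_spec, [':width: 4'])
#     -> (1, {'width': 4}), or (0, '<error string>') on bad input.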
def unknown_directive(self, type_name):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), source=src, line=srcline)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-numbered footnote with label
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message = ' '.join(error.args)
src, srcline = self.state_machine.get_source_and_line()
errors.append(self.reporter.warning(
message, source=src, line=srcline))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError:
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
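    # Sketch: in a substitution definition such as
    #
    #     .. |logo| image:: logo.png
    #
    # this state parses the embedded ``image::`` directive, passing the
    # substitution name ("logo") along as the directive's ``alt`` text.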
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
# NOTE: self.paragraph returns [ node, system_message(s) ], literalnext
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=src, line=srcline)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
definitionlistitem.source = src
definitionlistitem.line = srcline - 1
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.',
source=src, line=srcline)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
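    # Sketch: with the ' +: +' delimiter above, a term line like
    #
    #     term : classifier one : classifier two
    #
    # yields one term node followed by two classifier nodes.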
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
src, srcline = self.state_machine.get_source_and_line()
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.source = src
transition.line = srcline - 1
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
src, srcline = self.state_machine.get_source_and_line(lineno)
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
src, srcline = self.state_machine.get_source_and_line(
self.initial_lineno)
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.source = src
literal_block.line = srcline
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
# src not available, because statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
src, srcline = self.state_machine.get_source_and_line()
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
source=src, line=srcline))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
    - `BulletList`: Second and subsequent bullet_list list_items.
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
import roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
from docutils.utils import escape2null, unescape, column_width
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
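# Usage sketch: ``Struct(a=1, b=2).a == 1`` -- keyword arguments simply
# become instance attributes.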
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=1,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=0,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=1):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=0):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
# enable the reporter to determine source and source-line
if not hasattr(self.reporter, 'locator'):
self.reporter.locator = self.state_machine.get_source_and_line
# print "adding locator to reporter", self.state_machine.input_offset
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
src, srcline = self.state_machine.get_source_and_line()
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line),
source=src, line=srcline)
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=0,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=0,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
        back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = 1
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
def title_inconsistent(self, sourcetext, lineno):
src, srcline = self.state_machine.get_source_and_line(lineno)
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
source=src, line=srcline)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=1)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.source, p.line = self.state_machine.get_source_and_line(lineno)
return [p] + messages, literalnext
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
# the actual problem is one line below the current line
src, srcline = self.state_machine.get_source_and_line()
return self.reporter.warning('%s ends without a blank line; '
'unexpected unindent.' % node_name,
source=src, line=srcline+1)
def build_regexp(definition, compile=1):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
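# Hedged example (``compile=0`` returns the raw pattern string):
#
#     >>> build_regexp(('num', '', '', ['[0-9]+', '#']), compile=0)
#     '(?P<num>[0-9]+|#)'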
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
openers = u'\'"([{<\u2018\u201c\xab\u00a1\u00bf' # see quoted_start below
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
start_string_prefix = (u'((?<=^)|(?<=[-/: \\n\u2019%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(openers)))
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_unescaped_whitespace_escape_before = r'(?<!(?<!\x00)[ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_unescaped_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$', re.VERBOSE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE))
def quoted_start(self, match):
"""Return 1 if inline markup start-string is 'quoted', 0 if not."""
string = match.string
start = match.start()
end = match.end()
if start == 0: # start-string at beginning of text
return 0
prestart = string[start - 1]
try:
poststart = string[end]
if self.openers.index(prestart) \
== self.closers.index(poststart): # quoted
return 1
except IndexError: # start-string at end of text
return 1
except ValueError: # not quoted
pass
return 0
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=0):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
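    # Sketch: adjust_uri('user@example.org') -> 'mailto:user@example.org';
    # anything not matching the email pattern is returned unchanged.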
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=1)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=None):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
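# Sketch of the enumerator converters defined above:
#     _loweralpha_to_int('c')  == 3
#     _upperalpha_to_int('C')  == 3
#     _lowerroman_to_int('iv') == 4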
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
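    # e.g. '(3)' is a 'parens' enumerator, '3)' is 'rparen', '3.' is 'period'.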
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$')
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
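    # For instance, pats['period'] expands to (sketch):
    #     (?P<period>([0-9]+|[a-z]|[A-Z]|[ivxlcdm]+|[IVXLCDM]+|#)\.)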
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])')
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
        Return a tuple of: (block quote content lines, attribution lines,
        attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.line = lineno
# Reporting with both `source` and `source-line` results in
# ``IndexError: list index out of range``, so this call stays disabled:
# node.source, node.line = self.state_machine.get_source_and_line(lineno)
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal), source=src, line=srcline)
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% expected_sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
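# For example (informal, assuming the standard 'parens' format info with
# prefix "(" and suffix ")"): make_enumerator(2, 'lowerroman', 'parens')
# would return ('(ii) ', '(#) ').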
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
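# For example (informal): given a marker match covering ":Parameter i: ",
# parse_field_marker returns 'Parameter i'.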
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError, error:
# This shouldn't happen; pattern won't match.
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.error(u'Invalid option list marker: %s' %
error, source=src, line=srcline)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=')
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist
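# For example (informal): the marker "-o FILE, --output=FILE" yields two
# option nodes: the first with option_string "-o" and option_argument
# "FILE" (delimiter " "), the second with option_string "--output" and
# option_argument "FILE" (delimiter "=").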
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
source=src, line=srcline+1)
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
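# For example (informal): line indents [0, 2, 2, 0] produce a line_block
# whose second and third lines are wrapped in a nested line_block,
# mirroring the visual indentation of the "|" lines in the source.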
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
src, srcline = self.state_machine.get_source_and_line()
msg = self.reporter.warning(
'Blank line required after table.',
source=src, line=srcline+1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError, detail:
nodelist = self.malformed_table(
block, ' '.join(detail.args)) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
messages.append(self.reporter.error('Unexpected indentation.',
source=src, line=srcline))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
def malformed_table(self, block, detail=''):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
startline = self.state_machine.abs_line_number() - len(block) + 1
src, srcline = self.state_machine.get_source_and_line(startline)
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
source=src, line=srcline)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE),)
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.source = src
footnote.line = srcline
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=1, strip_indent=0)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while 1:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.')
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
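# For example (informal): the target body "http://example.org/" parses as
# ('refuri', 'http://example.org/'), while the body "other_" parses as
# ('refname', 'other'), an indirect target.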
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=0)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while 1:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.')
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.source = src
substitution_node.line = srcline
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text),
source=src, line=srcline)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
source=src, line=srcline)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i, line in enumerate(indented):
if not line.strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
else:
options = {}
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
content = arg_block + indented[i:]
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
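# Informal sketch of the block anatomy parsed above, for a directive such as
#
#     .. figure:: picture.png
#        :alt: alternate text
#
#        Caption paragraph (directive content).
#
# "picture.png" is the argument block, ":alt: ..." the option block, and
# the caption paragraph the content.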
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i in range(len(arg_block)):
if arg_block[i][:1] == ':':
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=1)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
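# For example (informal): with option_spec {'alt': directives.unchanged}
# and datalines [':alt: some text'], a successful parse returns roughly
# (1, {'alt': 'some text'}).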
def unknown_directive(self, type_name):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=0)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), source=src, line=srcline)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
\#%s # auto-numbered, labeled footnote
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error: # never reached?
message = ' '.join(error.args)
src, srcline = self.state_machine.get_source_and_line()
errors.append(self.reporter.warning(
message, source=src, line=srcline))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=1)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError:
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
# NOTE: self.paragraph returns [ node, system_message(s) ], literalnext
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
src, srcline = self.state_machine.get_source_and_line()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning(
'Title underline too short.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=1)
except statemachine.UnexpectedIndentationError, instance:
block, src, srcline = instance.args
msg = self.reporter.error('Unexpected indentation.',
source=src, line=srcline)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=0,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
definitionlistitem = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
definitionlistitem.source = src
definitionlistitem.line = srcline - 1
termlist, messages = self.term(termline, lineno)
definitionlistitem += termlist
definition = nodes.definition('', *messages)
definitionlistitem += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.',
source=src, line=srcline)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return definitionlistitem, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlistitem, blank_finish = self.definition_list_item(context)
self.parent += definitionlistitem
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = 0
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
src, srcline = self.state_machine.get_source_and_line()
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.source = src
transition.line = srcline - 1
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source),
source=src, line=srcline-1)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
src, srcline = self.state_machine.get_source_and_line()
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline-1)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
src, srcline = self.state_machine.get_source_and_line(lineno)
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.",
source=src, line=srcline)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
def __init__(self, state_machine, debug=0):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
src, srcline = self.state_machine.get_source_and_line(
self.initial_lineno)
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.source = src
literal_block.line = srcline
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
# src not available, because statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote))
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
src, srcline = self.state_machine.get_source_and_line()
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
source=src, line=srcline))
self.state_machine.previous_line()
raise EOFError
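# Illustrative note (editor's sketch, not part of the original module): a
# quoted literal block follows a "::" paragraph without indentation, using a
# consistent quote character instead, e.g.
#
#     The code below is quoted rather than indented::
#
#     > print('hello')
#     > print('world')
#
# ``initial_quoted`` fixes the quote character ('>') from the first line;
# the ``quoted`` transition it installs then accepts only lines beginning
# with that same character, and ``text`` reports inconsistent quoting.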
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
"""
This is the ``docutils.parsers.rst.states`` module, the core of
the reStructuredText parser. It defines the following:
:Classes:
- `RSTStateMachine`: reStructuredText parser's entry point.
- `NestedStateMachine`: recursive StateMachine.
- `RSTState`: reStructuredText State superclass.
- `Inliner`: For parsing inline markup.
- `Body`: Generic classifier of the first line of a block.
- `SpecializedBody`: Superclass for compound element members.
- `BulletList`: Second and subsequent bullet_list list_items.
- `DefinitionList`: Second+ definition_list_items.
- `EnumeratedList`: Second+ enumerated_list list_items.
- `FieldList`: Second+ fields.
- `OptionList`: Second+ option_list_items.
- `RFC2822List`: Second+ RFC2822-style fields.
- `ExtensionOptions`: Parses directive option fields.
- `Explicit`: Second+ explicit markup constructs.
- `SubstitutionDef`: For embedded directives in substitution definitions.
- `Text`: Classifier of second line of a text block.
- `SpecializedText`: Superclass for continuation lines of Text-variants.
- `Definition`: Second line of potential definition_list_item.
- `Line`: Second line of overlined section title or transition marker.
- `Struct`: An auxiliary collection class.
:Exception classes:
- `MarkupError`
- `ParserError`
- `MarkupMismatch`
:Functions:
- `escape2null()`: Return a string, escape-backslashes converted to nulls.
- `unescape()`: Return a string, nulls removed or restored to backslashes.
:Attributes:
- `state_classes`: set of State classes used with `RSTStateMachine`.
Parser Overview
===============
The reStructuredText parser is implemented as a recursive state machine,
examining its input one line at a time. To understand how the parser works,
please first become familiar with the `docutils.statemachine` module. In the
description below, references are made to classes defined in this module;
please see the individual classes for details.
Parsing proceeds as follows:
1. The state machine examines each line of input, checking each of the
transition patterns of the state `Body`, in order, looking for a match.
The implicit transitions (blank lines and indentation) are checked before
any others. The 'text' transition is a catch-all (matches anything).
2. The method associated with the matched transition pattern is called.
A. Some transition methods are self-contained, appending elements to the
document tree (`Body.doctest` parses a doctest block). The parser's
current line index is advanced to the end of the element, and parsing
continues with step 1.
B. Other transition methods trigger the creation of a nested state machine,
whose job is to parse a compound construct ('indent' does a block quote,
'bullet' does a bullet list, 'overline' does a section [first checking
for a valid section header], etc.).
- In the case of lists and explicit markup, a one-off state machine is
created and run to parse contents of the first item.
- A new state machine is created and its initial state is set to the
appropriate specialized state (`BulletList` in the case of the
'bullet' transition; see `SpecializedBody` for more detail). This
state machine is run to parse the compound element (or series of
explicit markup elements), and returns as soon as a non-member element
is encountered. For example, the `BulletList` state machine ends as
soon as it encounters an element which is not a list item of that
bullet list. The optional omission of inter-element blank lines is
enabled by this nested state machine.
- The current line index is advanced to the end of the elements parsed,
and parsing continues with step 1.
C. The result of the 'text' transition depends on the next line of text.
The current state is changed to `Text`, under which the second line is
examined. If the second line is:
- Indented: The element is a definition list item, and parsing proceeds
similarly to step 2.B, using the `DefinitionList` state.
- A line of uniform punctuation characters: The element is a section
header; again, parsing proceeds as in step 2.B, and `Body` is still
used.
- Anything else: The element is a paragraph, which is examined for
inline markup and appended to the parent element. Processing
continues with step 1.
"""
__docformat__ = 'reStructuredText'
import sys
import re
try:
import roman
except ImportError:
import docutils.utils.roman as roman
from types import FunctionType, MethodType
from docutils import nodes, statemachine, utils, urischemes
from docutils import ApplicationError, DataError
from docutils.statemachine import StateMachineWS, StateWS
from docutils.nodes import fully_normalize_name as normalize_name
from docutils.nodes import whitespace_normalize_name
import docutils.parsers.rst
from docutils.parsers.rst import directives, languages, tableparser, roles
from docutils.parsers.rst.languages import en as _fallback_language_module
from docutils.utils import escape2null, unescape, column_width
from docutils.utils import punctuation_chars
class MarkupError(DataError): pass
class UnknownInterpretedRoleError(DataError): pass
class InterpretedRoleNotImplementedError(DataError): pass
class ParserError(ApplicationError): pass
class MarkupMismatch(Exception): pass
class Struct:
"""Stores data attributes for dotted-attribute access."""
def __init__(self, **keywordargs):
self.__dict__.update(keywordargs)
class RSTStateMachine(StateMachineWS):
"""
reStructuredText's master StateMachine.
The entry point to reStructuredText parsing is the `run()` method.
"""
def run(self, input_lines, document, input_offset=0, match_titles=True,
inliner=None):
"""
Parse `input_lines` and modify the `document` node in place.
Extend `StateMachineWS.run()`: set up parse-global data and
run the StateMachine.
"""
self.language = languages.get_language(
document.settings.language_code)
self.match_titles = match_titles
if inliner is None:
inliner = Inliner()
inliner.init_customizations(document.settings)
self.memo = Struct(document=document,
reporter=document.reporter,
language=self.language,
title_styles=[],
section_level=0,
section_bubble_up_kludge=False,
inliner=inliner)
self.document = document
self.attach_observer(document.note_source)
self.reporter = self.memo.reporter
self.node = document
results = StateMachineWS.run(self, input_lines, input_offset,
input_source=document['source'])
assert results == [], 'RSTStateMachine.run() results should be empty!'
self.node = self.memo = None # remove unneeded references
class NestedStateMachine(StateMachineWS):
"""
StateMachine run from within other StateMachine runs, to parse nested
document structures.
"""
def run(self, input_lines, input_offset, memo, node, match_titles=True):
"""
Parse `input_lines` and populate a `docutils.nodes.document` instance.
Extend `StateMachineWS.run()`: set up document-wide data.
"""
self.match_titles = match_titles
self.memo = memo
self.document = memo.document
self.attach_observer(self.document.note_source)
self.reporter = memo.reporter
self.language = memo.language
self.node = node
results = StateMachineWS.run(self, input_lines, input_offset)
assert results == [], ('NestedStateMachine.run() results should be '
'empty!')
return results
class RSTState(StateWS):
"""
reStructuredText State superclass.
Contains methods used by all State subclasses.
"""
nested_sm = NestedStateMachine
nested_sm_cache = []
def __init__(self, state_machine, debug=False):
self.nested_sm_kwargs = {'state_classes': state_classes,
'initial_state': 'Body'}
StateWS.__init__(self, state_machine, debug)
def runtime_init(self):
StateWS.runtime_init(self)
memo = self.state_machine.memo
self.memo = memo
self.reporter = memo.reporter
self.inliner = memo.inliner
self.document = memo.document
self.parent = self.state_machine.node
# enable the reporter to determine source and source-line
if not hasattr(self.reporter, 'get_source_and_line'):
self.reporter.get_source_and_line = self.state_machine.get_source_and_line
# print "adding get_source_and_line to reporter", self.state_machine.input_offset
def goto_line(self, abs_line_offset):
"""
Jump to input line `abs_line_offset`, ignoring jumps past the end.
"""
try:
self.state_machine.goto_line(abs_line_offset)
except EOFError:
pass
def no_match(self, context, transitions):
"""
Override `StateWS.no_match` to generate a system message.
This code should never be run.
"""
self.reporter.severe(
'Internal error: no transition pattern match. State: "%s"; '
'transitions: %s; context: %s; current line: %r.'
% (self.__class__.__name__, transitions, context,
self.state_machine.line))
return context, None, []
def bof(self, context):
"""Called at beginning of file."""
return [], []
def nested_parse(self, block, input_offset, node, match_titles=False,
state_machine_class=None, state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`.
"""
use_default = 0
if state_machine_class is None:
state_machine_class = self.nested_sm
use_default += 1
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs
use_default += 1
block_length = len(block)
state_machine = None
if use_default == 2:
try:
state_machine = self.nested_sm_cache.pop()
except IndexError:
pass
if not state_machine:
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
if use_default == 2:
self.nested_sm_cache.append(state_machine)
else:
state_machine.unlink()
new_offset = state_machine.abs_line_offset()
# No `block.parent` implies disconnected -- lines aren't in sync:
if block.parent and (len(block) - block_length) != 0:
# Adjustment for block if modified in nested parse:
self.state_machine.next_line(len(block) - block_length)
return new_offset
def nested_list_parse(self, block, input_offset, node, initial_state,
blank_finish,
blank_finish_state=None,
extra_settings={},
match_titles=False,
state_machine_class=None,
state_machine_kwargs=None):
"""
Create a new StateMachine rooted at `node` and run it over the input
`block`. Also keep track of optional intermediate blank lines and the
required final one.
"""
if state_machine_class is None:
state_machine_class = self.nested_sm
if state_machine_kwargs is None:
state_machine_kwargs = self.nested_sm_kwargs.copy()
state_machine_kwargs['initial_state'] = initial_state
state_machine = state_machine_class(debug=self.debug,
**state_machine_kwargs)
if blank_finish_state is None:
blank_finish_state = initial_state
state_machine.states[blank_finish_state].blank_finish = blank_finish
for key, value in extra_settings.items():
setattr(state_machine.states[initial_state], key, value)
state_machine.run(block, input_offset, memo=self.memo,
node=node, match_titles=match_titles)
blank_finish = state_machine.states[blank_finish_state].blank_finish
state_machine.unlink()
return state_machine.abs_line_offset(), blank_finish
def section(self, title, source, style, lineno, messages):
"""Check for a valid subsection and create one if it checks out."""
if self.check_subsection(source, style, lineno):
self.new_subsection(title, lineno, messages)
def check_subsection(self, source, style, lineno):
"""
Check for a valid subsection header. Return 1 (true) or None (false).
When a new section is reached that isn't a subsection of the current
section, back up the line count (use ``previous_line(-x)``), then
``raise EOFError``. The current StateMachine will finish, then the
calling StateMachine can re-examine the title. This will work its way
back up the calling chain until the correct section level is reached.
@@@ Alternative: Evaluate the title, store the title info & level, and
back up the chain until that level is reached. Store in memo? Or
return in results?
:Exception: `EOFError` when a sibling or supersection encountered.
"""
memo = self.memo
title_styles = memo.title_styles
mylevel = memo.section_level
try: # check for existing title style
level = title_styles.index(style) + 1
except ValueError: # new title style
if len(title_styles) == memo.section_level: # new subsection
title_styles.append(style)
return 1
else: # not at lowest level
self.parent += self.title_inconsistent(source, lineno)
return None
if level <= mylevel: # sibling or supersection
memo.section_level = level # bubble up to parent section
if len(style) == 2:
memo.section_bubble_up_kludge = True
# back up 2 lines for underline title, 3 for overline title
self.state_machine.previous_line(len(style) + 1)
raise EOFError # let parent section re-evaluate
if level == mylevel + 1: # immediate subsection
return 1
else: # invalid subsection
self.parent += self.title_inconsistent(source, lineno)
return None
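# Illustrative note (editor's sketch, not part of the original module): a
# "style" here is the underline character for plain titles (e.g. '=') or an
# (overline, underline) pair for overlined titles. After parsing '=' and
# '-' titles in that order, ``memo.title_styles`` is ['=', '-']; a later
# '=' title seen while at section level 2 gives ``level == 1 <= mylevel``,
# so the line count is backed up and EOFError propagates until the
# top-level state machine re-examines the title at the right level.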
def title_inconsistent(self, sourcetext, lineno):
error = self.reporter.severe(
'Title level inconsistent:', nodes.literal_block('', sourcetext),
line=lineno)
return error
def new_subsection(self, title, lineno, messages):
"""Append new subsection to document tree. On return, check level."""
memo = self.memo
mylevel = memo.section_level
memo.section_level += 1
section_node = nodes.section()
self.parent += section_node
textnodes, title_messages = self.inline_text(title, lineno)
titlenode = nodes.title(title, '', *textnodes)
name = normalize_name(titlenode.astext())
section_node['names'].append(name)
section_node += titlenode
section_node += messages
section_node += title_messages
self.document.note_implicit_target(section_node, section_node)
offset = self.state_machine.line_offset + 1
absoffset = self.state_machine.abs_line_offset() + 1
newabsoffset = self.nested_parse(
self.state_machine.input_lines[offset:], input_offset=absoffset,
node=section_node, match_titles=True)
self.goto_line(newabsoffset)
if memo.section_level <= mylevel: # can't handle next section?
raise EOFError # bubble up to supersection
# reset section_level; next pass will detect it properly
memo.section_level = mylevel
def paragraph(self, lines, lineno):
"""
Return a list (paragraph & messages) & a boolean: literal_block next?
"""
data = '\n'.join(lines).rstrip()
if re.search(r'(?<!\\)(\\\\)*::$', data):
if len(data) == 2:
return [], 1
elif data[-3] in ' \n':
text = data[:-3].rstrip()
else:
text = data[:-1]
literalnext = 1
else:
text = data
literalnext = 0
textnodes, messages = self.inline_text(text, lineno)
p = nodes.paragraph(data, '', *textnodes)
p.source, p.line = self.state_machine.get_source_and_line(lineno)
return [p] + messages, literalnext
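# Illustrative note (editor's sketch, not part of the original module):
# trailing "::" handling in `paragraph()`:
#
#     'Example::'       -> paragraph text 'Example:',  literalnext true
#     'Example: ::'     -> paragraph text 'Example:',  literalnext true
#     '::'              -> no paragraph at all,        literalnext true
#     'No colons here'  -> paragraph text unchanged,   literalnext false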
def inline_text(self, text, lineno):
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
"""
return self.inliner.parse(text, lineno, self.memo, self.parent)
def unindent_warning(self, node_name):
# the actual problem is one line below the current line
lineno = self.state_machine.abs_line_number()+1
return self.reporter.warning('%s ends without a blank line; '
'unexpected unindent.' % node_name,
line=lineno)
def build_regexp(definition, compile=True):
"""
Build, compile and return a regular expression based on `definition`.
:Parameter: `definition`: a 4-tuple (group name, prefix, suffix, parts),
where "parts" is a list of regular expressions and/or regular
expression definitions to be joined into an or-group.
"""
name, prefix, suffix, parts = definition
part_strings = []
for part in parts:
if type(part) is tuple:
part_strings.append(build_regexp(part, None))
else:
part_strings.append(part)
or_group = '|'.join(part_strings)
regexp = '%(prefix)s(?P<%(name)s>%(or_group)s)%(suffix)s' % locals()
if compile:
return re.compile(regexp, re.UNICODE)
else:
return regexp
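# Illustrative note (editor's sketch, not part of the original module): a
# nested definition such as
#
#     build_regexp(('outer', '^', '$',
#                   ['[0-9]+', ('inner', '<', '>', ['a', 'b'])]),
#                  compile=False)
#
# yields the string '^(?P<outer>[0-9]+|<(?P<inner>a|b)>)$'; each tuple
# becomes a named or-group embedded in its parent's alternatives.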
class Inliner:
"""
Parse inline markup; call the `parse()` method.
"""
def __init__(self):
self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),]
"""List of (pattern, bound method) tuples, used by
`self.implicit_inline`."""
def init_customizations(self, settings):
"""Setting-based customizations; run when parsing begins."""
if settings.pep_references:
self.implicit_dispatch.append((self.patterns.pep,
self.pep_reference))
if settings.rfc_references:
self.implicit_dispatch.append((self.patterns.rfc,
self.rfc_reference))
def parse(self, text, lineno, memo, parent):
# Needs to be refactored for nested inline markup.
# Add nested_parse() method?
"""
Return 2 lists: nodes (text and inline elements), and system_messages.
Using `self.patterns.initial`, a pattern which matches start-strings
(emphasis, strong, interpreted, phrase reference, literal,
substitution reference, and inline target) and complete constructs
(simple reference, footnote reference), search for a candidate. When
one is found, check for validity (e.g., not a quoted '*' character).
If valid, search for the corresponding end string if applicable, and
check it for validity. If not found or invalid, generate a warning
and ignore the start-string. Implicit inline markup (e.g. standalone
URIs) is found last.
"""
self.reporter = memo.reporter
self.document = memo.document
self.language = memo.language
self.parent = parent
pattern_search = self.patterns.initial.search
dispatch = self.dispatch
remaining = escape2null(text)
processed = []
unprocessed = []
messages = []
while remaining:
match = pattern_search(remaining)
if match:
groups = match.groupdict()
method = dispatch[groups['start'] or groups['backquote']
or groups['refend'] or groups['fnend']]
before, inlines, remaining, sysmessages = method(self, match,
lineno)
unprocessed.append(before)
messages += sysmessages
if inlines:
processed += self.implicit_inline(''.join(unprocessed),
lineno)
processed += inlines
unprocessed = []
else:
break
remaining = ''.join(unprocessed) + remaining
if remaining:
processed += self.implicit_inline(remaining, lineno)
return processed, messages
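# Illustrative note (editor's sketch, not part of the original module):
# with ``memo`` and ``parent`` set up by `RSTState.inline_text`, parsing
# 'plain *emphasized* text' returns roughly
#
#     [Text('plain '), emphasis(rawsource='*emphasized*'), Text(' text')]
#
# as the node list, plus any system messages generated along the way.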
# Inline object recognition
# -------------------------
# lookahead and look-behind expressions for inline markup rules
start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' %
(punctuation_chars.openers,
punctuation_chars.delimiters))
end_string_suffix = (u'($|(?=\\s|[\x00%s%s%s]))' %
(punctuation_chars.closing_delimiters,
punctuation_chars.delimiters,
punctuation_chars.closers))
# print start_string_prefix.encode('utf8')
# TODO: support non-ASCII whitespace in the following 4 patterns?
non_whitespace_before = r'(?<![ \n])'
non_whitespace_escape_before = r'(?<![ \n\x00])'
non_unescaped_whitespace_escape_before = r'(?<!(?<!\x00)[ \n\x00])'
non_whitespace_after = r'(?![ \n])'
# Alphanumerics with isolated internal [-._+:] chars (i.e. not 2 together):
simplename = r'(?:(?!_)\w)+(?:[-._+:](?:(?!_)\w)+)*'
# Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation:
urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name
(?<!\x00)@ # at
%(emailc)s+(?:\.%(emailc)s*)* # host
%(uri_end)s # final URI char
"""
parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
r'_`', # inline internal target
r'\|(?!\|)'] # substitution reference
),
('whole', '', end_string_suffix, # whole constructs
[# reference name & end-string
r'(?P<refname>%s)(?P<refend>__?)' % simplename,
('footnotelabel', r'\[', r'(?P<fnend>\]_)',
[r'[0-9]+', # manually numbered
r'\#(%s)?' % simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
r'(?P<citationlabel>%s)' % simplename] # citation reference
)
]
),
('backquote', # interpreted text or phrase reference
'(?P<role>(:%s:)?)' % simplename, # optional role
non_whitespace_after,
['`(?!`)'] # but not literal
)
]
)
patterns = Struct(
initial=build_regexp(parts),
emphasis=re.compile(non_whitespace_escape_before
+ r'(\*)' + end_string_suffix, re.UNICODE),
strong=re.compile(non_whitespace_escape_before
+ r'(\*\*)' + end_string_suffix, re.UNICODE),
interpreted_or_phrase_ref=re.compile(
r"""
%(non_unescaped_whitespace_escape_before)s
(
`
(?P<suffix>
(?P<role>:%(simplename)s:)?
(?P<refend>__?)?
)
)
%(end_string_suffix)s
""" % locals(), re.VERBOSE | re.UNICODE),
embedded_uri=re.compile(
r"""
(
(?:[ \n]+|^) # spaces or beginning of line/string
< # open bracket
%(non_whitespace_after)s
([^<>\x00]+) # anything but angle brackets & nulls
%(non_whitespace_before)s
> # close bracket w/o whitespace before
)
$ # end of string
""" % locals(), re.VERBOSE | re.UNICODE),
literal=re.compile(non_whitespace_before + '(``)'
+ end_string_suffix),
target=re.compile(non_whitespace_escape_before
+ r'(`)' + end_string_suffix),
substitution_ref=re.compile(non_whitespace_escape_before
+ r'(\|_{0,2})'
+ end_string_suffix),
email=re.compile(email_pattern % locals() + '$',
re.VERBOSE | re.UNICODE),
uri=re.compile(
(r"""
%(start_string_prefix)s
(?P<whole>
(?P<absolute> # absolute URI
(?P<scheme> # scheme (http, ftp, mailto)
[a-zA-Z][a-zA-Z0-9.+-]*
)
:
(
( # either:
(//?)? # hierarchical URI
%(uric)s* # URI characters
%(uri_end)s # final URI char
)
( # optional query
\?%(uric)s*
%(uri_end)s
)?
( # optional fragment
\#%(uric)s*
%(uri_end)s
)?
)
)
| # *OR*
(?P<email> # email address
""" + email_pattern + r"""
)
)
%(end_string_suffix)s
""") % locals(), re.VERBOSE | re.UNICODE),
pep=re.compile(
r"""
%(start_string_prefix)s
(
(pep-(?P<pepnum1>\d+)(.txt)?) # reference to source file
|
(PEP\s+(?P<pepnum2>\d+)) # reference by name
)
%(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE),
rfc=re.compile(
r"""
%(start_string_prefix)s
(RFC(-|\s+)?(?P<rfcnum>\d+))
%(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE))
def quoted_start(self, match):
"""Test if inline markup start-string is 'quoted'.
'Quoted' in this context means the start-string is enclosed in a pair
of matching opening/closing delimiters (not necessarily quotes)
or at the end of the match.
"""
string = match.string
start = match.start()
if start == 0: # start-string at beginning of text
return False
prestart = string[start - 1]
try:
poststart = string[match.end()]
except IndexError: # start-string at end of text
return True # not "quoted" but no markup start-string either
return punctuation_chars.match_chars(prestart, poststart)
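# Illustrative note (editor's sketch, not part of the original module): in
# the text '"*" is a character', the '*' start-string sits between the
# matching pair of quotes, so `quoted_start` returns true and no emphasis
# is opened; in 'x *y* z' it returns false and parsing proceeds.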
def inline_obj(self, match, lineno, end_pattern, nodeclass,
restore_backslashes=False):
string = match.string
matchstart = match.start('start')
matchend = match.end('start')
if self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [], '')
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
text = unescape(endmatch.string[:endmatch.start(1)],
restore_backslashes)
textend = matchend + endmatch.end(1)
rawsource = unescape(string[matchstart:textend], 1)
return (string[:matchstart], [nodeclass(rawsource, text)],
string[textend:], [], endmatch.group(1))
msg = self.reporter.warning(
'Inline %s start-string without end-string.'
% nodeclass.__name__, line=lineno)
text = unescape(string[matchstart:matchend], 1)
rawsource = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, rawsource, msg)
return string[:matchstart], [prb], string[matchend:], [msg], ''
def problematic(self, text, rawsource, message):
msgid = self.document.set_id(message, self.parent)
problematic = nodes.problematic(rawsource, text, refid=msgid)
prbid = self.document.set_id(problematic)
message.add_backref(prbid)
return problematic
def emphasis(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.emphasis, nodes.emphasis)
return before, inlines, remaining, sysmessages
def strong(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.strong, nodes.strong)
return before, inlines, remaining, sysmessages
def interpreted_or_phrase_ref(self, match, lineno):
end_pattern = self.patterns.interpreted_or_phrase_ref
string = match.string
matchstart = match.start('backquote')
matchend = match.end('backquote')
rolestart = match.start('role')
role = match.group('role')
position = ''
if role:
role = role[1:-1]
position = 'prefix'
elif self.quoted_start(match):
return (string[:matchend], [], string[matchend:], [])
endmatch = end_pattern.search(string[matchend:])
if endmatch and endmatch.start(1): # 1 or more chars
textend = matchend + endmatch.end()
if endmatch.group('role'):
if role:
msg = self.reporter.warning(
'Multiple roles in interpreted text (both '
'prefix and suffix present; only one allowed).',
line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
role = endmatch.group('suffix')[1:-1]
position = 'suffix'
escaped = endmatch.string[:endmatch.start(1)]
rawsource = unescape(string[matchstart:textend], 1)
if rawsource[-1:] == '_':
if role:
msg = self.reporter.warning(
'Mismatch: both interpreted text role %s and '
'reference suffix.' % position, line=lineno)
text = unescape(string[rolestart:textend], 1)
prb = self.problematic(text, text, msg)
return string[:rolestart], [prb], string[textend:], [msg]
return self.phrase_ref(string[:matchstart], string[textend:],
rawsource, escaped, unescape(escaped))
else:
rawsource = unescape(string[rolestart:textend], 1)
nodelist, messages = self.interpreted(rawsource, escaped, role,
lineno)
return (string[:rolestart], nodelist,
string[textend:], messages)
msg = self.reporter.warning(
'Inline interpreted text or phrase reference start-string '
'without end-string.', line=lineno)
text = unescape(string[matchstart:matchend], 1)
prb = self.problematic(text, text, msg)
return string[:matchstart], [prb], string[matchend:], [msg]
def phrase_ref(self, before, after, rawsource, escaped, text):
match = self.patterns.embedded_uri.search(escaped)
if match:
text = unescape(escaped[:match.start(0)])
uri_text = match.group(2)
uri = ''.join(uri_text.split())
uri = self.adjust_uri(uri)
if uri:
target = nodes.target(match.group(1), refuri=uri)
target.referenced = 1
else:
raise ApplicationError('problem with URI: %r' % uri_text)
if not text:
text = uri
else:
target = None
refname = normalize_name(text)
reference = nodes.reference(rawsource, text,
name=whitespace_normalize_name(text))
node_list = [reference]
if rawsource[-2:] == '__':
if target:
reference['refuri'] = uri
else:
reference['anonymous'] = 1
else:
if target:
reference['refuri'] = uri
target['names'].append(refname)
self.document.note_explicit_target(target, self.parent)
node_list.append(target)
else:
reference['refname'] = refname
self.document.note_refname(reference)
return before, node_list, after, []
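# Illustrative note (editor's sketch, not part of the original module):
# '`Python <http://www.python.org/>`_' yields a reference node (text
# 'Python', refuri taken from the embedded URI) plus a matching named
# target node; the anonymous form '`...`__' sets refuri on the reference
# only and appends no target.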
def adjust_uri(self, uri):
match = self.patterns.email.match(uri)
if match:
return 'mailto:' + uri
else:
return uri
def interpreted(self, rawsource, text, role, lineno):
role_fn, messages = roles.role(role, self.language, lineno,
self.reporter)
if role_fn:
nodes, messages2 = role_fn(role, rawsource, text, lineno, self)
return nodes, messages + messages2
else:
msg = self.reporter.error(
'Unknown interpreted text role "%s".' % role,
line=lineno)
return ([self.problematic(rawsource, rawsource, msg)],
messages + [msg])
def literal(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.literal, nodes.literal,
restore_backslashes=True)
return before, inlines, remaining, sysmessages
def inline_internal_target(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.target, nodes.target)
if inlines and isinstance(inlines[0], nodes.target):
assert len(inlines) == 1
target = inlines[0]
name = normalize_name(target.astext())
target['names'].append(name)
self.document.note_explicit_target(target, self.parent)
return before, inlines, remaining, sysmessages
def substitution_reference(self, match, lineno):
before, inlines, remaining, sysmessages, endstring = self.inline_obj(
match, lineno, self.patterns.substitution_ref,
nodes.substitution_reference)
if len(inlines) == 1:
subref_node = inlines[0]
if isinstance(subref_node, nodes.substitution_reference):
subref_text = subref_node.astext()
self.document.note_substitution_ref(subref_node, subref_text)
if endstring[-1:] == '_':
reference_node = nodes.reference(
'|%s%s' % (subref_text, endstring), '')
if endstring[-2:] == '__':
reference_node['anonymous'] = 1
else:
reference_node['refname'] = normalize_name(subref_text)
self.document.note_refname(reference_node)
reference_node += subref_node
inlines = [reference_node]
return before, inlines, remaining, sysmessages
def footnote_reference(self, match, lineno):
"""
Handles `nodes.footnote_reference` and `nodes.citation_reference`
elements.
"""
label = match.group('footnotelabel')
refname = normalize_name(label)
string = match.string
before = string[:match.start('whole')]
remaining = string[match.end('whole'):]
if match.group('citationlabel'):
refnode = nodes.citation_reference('[%s]_' % label,
refname=refname)
refnode += nodes.Text(label)
self.document.note_citation_ref(refnode)
else:
refnode = nodes.footnote_reference('[%s]_' % label)
if refname[0] == '#':
refname = refname[1:]
refnode['auto'] = 1
self.document.note_autofootnote_ref(refnode)
elif refname == '*':
refname = ''
refnode['auto'] = '*'
self.document.note_symbol_footnote_ref(
refnode)
else:
refnode += nodes.Text(label)
if refname:
refnode['refname'] = refname
self.document.note_footnote_ref(refnode)
if utils.get_trim_footnote_ref_space(self.document.settings):
before = before.rstrip()
return (before, [refnode], remaining, [])
def reference(self, match, lineno, anonymous=False):
referencename = match.group('refname')
refname = normalize_name(referencename)
referencenode = nodes.reference(
referencename + match.group('refend'), referencename,
name=whitespace_normalize_name(referencename))
if anonymous:
referencenode['anonymous'] = 1
else:
referencenode['refname'] = refname
self.document.note_refname(referencenode)
string = match.string
matchstart = match.start('whole')
matchend = match.end('whole')
return (string[:matchstart], [referencenode], string[matchend:], [])
def anonymous_reference(self, match, lineno):
return self.reference(match, lineno, anonymous=1)
def standalone_uri(self, match, lineno):
if (not match.group('scheme')
or match.group('scheme').lower() in urischemes.schemes):
if match.group('email'):
addscheme = 'mailto:'
else:
addscheme = ''
text = match.group('whole')
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped,
refuri=addscheme + unescaped)]
else: # not a valid scheme
raise MarkupMismatch
def pep_reference(self, match, lineno):
text = match.group(0)
if text.startswith('pep-'):
pepnum = int(match.group('pepnum1'))
elif text.startswith('PEP'):
pepnum = int(match.group('pepnum2'))
else:
raise MarkupMismatch
ref = (self.document.settings.pep_base_url
+ self.document.settings.pep_file_url_template % pepnum)
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno):
text = match.group(0)
if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum'))
ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else:
raise MarkupMismatch
unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
def implicit_inline(self, text, lineno):
"""
Check each of the patterns in `self.implicit_dispatch` for a match,
and dispatch to the stored method for the pattern. Recursively check
the text before and after the match. Return a list of `nodes.Text`
and inline element nodes.
"""
if not text:
return []
for pattern, method in self.implicit_dispatch:
match = pattern.search(text)
if match:
try:
# Must recurse on strings before *and* after the match;
# there may be multiple patterns.
return (self.implicit_inline(text[:match.start()], lineno)
+ method(match, lineno) +
self.implicit_inline(text[match.end():], lineno))
except MarkupMismatch:
pass
return [nodes.Text(unescape(text), rawsource=unescape(text, 1))]
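# Illustrative note (editor's sketch, not part of the original module):
# for 'see http://example.org now', the URI pattern matches mid-string,
# so the result is [Text('see '), reference(refuri=...), Text(' now')],
# the recursion handling the plain text before and after the match.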
dispatch = {'*': emphasis,
'**': strong,
'`': interpreted_or_phrase_ref,
'``': literal,
'_`': inline_internal_target,
']_': footnote_reference,
'|': substitution_reference,
'_': reference,
'__': anonymous_reference}
def _loweralpha_to_int(s, _zero=(ord('a')-1)):
return ord(s) - _zero
def _upperalpha_to_int(s, _zero=(ord('A')-1)):
return ord(s) - _zero
def _lowerroman_to_int(s):
return roman.fromRoman(s.upper())
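# Illustrative note (editor's sketch, not part of the original module):
#     _loweralpha_to_int('c')  -> 3
#     _upperalpha_to_int('B')  -> 2
#     _lowerroman_to_int('iv') -> 4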
class Body(RSTState):
"""
Generic classifier of the first line of a block.
"""
double_width_pad_char = tableparser.TableParser.double_width_pad_char
"""Padding character for East Asian double-width text."""
enum = Struct()
"""Enumerated list parsing information."""
enum.formatinfo = {
'parens': Struct(prefix='(', suffix=')', start=1, end=-1),
'rparen': Struct(prefix='', suffix=')', start=0, end=-1),
'period': Struct(prefix='', suffix='.', start=0, end=-1)}
enum.formats = enum.formatinfo.keys()
enum.sequences = ['arabic', 'loweralpha', 'upperalpha',
'lowerroman', 'upperroman'] # ORDERED!
enum.sequencepats = {'arabic': '[0-9]+',
'loweralpha': '[a-z]',
'upperalpha': '[A-Z]',
'lowerroman': '[ivxlcdm]+',
'upperroman': '[IVXLCDM]+',}
enum.converters = {'arabic': int,
'loweralpha': _loweralpha_to_int,
'upperalpha': _upperalpha_to_int,
'lowerroman': _lowerroman_to_int,
'upperroman': roman.fromRoman}
enum.sequenceregexps = {}
for sequence in enum.sequences:
enum.sequenceregexps[sequence] = re.compile(
enum.sequencepats[sequence] + '$', re.UNICODE)
grid_table_top_pat = re.compile(r'\+-[-+]+-\+ *$')
"""Matches the top (& bottom) of a full table)."""
simple_table_top_pat = re.compile('=+( +=+)+ *$')
"""Matches the top of a simple table."""
simple_table_border_pat = re.compile('=+[ =]*$')
"""Matches the bottom & header bottom of a simple table."""
pats = {}
"""Fragments of patterns used by transitions."""
pats['nonalphanum7bit'] = '[!-/:-@[-`{-~]'
pats['alpha'] = '[a-zA-Z]'
pats['alphanum'] = '[a-zA-Z0-9]'
pats['alphanumplus'] = '[a-zA-Z0-9_-]'
pats['enum'] = ('(%(arabic)s|%(loweralpha)s|%(upperalpha)s|%(lowerroman)s'
'|%(upperroman)s|#)' % enum.sequencepats)
pats['optname'] = '%(alphanum)s%(alphanumplus)s*' % pats
# @@@ Loosen up the pattern? Allow Unicode?
pats['optarg'] = '(%(alpha)s%(alphanumplus)s*|<[^<>]+>)' % pats
pats['shortopt'] = r'(-|\+)%(alphanum)s( ?%(optarg)s)?' % pats
pats['longopt'] = r'(--|/)%(optname)s([ =]%(optarg)s)?' % pats
pats['option'] = r'(%(shortopt)s|%(longopt)s)' % pats
for format in enum.formats:
pats[format] = '(?P<%s>%s%s%s)' % (
format, re.escape(enum.formatinfo[format].prefix),
pats['enum'], re.escape(enum.formatinfo[format].suffix))
patterns = {
'bullet': u'[-+*\u2022\u2023\u2043]( +|$)',
'enumerator': r'(%(parens)s|%(rparen)s|%(period)s)( +|$)' % pats,
'field_marker': r':(?![: ])([^:\\]|\\.)*(?<! ):( +|$)',
'option_marker': r'%(option)s(, %(option)s)*( +| ?$)' % pats,
'doctest': r'>>>( +|$)',
'line_block': r'\|( +|$)',
'grid_table_top': grid_table_top_pat,
'simple_table_top': simple_table_top_pat,
'explicit_markup': r'\.\.( +|$)',
'anonymous': r'__( +|$)',
'line': r'(%(nonalphanum7bit)s)\1* *$' % pats,
'text': r''}
initial_transitions = (
'bullet',
'enumerator',
'field_marker',
'option_marker',
'doctest',
'line_block',
'grid_table_top',
'simple_table_top',
'explicit_markup',
'anonymous',
'line',
'text')
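# Illustrative note (editor's sketch, not part of the original module):
# 'line' matches a run of one repeated 7-bit punctuation character
# ('----', '====', '~~~~'), used for transitions and section title
# adornments; 'text' matches anything, so it is the catch-all transition
# checked last.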
def indent(self, match, context, next_state):
"""Block quote."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Block quote')
return context, next_state, []
def block_quote(self, indented, line_offset):
elements = []
while indented:
(blockquote_lines,
attribution_lines,
attribution_offset,
indented,
new_line_offset) = self.split_attribution(indented, line_offset)
blockquote = nodes.block_quote()
self.nested_parse(blockquote_lines, line_offset, blockquote)
elements.append(blockquote)
if attribution_lines:
attribution, messages = self.parse_attribution(
attribution_lines, attribution_offset)
blockquote += attribution
elements += messages
line_offset = new_line_offset
while indented and not indented[0]:
indented = indented[1:]
line_offset += 1
return elements
# U+2014 is an em-dash:
attribution_pattern = re.compile(u'(---?(?!-)|\u2014) *(?=[^ \\n])',
re.UNICODE)
def split_attribution(self, indented, line_offset):
"""
Check for a block quote attribution and split it off:
* First line after a blank line must begin with a dash ("--", "---",
em-dash; matches `self.attribution_pattern`).
* Every line after that must have consistent indentation.
* Attributions must be preceded by block quote content.
Return a tuple of: (block quote content lines, attribution lines,
attribution offset, remaining indented lines, new line offset).
"""
blank = None
nonblank_seen = False
for i in range(len(indented)):
line = indented[i].rstrip()
if line:
if nonblank_seen and blank == i - 1: # last line blank
match = self.attribution_pattern.match(line)
if match:
attribution_end, indent = self.check_attribution(
indented, i)
if attribution_end:
a_lines = indented[i:attribution_end]
a_lines.trim_left(match.end(), end=1)
a_lines.trim_left(indent, start=1)
return (indented[:i], a_lines,
i, indented[attribution_end:],
line_offset + attribution_end)
nonblank_seen = True
else:
blank = i
else:
return (indented, None, None, None, None)
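# Illustrative note (editor's sketch, not part of the original module):
# in the indented block
#
#     Quoted paragraph.
#
#     -- Attribution Name
#
# the blank line plus the dash prefix mark the final lines as an
# attribution; `split_attribution` returns the quoted lines, the
# attribution lines with the '-- ' trimmed, and any remaining indented
# input for further block quotes.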
def check_attribution(self, indented, attribution_start):
"""
Check attribution shape.
Return the index past the end of the attribution, and the indent.
"""
indent = None
i = attribution_start + 1
for i in range(attribution_start + 1, len(indented)):
line = indented[i].rstrip()
if not line:
break
if indent is None:
indent = len(line) - len(line.lstrip())
elif len(line) - len(line.lstrip()) != indent:
return None, None # bad shape; not an attribution
else:
# return index of line after last attribution line:
i += 1
return i, (indent or 0)
def parse_attribution(self, indented, line_offset):
text = '\n'.join(indented).rstrip()
lineno = self.state_machine.abs_line_number() + line_offset
textnodes, messages = self.inline_text(text, lineno)
node = nodes.attribution(text, '', *textnodes)
node.source, node.line = self.state_machine.get_source_and_line(lineno)
return node, messages
def bullet(self, match, context, next_state):
"""Bullet list item."""
bulletlist = nodes.bullet_list()
self.parent += bulletlist
bulletlist['bullet'] = match.string[0]
i, blank_finish = self.list_item(match.end())
bulletlist += i
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=bulletlist, initial_state='BulletList',
blank_finish=blank_finish)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.unindent_warning('Bullet list')
return [], next_state, []
def list_item(self, indent):
if self.state_machine.line[indent:]:
indented, line_offset, blank_finish = (
self.state_machine.get_known_indented(indent))
else:
indented, indent, line_offset, blank_finish = (
self.state_machine.get_first_known_indented(indent))
listitem = nodes.list_item('\n'.join(indented))
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=listitem)
return listitem, blank_finish
def enumerator(self, match, context, next_state):
"""Enumerated List Item"""
format, sequence, text, ordinal = self.parse_enumerator(match)
if not self.is_enumerated_list_item(ordinal, sequence, format):
raise statemachine.TransitionCorrection('text')
enumlist = nodes.enumerated_list()
self.parent += enumlist
if sequence == '#':
enumlist['enumtype'] = 'arabic'
else:
enumlist['enumtype'] = sequence
enumlist['prefix'] = self.enum.formatinfo[format].prefix
enumlist['suffix'] = self.enum.formatinfo[format].suffix
if ordinal != 1:
enumlist['start'] = ordinal
msg = self.reporter.info(
'Enumerated list start value not ordinal-1: "%s" (ordinal %s)'
% (text, ordinal))
self.parent += msg
listitem, blank_finish = self.list_item(match.end())
enumlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=enumlist, initial_state='EnumeratedList',
blank_finish=blank_finish,
extra_settings={'lastordinal': ordinal,
'format': format,
'auto': sequence == '#'})
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Enumerated list')
return [], next_state, []
def parse_enumerator(self, match, expected_sequence=None):
"""
Analyze an enumerator and return the results.
:Return:
- the enumerator format ('period', 'parens', or 'rparen'),
- the sequence used ('arabic', 'loweralpha', 'upperroman', etc.),
- the text of the enumerator, stripped of formatting, and
- the ordinal value of the enumerator ('a' -> 1, 'ii' -> 2, etc.;
``None`` is returned for invalid enumerator text).
The enumerator format has already been determined by the regular
expression match. If `expected_sequence` is given, that sequence is
tried first. If not, we check for Roman numeral 1. This way,
single-character Roman numerals (which are also alphabetical) can be
matched. If no sequence has been matched, all sequences are checked in
order.
"""
groupdict = match.groupdict()
sequence = ''
for format in self.enum.formats:
if groupdict[format]: # was this the format matched?
break # yes; keep `format`
else: # shouldn't happen
raise ParserError('enumerator format not matched')
text = groupdict[format][self.enum.formatinfo[format].start
:self.enum.formatinfo[format].end]
if text == '#':
sequence = '#'
elif expected_sequence:
try:
if self.enum.sequenceregexps[expected_sequence].match(text):
sequence = expected_sequence
except KeyError: # shouldn't happen
raise ParserError('unknown enumerator sequence: %s'
% expected_sequence)
elif text == 'i':
sequence = 'lowerroman'
elif text == 'I':
sequence = 'upperroman'
if not sequence:
for sequence in self.enum.sequences:
if self.enum.sequenceregexps[sequence].match(text):
break
else: # shouldn't happen
raise ParserError('enumerator sequence not matched')
if sequence == '#':
ordinal = 1
else:
try:
ordinal = self.enum.converters[sequence](text)
except roman.InvalidRomanNumeralError:
ordinal = None
return format, sequence, text, ordinal
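# Illustrative note (editor's sketch, not part of the original module):
#     '(ii)' -> ('parens', 'lowerroman', 'ii', 2)
#     '3.'   -> ('period', 'arabic', '3', 3)
#     'C)'   -> ('rparen', 'upperalpha', 'C', 3)  # upperalpha tried before
#                                                 # upperroman (ORDERED!)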
def is_enumerated_list_item(self, ordinal, sequence, format):
"""
Check validity based on the ordinal value and the second line.
Return true if the ordinal is valid and the second line is blank,
indented, or starts with the next enumerator or an auto-enumerator.
"""
if ordinal is None:
return None
try:
next_line = self.state_machine.next_line()
except EOFError: # end of input lines
self.state_machine.previous_line()
return 1
else:
self.state_machine.previous_line()
if not next_line[:1].strip(): # blank or indented
return 1
result = self.make_enumerator(ordinal + 1, sequence, format)
if result:
next_enumerator, auto_enumerator = result
try:
if ( next_line.startswith(next_enumerator) or
next_line.startswith(auto_enumerator) ):
return 1
except TypeError:
pass
return None
def make_enumerator(self, ordinal, sequence, format):
"""
Construct and return the next enumerated list item marker, and an
auto-enumerator ("#" instead of the regular enumerator).
Return ``None`` for invalid (out of range) ordinals.
""" #"
if sequence == '#':
enumerator = '#'
elif sequence == 'arabic':
enumerator = str(ordinal)
else:
if sequence.endswith('alpha'):
if ordinal > 26:
return None
enumerator = chr(ordinal + ord('a') - 1)
elif sequence.endswith('roman'):
try:
enumerator = roman.toRoman(ordinal)
except roman.RomanError:
return None
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
if sequence.startswith('lower'):
enumerator = enumerator.lower()
elif sequence.startswith('upper'):
enumerator = enumerator.upper()
else: # shouldn't happen
raise ParserError('unknown enumerator sequence: "%s"'
% sequence)
formatinfo = self.enum.formatinfo[format]
next_enumerator = (formatinfo.prefix + enumerator + formatinfo.suffix
+ ' ')
auto_enumerator = formatinfo.prefix + '#' + formatinfo.suffix + ' '
return next_enumerator, auto_enumerator
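# Illustrative note (editor's sketch, not part of the original module):
#     make_enumerator(4, 'loweralpha', 'period')  -> ('d. ', '#. ')
#     make_enumerator(6, 'upperroman', 'parens')  -> ('(VI) ', '(#) ')
#     make_enumerator(27, 'loweralpha', 'period') -> None  (out of range)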
def field_marker(self, match, context, next_state):
"""Field list item."""
field_list = nodes.field_list()
self.parent += field_list
field, blank_finish = self.field(match)
field_list += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=field_list, initial_state='FieldList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Field list')
return [], next_state, []
def field(self, match):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
name_nodes, name_messages = self.inline_text(name, lineno)
field_node += nodes.field_name(name, '', *name_nodes)
field_body = nodes.field_body('\n'.join(indented), *name_messages)
field_node += field_body
if indented:
self.parse_field_body(indented, line_offset, field_body)
return field_node, blank_finish
def parse_field_marker(self, match):
"""Extract & return field name from a field marker match."""
field = match.group()[1:] # strip off leading ':'
field = field[:field.rfind(':')] # strip off trailing ':' etc.
return field
def parse_field_body(self, indented, offset, node):
self.nested_parse(indented, input_offset=offset, node=node)
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError as error:
# This shouldn't happen; pattern won't match.
msg = self.reporter.error(u'Invalid option list marker: %s' %
error)
self.parent += msg
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
self.parent += optionlist
optionlist += listitem
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=optionlist, initial_state='OptionList',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Option list')
return [], next_state, []
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
option_group = nodes.option_group('', *options)
description = nodes.description('\n'.join(indented))
option_list_item = nodes.option_list_item('', option_group,
description)
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=description)
return option_list_item, blank_finish
def parse_option_marker(self, match):
"""
Return a list of `node.option` and `node.option_argument` objects,
parsed from an option marker match.
:Exception: `MarkupError` for invalid option markers.
"""
optlist = []
optionstrings = match.group().rstrip().split(', ')
for optionstring in optionstrings:
tokens = optionstring.split()
delimiter = ' '
firstopt = tokens[0].split('=', 1)
if len(firstopt) > 1:
# "--opt=value" form
tokens[:1] = firstopt
delimiter = '='
elif (len(tokens[0]) > 2
and ((tokens[0].startswith('-')
and not tokens[0].startswith('--'))
or tokens[0].startswith('+'))):
# "-ovalue" form
tokens[:1] = [tokens[0][:2], tokens[0][2:]]
delimiter = ''
if len(tokens) > 1 and (tokens[1].startswith('<')
and tokens[-1].endswith('>')):
# "-o <value1 value2>" form; join all values into one token
tokens[1:] = [' '.join(tokens[1:])]
if 0 < len(tokens) <= 2:
option = nodes.option(optionstring)
option += nodes.option_string(tokens[0], tokens[0])
if len(tokens) > 1:
option += nodes.option_argument(tokens[1], tokens[1],
delimiter=delimiter)
optlist.append(option)
else:
raise MarkupError(
'wrong number of option tokens (=%s), should be 1 or 2: '
'"%s"' % (len(tokens), optionstring))
return optlist
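# Illustrative note (editor's sketch, not part of the original module):
# the marker '-o FILE, --output=FILE' yields two option nodes: one with
# option_string '-o' and option_argument 'FILE' (delimiter ' '), the other
# with option_string '--output' and option_argument 'FILE' (delimiter '=').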
def doctest(self, match, context, next_state):
data = '\n'.join(self.state_machine.get_text_block())
self.parent += nodes.doctest_block(data, data)
return [], next_state, []
def line_block(self, match, context, next_state):
"""First line of a line block."""
block = nodes.line_block()
self.parent += block
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
block += line
self.parent += messages
if not blank_finish:
offset = self.state_machine.line_offset + 1 # next line
new_line_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=block, initial_state='LineBlock',
blank_finish=0)
self.goto_line(new_line_offset)
if not blank_finish:
self.parent += self.reporter.warning(
'Line block ends without a blank line.',
line=lineno+1)
if len(block):
if block[0].indent is None:
block[0].indent = 0
self.nest_line_block_lines(block)
return [], next_state, []
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
text = u'\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
if match.string.rstrip() != '|': # not empty
line.indent = len(match.group(1)) - 1
return line, messages, blank_finish
def nest_line_block_lines(self, block):
for index in range(1, len(block)):
if block[index].indent is None:
block[index].indent = block[index - 1].indent
self.nest_line_block_segment(block)
def nest_line_block_segment(self, block):
indents = [item.indent for item in block]
least = min(indents)
new_items = []
new_block = nodes.line_block()
for item in block:
if item.indent > least:
new_block.append(item)
else:
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
new_block = nodes.line_block()
new_items.append(item)
if len(new_block):
self.nest_line_block_segment(new_block)
new_items.append(new_block)
block[:] = new_items
def grid_table_top(self, match, context, next_state):
"""Top border of a full table."""
return self.table_top(match, context, next_state,
self.isolate_grid_table,
tableparser.GridTableParser)
def simple_table_top(self, match, context, next_state):
"""Top border of a simple table."""
return self.table_top(match, context, next_state,
self.isolate_simple_table,
tableparser.SimpleTableParser)
def table_top(self, match, context, next_state,
isolate_function, parser_class):
"""Top border of a generic table."""
nodelist, blank_finish = self.table(isolate_function, parser_class)
self.parent += nodelist
if not blank_finish:
msg = self.reporter.warning(
'Blank line required after table.',
line=self.state_machine.abs_line_number()+1)
self.parent += msg
return [], next_state, []
def table(self, isolate_function, parser_class):
"""Parse a table."""
block, messages, blank_finish = isolate_function()
if block:
try:
parser = parser_class()
tabledata = parser.parse(block)
tableline = (self.state_machine.abs_line_number() - len(block)
+ 1)
table = self.build_table(tabledata, tableline)
nodelist = [table] + messages
except tableparser.TableMarkupError as err:
nodelist = self.malformed_table(block, ' '.join(err.args),
offset=err.offset) + messages
else:
nodelist = messages
return nodelist, blank_finish
def isolate_grid_table(self):
messages = []
blank_finish = 1
try:
block = self.state_machine.get_text_block(flush_left=True)
except statemachine.UnexpectedIndentationError as err:
block, src, srcline = err.args
messages.append(self.reporter.error('Unexpected indentation.',
source=src, line=srcline))
blank_finish = 0
block.disconnect()
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
width = len(block[0].strip())
for i in range(len(block)):
block[i] = block[i].strip()
if block[i][0] not in '+|': # check left edge
blank_finish = 0
self.state_machine.previous_line(len(block) - i)
del block[i:]
break
if not self.grid_table_top_pat.match(block[-1]): # find bottom
blank_finish = 0
# from second-last to third line of table:
for i in range(len(block) - 2, 1, -1):
if self.grid_table_top_pat.match(block[i]):
self.state_machine.previous_line(len(block) - i + 1)
del block[i+1:]
break
else:
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
for i in range(len(block)): # check right edge
if len(block[i]) != width or block[i][-1] not in '+|':
messages.extend(self.malformed_table(block))
return [], messages, blank_finish
return block, messages, blank_finish
def isolate_simple_table(self):
start = self.state_machine.line_offset
lines = self.state_machine.input_lines
limit = len(lines) - 1
toplen = len(lines[start].strip())
pattern_match = self.simple_table_border_pat.match
found = 0
found_at = None
i = start + 1
while i <= limit:
line = lines[i]
match = pattern_match(line)
if match:
if len(line.strip()) != toplen:
self.state_machine.next_line(i - start)
messages = self.malformed_table(
lines[start:i+1], 'Bottom/header table border does '
'not match top border.')
return [], messages, i == limit or not lines[i+1].strip()
found += 1
found_at = i
if found == 2 or i == limit or not lines[i+1].strip():
end = i
break
i += 1
else: # reached end of input_lines
if found:
extra = ' or no blank line after table bottom'
self.state_machine.next_line(found_at - start)
block = lines[start:found_at+1]
else:
extra = ''
self.state_machine.next_line(i - start - 1)
block = lines[start:]
messages = self.malformed_table(
block, 'No bottom table border found%s.' % extra)
return [], messages, not extra
self.state_machine.next_line(end - start)
block = lines[start:end+1]
# for East Asian chars:
block.pad_double_width(self.double_width_pad_char)
return block, [], end == limit or not lines[end+1].strip()
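    # Illustrative note (added commentary, not from the original source):
    # a simple table is delimited by rows of '=' characters, e.g.::
    #
    #   =====  =====
    #   col 1  col 2
    #   =====  =====
    #
    # The scan above accepts border rows only if they match the top
    # border's width; a second matching border (or one followed by a
    # blank line) ends the table, otherwise the table is malformed.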
def malformed_table(self, block, detail='', offset=0):
block.replace(self.double_width_pad_char, '')
data = '\n'.join(block)
message = 'Malformed table.'
startline = self.state_machine.abs_line_number() - len(block) + 1
if detail:
message += '\n' + detail
error = self.reporter.error(message, nodes.literal_block(data, data),
line=startline+offset)
return [error]
def build_table(self, tabledata, tableline, stub_columns=0):
colwidths, headrows, bodyrows = tabledata
table = nodes.table()
tgroup = nodes.tgroup(cols=len(colwidths))
table += tgroup
for colwidth in colwidths:
colspec = nodes.colspec(colwidth=colwidth)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
if headrows:
thead = nodes.thead()
tgroup += thead
for row in headrows:
thead += self.build_table_row(row, tableline)
tbody = nodes.tbody()
tgroup += tbody
for row in bodyrows:
tbody += self.build_table_row(row, tableline)
return table
def build_table_row(self, rowdata, tableline):
row = nodes.row()
for cell in rowdata:
if cell is None:
continue
morerows, morecols, offset, cellblock = cell
attributes = {}
if morerows:
attributes['morerows'] = morerows
if morecols:
attributes['morecols'] = morecols
entry = nodes.entry(**attributes)
row += entry
if ''.join(cellblock):
self.nested_parse(cellblock, input_offset=tableline+offset,
node=entry)
return row
explicit = Struct()
"""Patterns and constants used for explicit markup recognition."""
explicit.patterns = Struct(
target=re.compile(r"""
(
_ # anonymous target
| # *OR*
(?!_) # no underscore at the beginning
(?P<quote>`?) # optional open quote
(?![ `]) # first char. not space or
# backquote
(?P<name> # reference name
.+?
)
%(non_whitespace_escape_before)s
(?P=quote) # close quote if open quote used
)
(?<!(?<!\x00):) # no unescaped colon at end
%(non_whitespace_escape_before)s
[ ]? # optional space
: # end of reference name
([ ]+|$) # followed by whitespace
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
reference=re.compile(r"""
(
(?P<simple>%(simplename)s)_
| # *OR*
` # open backquote
(?![ ]) # not space
(?P<phrase>.+?) # hyperlink phrase
%(non_whitespace_escape_before)s
`_ # close backquote,
# reference mark
)
$ # end of string
""" % vars(Inliner), re.VERBOSE | re.UNICODE),
substitution=re.compile(r"""
(
(?![ ]) # first char. not space
(?P<name>.+?) # substitution text
%(non_whitespace_escape_before)s
\| # close delimiter
)
([ ]+|$) # followed by whitespace
""" % vars(Inliner),
re.VERBOSE | re.UNICODE),)
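    # Sample inputs matched by the 'target' pattern above, i.e. the text
    # following the ".. _" marker (examples added for illustration):
    #   "_: <uri>"              -> anonymous target (written ".. __: <uri>")
    #   "name: <uri>"           -> named target
    #   "`phrase name`: <uri>"  -> quoted name, may contain spaces/colons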
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
footnote.source = src
footnote.line = srcline
if name[0] == '#': # auto-numbered
name = name[1:] # autonumber label
footnote['auto'] = 1
if name:
footnote['names'].append(name)
self.document.note_autofootnote(footnote)
elif name == '*': # auto-symbol
name = ''
footnote['auto'] = '*'
self.document.note_symbol_footnote(footnote)
else: # manually numbered
footnote += nodes.label('', label)
footnote['names'].append(name)
self.document.note_footnote(footnote)
if name:
self.document.note_explicit_target(footnote, footnote)
else:
self.document.set_id(footnote, footnote)
if indented:
self.nested_parse(indented, input_offset=offset, node=footnote)
return [footnote], blank_finish
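    # Footnote label forms dispatched above (examples added for
    # illustration):
    #   .. [1]     manually numbered (gets an explicit label node)
    #   .. [#]     anonymous auto-numbered
    #   .. [#note] auto-numbered with a reference name
    #   .. [*]     auto-symbol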
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
citation.source = src
citation.line = srcline
citation += nodes.label('', label)
citation['names'].append(name)
self.document.note_citation(citation)
self.document.note_explicit_target(citation, citation)
if indented:
self.nested_parse(indented, input_offset=offset, node=citation)
return [citation], blank_finish
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(
match.end(), until_blank=True, strip_indent=False)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
blockindex = 0
while True:
targetmatch = pattern.match(escaped)
if targetmatch:
break
blockindex += 1
try:
escaped += block[blockindex]
except IndexError:
raise MarkupError('malformed hyperlink target.')
del block[:blockindex]
block[0] = (block[0] + ' ')[targetmatch.end()-len(escaped)-1:].strip()
target = self.make_target(block, blocktext, lineno,
targetmatch.group('name'))
return [target], blank_finish
def make_target(self, block, block_text, lineno, target_name):
target_type, data = self.parse_target(block, block_text, lineno)
if target_type == 'refname':
target = nodes.target(block_text, '', refname=normalize_name(data))
target.indirect_reference_name = data
self.add_target(target_name, '', target, lineno)
self.document.note_indirect_target(target)
return target
elif target_type == 'refuri':
target = nodes.target(block_text, '')
self.add_target(target_name, data, target, lineno)
return target
else:
return data
def parse_target(self, block, block_text, lineno):
"""
Determine the type of reference of a target.
:Return: A 2-tuple, one of:
- 'refname' and the indirect reference name
- 'refuri' and the URI
- 'malformed' and a system_message node
"""
if block and block[-1].strip()[-1:] == '_': # possible indirect target
reference = ' '.join([line.strip() for line in block])
refname = self.is_reference(reference)
if refname:
return 'refname', refname
reference = ''.join([''.join(line.split()) for line in block])
return 'refuri', unescape(reference)
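    # Classification examples (hypothetical inputs, added for
    # illustration):
    #   block == ['http://example.org/'] -> ('refuri', 'http://example.org/')
    #   block == ['other_target_']       -> ('refname', 'other_target')
    # Only a block whose last line ends in '_' and parses as a reference
    # is treated as an indirect target; everything else becomes a URI.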
def is_reference(self, reference):
match = self.explicit.patterns.reference.match(
whitespace_normalize_name(reference))
if not match:
return None
return unescape(match.group('simple') or match.group('phrase'))
def add_target(self, targetname, refuri, target, lineno):
target.line = lineno
if targetname:
name = normalize_name(unescape(targetname))
target['names'].append(name)
if refuri:
uri = self.inliner.adjust_uri(refuri)
if uri:
target['refuri'] = uri
else:
raise ApplicationError('problem with URI: %r' % refuri)
self.document.note_explicit_target(target, self.parent)
else: # anonymous target
if refuri:
target['refuri'] = refuri
target['anonymous'] = 1
self.document.note_anonymous_target(target)
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
block, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
strip_indent=False)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
blockindex = 0
while True:
subdefmatch = pattern.match(escaped)
if subdefmatch:
break
blockindex += 1
try:
escaped = escaped + ' ' + escape2null(block[blockindex].strip())
except IndexError:
raise MarkupError('malformed substitution definition.')
del block[:blockindex] # strip out the substitution marker
block[0] = (block[0].strip() + ' ')[subdefmatch.end()-len(escaped)-1:-1]
if not block[0]:
del block[0]
offset += 1
while block and not block[-1].strip():
block.pop()
subname = subdefmatch.group('name')
substitution_node = nodes.substitution_definition(blocktext)
substitution_node.source = src
substitution_node.line = srcline
if not block:
msg = self.reporter.warning(
'Substitution definition "%s" missing contents.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
block[0] = block[0].strip()
substitution_node['names'].append(
nodes.whitespace_normalize_name(subname))
new_abs_offset, blank_finish = self.nested_list_parse(
block, input_offset=offset, node=substitution_node,
initial_state='SubstitutionDef', blank_finish=blank_finish)
i = 0
for node in substitution_node[:]:
if not (isinstance(node, nodes.Inline) or
isinstance(node, nodes.Text)):
self.parent += substitution_node[i]
del substitution_node[i]
else:
i += 1
for node in substitution_node.traverse(nodes.Element):
if self.disallowed_inside_substitution_definitions(node):
pformat = nodes.literal_block('', node.pformat().rstrip())
msg = self.reporter.error(
'Substitution definition contains illegal element:',
pformat, nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
if len(substitution_node) == 0:
msg = self.reporter.warning(
'Substitution definition "%s" empty or invalid.' % subname,
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
return [msg], blank_finish
self.document.note_substitution_def(
substitution_node, subname, self.parent)
return [substitution_node], blank_finish
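    # Source form handled above (example added for illustration)::
    #
    #   .. |name| image:: picture.png
    #
    # The "name|" part is matched by explicit.patterns.substitution; the
    # rest of the block is parsed in the 'SubstitutionDef' state, where an
    # embedded directive provides the substitution's content.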
def disallowed_inside_substitution_definitions(self, node):
if (node['ids'] or
isinstance(node, nodes.reference) and node.get('anonymous') or
isinstance(node, nodes.footnote_reference) and node.get('auto')):
return 1
else:
return 0
def directive(self, match, **option_presets):
"""Returns a 2-tuple: list of nodes, and a "blank finish" boolean."""
type_name = match.group(1)
directive_class, messages = directives.directive(
type_name, self.memo.language, self.document)
self.parent += messages
if directive_class:
return self.run_directive(
directive_class, match, type_name, option_presets)
else:
return self.unknown_directive(type_name)
def run_directive(self, directive, match, type_name, option_presets):
"""
Parse a directive then run its directive function.
Parameters:
- `directive`: The class implementing the directive. Must be
a subclass of `rst.Directive`.
- `match`: A regular expression match object which matched the first
line of the directive.
- `type_name`: The directive name, as used in the source text.
- `option_presets`: A dictionary of preset options, defaults for the
directive options. Currently, only an "alt" option is passed by
substitution definitions (value: the substitution name), which may
be used by an embedded image directive.
Returns a 2-tuple: list of nodes, and a "blank finish" boolean.
"""
if isinstance(directive, (FunctionType, MethodType)):
from docutils.parsers.rst import convert_directive_function
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
indented, indent, line_offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
arguments, options, content, content_offset = (
self.parse_directive_block(indented, line_offset,
directive, option_presets))
except MarkupError, detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (type_name,
' '.join(detail.args)),
nodes.literal_block(block_text, block_text), line=lineno)
return [error], blank_finish
directive_instance = directive(
type_name, arguments, options, content, lineno,
content_offset, block_text, self, self.state_machine)
try:
result = directive_instance.run()
except docutils.parsers.rst.DirectiveError, error:
msg_node = self.reporter.system_message(error.level, error.msg,
line=lineno)
msg_node += nodes.literal_block(block_text, block_text)
result = [msg_node]
assert isinstance(result, list), \
'Directive "%s" must return a list of nodes.' % type_name
for i in range(len(result)):
assert isinstance(result[i], nodes.Node), \
('Directive "%s" returned non-Node object (index %s): %r'
% (type_name, i, result[i]))
return (result,
blank_finish or self.state_machine.is_next_line_blank())
def parse_directive_block(self, indented, line_offset, directive,
option_presets):
option_spec = directive.option_spec
has_content = directive.has_content
if indented and not indented[0].strip():
indented.trim_start()
line_offset += 1
while indented and not indented[-1].strip():
indented.trim_end()
if indented and (directive.required_arguments
or directive.optional_arguments
or option_spec):
for i, line in enumerate(indented):
if not line.strip():
break
else:
i += 1
arg_block = indented[:i]
content = indented[i+1:]
content_offset = line_offset + i + 1
else:
content = indented
content_offset = line_offset
arg_block = []
if option_spec:
options, arg_block = self.parse_directive_options(
option_presets, option_spec, arg_block)
else:
options = {}
if arg_block and not (directive.required_arguments
or directive.optional_arguments):
content = arg_block + indented[i:]
content_offset = line_offset
arg_block = []
while content and not content[0].strip():
content.trim_start()
content_offset += 1
if directive.required_arguments or directive.optional_arguments:
arguments = self.parse_directive_arguments(
directive, arg_block)
else:
arguments = []
if content and not has_content:
raise MarkupError('no content permitted')
return (arguments, options, content, content_offset)
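    # Block layout split above (hypothetical directive, added for
    # illustration)::
    #
    #   .. name:: argument text       <- arg_block
    #      :option: value             <- consumed by parse_directive_options()
    #
    #      directive content          <- `content` at `content_offset`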
def parse_directive_options(self, option_presets, option_spec, arg_block):
options = option_presets.copy()
for i, line in enumerate(arg_block):
if re.match(Body.patterns['field_marker'], line):
opt_block = arg_block[i:]
arg_block = arg_block[:i]
break
else:
opt_block = []
if opt_block:
success, data = self.parse_extension_options(option_spec,
opt_block)
if success: # data is a dict of options
options.update(data)
else: # data is an error string
raise MarkupError(data)
return options, arg_block
def parse_directive_arguments(self, directive, arg_block):
required = directive.required_arguments
optional = directive.optional_arguments
arg_text = '\n'.join(arg_block)
arguments = arg_text.split()
if len(arguments) < required:
raise MarkupError('%s argument(s) required, %s supplied'
% (required, len(arguments)))
elif len(arguments) > required + optional:
if directive.final_argument_whitespace:
arguments = arg_text.split(None, required + optional - 1)
else:
raise MarkupError(
'maximum %s argument(s) allowed, %s supplied'
% (required + optional, len(arguments)))
return arguments
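    # Example (added for illustration): with required_arguments=1,
    # optional_arguments=1 and final_argument_whitespace=True, the block
    # "foo bar baz" splits into ['foo', 'bar baz']; without
    # final_argument_whitespace the same input raises MarkupError
    # ("maximum 2 argument(s) allowed, 3 supplied").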
def parse_extension_options(self, option_spec, datalines):
"""
Parse `datalines` for a field list containing extension options
matching `option_spec`.
:Parameters:
- `option_spec`: a mapping of option name to conversion
function, which should raise an exception on bad input.
- `datalines`: a list of input strings.
:Return:
- Success value, 1 or 0.
- An option dictionary on success, an error string on failure.
"""
node = nodes.field_list()
newline_offset, blank_finish = self.nested_list_parse(
datalines, 0, node, initial_state='ExtensionOptions',
blank_finish=True)
if newline_offset != len(datalines): # incomplete parse of block
return 0, 'invalid option block'
try:
options = utils.extract_extension_options(node, option_spec)
except KeyError, detail:
return 0, ('unknown option: "%s"' % detail.args[0])
except (ValueError, TypeError), detail:
return 0, ('invalid option value: %s' % ' '.join(detail.args))
except utils.ExtensionOptionError, detail:
return 0, ('invalid option data: %s' % ' '.join(detail.args))
if blank_finish:
return 1, options
else:
return 0, 'option data incompletely parsed'
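    # Usage sketch (hypothetical option spec, added for illustration)::
    #
    #   option_spec = {'width': directives.positive_int}
    #   success, data = self.parse_extension_options(option_spec,
    #                                                [':width: 200'])
    #   # -> success == 1, data == {'width': 200}
    #
    # Unknown names or failed conversions yield (0, '<error string>').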
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(0, strip_indent=False)
text = '\n'.join(indented)
error = self.reporter.error(
'Unknown directive type "%s".' % type_name,
nodes.literal_block(text, text), line=lineno)
return [error], blank_finish
def comment(self, match):
if not match.string[match.end():].strip() \
and self.state_machine.is_next_line_blank(): # an empty comment?
return [nodes.comment()], 1 # "A tiny but practical wart."
indented, indent, offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
return [nodes.comment(text, text)], blank_finish
explicit.constructs = [
(footnote,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[
( # footnote label:
[0-9]+ # manually numbered footnote
| # *OR*
\# # anonymous auto-numbered footnote
| # *OR*
                      \#%s # auto-numbered, labeled footnote
| # *OR*
\* # auto-symbol footnote
)
\]
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(citation,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\[(%s)\] # citation label
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE)),
(hyperlink_target,
re.compile(r"""
\.\.[ ]+ # explicit markup start
_ # target indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE | re.UNICODE)),
(substitution_def,
re.compile(r"""
\.\.[ ]+ # explicit markup start
\| # substitution indicator
(?![ ]|$) # first char. not space or EOL
""", re.VERBOSE | re.UNICODE)),
(directive,
re.compile(r"""
\.\.[ ]+ # explicit markup start
(%s) # directive name
[ ]? # optional space
:: # directive delimiter
([ ]+|$) # whitespace or end of line
""" % Inliner.simplename, re.VERBOSE | re.UNICODE))]
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def explicit_construct(self, match):
"""Determine which explicit construct this is, parse & return it."""
errors = []
for method, pattern in self.explicit.constructs:
expmatch = pattern.match(match.string)
if expmatch:
try:
return method(self, expmatch)
except MarkupError, error:
lineno = self.state_machine.abs_line_number()
message = ' '.join(error.args)
errors.append(self.reporter.warning(message, line=lineno))
break
nodelist, blank_finish = self.comment(match)
return nodelist + errors, blank_finish
def explicit_list(self, blank_finish):
"""
Create a nested state machine for a series of explicit markup
constructs (including anonymous hyperlink targets).
"""
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=self.parent, initial_state='Explicit',
blank_finish=blank_finish,
match_titles=self.state_machine.match_titles)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Explicit markup')
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.explicit_list(blank_finish)
return [], next_state, []
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
block, indent, offset, blank_finish \
= self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
return [target], blank_finish
def line(self, match, context, next_state):
"""Section title overline or transition marker."""
if self.state_machine.match_titles:
return [match.string], 'Line', []
elif match.string.strip() == '::':
raise statemachine.TransitionCorrection('text')
elif len(match.string.strip()) < 4:
msg = self.reporter.info(
'Unexpected possible title overline or transition.\n'
"Treating it as ordinary text because it's so short.",
line=self.state_machine.abs_line_number())
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = self.state_machine.line
msg = self.reporter.severe(
'Unexpected section title or transition.',
nodes.literal_block(blocktext, blocktext),
line=self.state_machine.abs_line_number())
self.parent += msg
return [], next_state, []
def text(self, match, context, next_state):
"""Titles, definition lists, paragraphs."""
return [match.string], 'Text', []
class RFC2822Body(Body):
"""
RFC2822 headers are only valid as the first constructs in documents. As
soon as anything else appears, the `Body` state should take over.
"""
patterns = Body.patterns.copy() # can't modify the original
patterns['rfc2822'] = r'[!-9;-~]+:( +|$)'
initial_transitions = [(name, 'Body')
for name in Body.initial_transitions]
initial_transitions.insert(-1, ('rfc2822', 'Body')) # just before 'text'
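    # The 'rfc2822' pattern matches header-style lines such as
    # "Author: J. Random Hacker" or "Date:" at end of line; the field
    # name may be any run of printable characters except space and colon
    # (examples added for illustration).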
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
fieldlist = nodes.field_list(classes=['rfc2822'])
self.parent += fieldlist
field, blank_finish = self.rfc2822_field(match)
fieldlist += field
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=fieldlist, initial_state='RFC2822List',
blank_finish=blank_finish)
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning(
'RFC2822-style field list')
return [], next_state, []
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end(),
until_blank=True)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
fieldnode += fieldbody
if indented:
self.nested_parse(indented, input_offset=line_offset,
node=fieldbody)
return fieldnode, blank_finish
class SpecializedBody(Body):
"""
Superclass for second and subsequent compound element members. Compound
elements are lists and list-like constructs.
All transition methods are disabled (redefined as `invalid_input`).
Override individual methods in subclasses to re-enable.
For example, once an initial bullet list item, say, is recognized, the
`BulletList` subclass takes over, with a "bullet_list" node as its
container. Upon encountering the initial bullet list item, `Body.bullet`
calls its ``self.nested_list_parse`` (`RSTState.nested_list_parse`), which
starts up a nested parsing session with `BulletList` as the initial state.
Only the ``bullet`` transition method is enabled in `BulletList`; as long
as only bullet list items are encountered, they are parsed and inserted
into the container. The first construct which is *not* a bullet list item
triggers the `invalid_input` method, which ends the nested parse and
closes the container. `BulletList` needs to recognize input that is
invalid in the context of a bullet list, which means everything *other
than* bullet list items, so it inherits the transition list created in
`Body`.
"""
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
self.state_machine.previous_line() # back up so parent SM can reassess
raise EOFError
indent = invalid_input
bullet = invalid_input
enumerator = invalid_input
field_marker = invalid_input
option_marker = invalid_input
doctest = invalid_input
line_block = invalid_input
grid_table_top = invalid_input
simple_table_top = invalid_input
explicit_markup = invalid_input
anonymous = invalid_input
line = invalid_input
text = invalid_input
class BulletList(SpecializedBody):
"""Second and subsequent bullet_list list_items."""
def bullet(self, match, context, next_state):
"""Bullet list item."""
if match.string[0] != self.parent['bullet']:
# different bullet: new list
self.invalid_input()
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
return [], next_state, []
class DefinitionList(SpecializedBody):
"""Second and subsequent definition_list_items."""
def text(self, match, context, next_state):
"""Definition lists."""
return [match.string], 'Definition', []
class EnumeratedList(SpecializedBody):
"""Second and subsequent enumerated_list list_items."""
def enumerator(self, match, context, next_state):
"""Enumerated list item."""
format, sequence, text, ordinal = self.parse_enumerator(
match, self.parent['enumtype'])
if ( format != self.format
or (sequence != '#' and (sequence != self.parent['enumtype']
or self.auto
or ordinal != (self.lastordinal + 1)))
or not self.is_enumerated_list_item(ordinal, sequence, format)):
# different enumeration: new list
self.invalid_input()
if sequence == '#':
self.auto = 1
listitem, blank_finish = self.list_item(match.end())
self.parent += listitem
self.blank_finish = blank_finish
self.lastordinal = ordinal
return [], next_state, []
class FieldList(SpecializedBody):
"""Second and subsequent field_list fields."""
def field_marker(self, match, context, next_state):
"""Field list field."""
field, blank_finish = self.field(match)
self.parent += field
self.blank_finish = blank_finish
return [], next_state, []
class OptionList(SpecializedBody):
"""Second and subsequent option_list option_list_items."""
def option_marker(self, match, context, next_state):
"""Option list item."""
try:
option_list_item, blank_finish = self.option_list_item(match)
except MarkupError:
self.invalid_input()
self.parent += option_list_item
self.blank_finish = blank_finish
return [], next_state, []
class RFC2822List(SpecializedBody, RFC2822Body):
"""Second and subsequent RFC2822-style field_list fields."""
patterns = RFC2822Body.patterns
initial_transitions = RFC2822Body.initial_transitions
def rfc2822(self, match, context, next_state):
"""RFC2822-style field list item."""
field, blank_finish = self.rfc2822_field(match)
self.parent += field
self.blank_finish = blank_finish
return [], 'RFC2822List', []
blank = SpecializedBody.invalid_input
class ExtensionOptions(FieldList):
"""
Parse field_list fields for extension options.
No nested parsing is done (including inline markup parsing).
"""
def parse_field_body(self, indented, offset, node):
"""Override `Body.parse_field_body` for simpler parsing."""
lines = []
for line in list(indented) + ['']:
if line.strip():
lines.append(line)
elif lines:
text = '\n'.join(lines)
node += nodes.paragraph(text, text)
lines = []
class LineBlock(SpecializedBody):
"""Second and subsequent lines of a line_block."""
blank = SpecializedBody.invalid_input
def line_block(self, match, context, next_state):
"""New line of line block."""
lineno = self.state_machine.abs_line_number()
line, messages, blank_finish = self.line_block_line(match, lineno)
self.parent += line
self.parent.parent += messages
self.blank_finish = blank_finish
return [], next_state, []
class Explicit(SpecializedBody):
"""Second and subsequent explicit markup construct."""
def explicit_markup(self, match, context, next_state):
"""Footnotes, hyperlink targets, directives, comments."""
nodelist, blank_finish = self.explicit_construct(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
def anonymous(self, match, context, next_state):
"""Anonymous hyperlink targets."""
nodelist, blank_finish = self.anonymous_target(match)
self.parent += nodelist
self.blank_finish = blank_finish
return [], next_state, []
blank = SpecializedBody.invalid_input
class SubstitutionDef(Body):
"""
Parser for the contents of a substitution_definition element.
"""
patterns = {
'embedded_directive': re.compile(r'(%s)::( +|$)'
% Inliner.simplename, re.UNICODE),
'text': r''}
initial_transitions = ['embedded_directive', 'text']
def embedded_directive(self, match, context, next_state):
nodelist, blank_finish = self.directive(match,
alt=self.parent['names'][0])
self.parent += nodelist
if not self.state_machine.at_eof():
self.blank_finish = blank_finish
raise EOFError
def text(self, match, context, next_state):
if not self.state_machine.at_eof():
self.blank_finish = self.state_machine.is_next_line_blank()
raise EOFError
class Text(RSTState):
"""
Classifier of second line of a text block.
Could be a paragraph, a definition list item, or a title.
"""
patterns = {'underline': Body.patterns['line'],
'text': r''}
initial_transitions = [('underline', 'Body'), ('text', 'Body')]
def blank(self, match, context, next_state):
"""End of paragraph."""
# NOTE: self.paragraph returns [ node, system_message(s) ], literalnext
paragraph, literalnext = self.paragraph(
context, self.state_machine.abs_line_number() - 1)
self.parent += paragraph
if literalnext:
self.parent += self.literal_block()
return [], 'Body', []
def eof(self, context):
if context:
self.blank(None, context, None)
return []
def indent(self, match, context, next_state):
"""Definition list item."""
definitionlist = nodes.definition_list()
definitionlistitem, blank_finish = self.definition_list_item(context)
definitionlist += definitionlistitem
self.parent += definitionlist
offset = self.state_machine.line_offset + 1 # next line
newline_offset, blank_finish = self.nested_list_parse(
self.state_machine.input_lines[offset:],
input_offset=self.state_machine.abs_line_offset() + 1,
node=definitionlist, initial_state='DefinitionList',
blank_finish=blank_finish, blank_finish_state='Definition')
self.goto_line(newline_offset)
if not blank_finish:
self.parent += self.unindent_warning('Definition list')
return [], 'Body', []
def underline(self, match, context, next_state):
"""Section title."""
lineno = self.state_machine.abs_line_number()
title = context[0].rstrip()
underline = match.string.rstrip()
source = title + '\n' + underline
messages = []
if column_width(title) > len(underline):
if len(underline) < 4:
if self.state_machine.match_titles:
msg = self.reporter.info(
'Possible title underline, too short for the title.\n'
"Treating it as ordinary text because it's so short.",
line=lineno)
self.parent += msg
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
msg = self.reporter.warning('Title underline too short.',
nodes.literal_block(blocktext, blocktext), line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
# We need get_source_and_line() here to report correctly
src, srcline = self.state_machine.get_source_and_line()
# TODO: why is abs_line_number() == srcline+1
# if the error is in a table (try with test_tables.py)?
# print "get_source_and_line", srcline
# print "abs_line_number", self.state_machine.abs_line_number()
msg = self.reporter.severe('Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
self.parent += msg
return [], next_state, []
style = underline[0]
context[:] = []
self.section(title, source, style, lineno - 1, messages)
return [], next_state, []
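    # Example (added for illustration): the two-line input
    #
    #   Section Title
    #   =============
    #
    # reaches this method with context == ['Section Title'] and the '='
    # line as `match`; an underline at least as wide as the title yields
    # a section, a shorter one the "Title underline too short." warning
    # (or plain text, if it is under 4 characters).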
def text(self, match, context, next_state):
"""Paragraph."""
startline = self.state_machine.abs_line_number() - 1
msg = None
try:
block = self.state_machine.get_text_block(flush_left=True)
except statemachine.UnexpectedIndentationError, err:
block, src, srcline = err.args
msg = self.reporter.error('Unexpected indentation.',
source=src, line=srcline)
lines = context + list(block)
paragraph, literalnext = self.paragraph(lines, startline)
self.parent += paragraph
self.parent += msg
if literalnext:
try:
self.state_machine.next_line()
except EOFError:
pass
self.parent += self.literal_block()
return [], next_state, []
def literal_block(self):
"""Return a list of nodes."""
indented, indent, offset, blank_finish = \
self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
return self.quoted_literal_block()
data = '\n'.join(indented)
literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish:
nodelist.append(self.unindent_warning('Literal block'))
return nodelist
def quoted_literal_block(self):
abs_line_offset = self.state_machine.abs_line_offset()
offset = self.state_machine.line_offset
parent_node = nodes.Element()
new_abs_offset = self.nested_parse(
self.state_machine.input_lines[offset:],
input_offset=abs_line_offset, node=parent_node, match_titles=False,
state_machine_kwargs={'state_classes': (QuotedLiteralBlock,),
'initial_state': 'QuotedLiteralBlock'})
self.goto_line(new_abs_offset)
return parent_node.children
def definition_list_item(self, termline):
indented, indent, line_offset, blank_finish = \
self.state_machine.get_indented()
itemnode = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
(itemnode.source,
itemnode.line) = self.state_machine.get_source_and_line(lineno)
termlist, messages = self.term(termline, lineno)
itemnode += termlist
definition = nodes.definition('', *messages)
itemnode += definition
if termline[0][-2:] == '::':
definition += self.reporter.info(
'Blank line missing before literal block (after the "::")? '
'Interpreted as a definition list item.',
line=lineno+1)
self.nested_parse(indented, input_offset=line_offset, node=definition)
return itemnode, blank_finish
classifier_delimiter = re.compile(' +: +')
def term(self, lines, lineno):
"""Return a definition_list's term and optional classifiers."""
assert len(lines) == 1
text_nodes, messages = self.inline_text(lines[0], lineno)
term_node = nodes.term()
node_list = [term_node]
for i in range(len(text_nodes)):
node = text_nodes[i]
if isinstance(node, nodes.Text):
parts = self.classifier_delimiter.split(node.rawsource)
if len(parts) == 1:
node_list[-1] += node
else:
node_list[-1] += nodes.Text(parts[0].rstrip())
for part in parts[1:]:
classifier_node = nodes.classifier('', part)
node_list.append(classifier_node)
else:
node_list[-1] += node
return node_list, messages
class SpecializedText(Text):
"""
Superclass for second and subsequent lines of Text-variants.
All transition methods are disabled. Override individual methods in
subclasses to re-enable.
"""
def eof(self, context):
"""Incomplete construct."""
return []
def invalid_input(self, match=None, context=None, next_state=None):
"""Not a compound element member. Abort this state machine."""
raise EOFError
blank = invalid_input
indent = invalid_input
underline = invalid_input
text = invalid_input
class Definition(SpecializedText):
"""Second line of potential definition_list_item."""
def eof(self, context):
"""Not a definition."""
self.state_machine.previous_line(2) # so parent SM can reassess
return []
def indent(self, match, context, next_state):
"""Definition list item."""
itemnode, blank_finish = self.definition_list_item(context)
self.parent += itemnode
self.blank_finish = blank_finish
return [], 'DefinitionList', []
class Line(SpecializedText):
"""
Second line of over- & underlined section title or transition marker.
"""
eofcheck = 1 # @@@ ???
"""Set to 0 while parsing sections, so that we don't catch the EOF."""
def eof(self, context):
"""Transition marker at end of section or document."""
marker = context[0].strip()
if self.memo.section_bubble_up_kludge:
self.memo.section_bubble_up_kludge = False
elif len(marker) < 4:
self.state_correction(context)
if self.eofcheck: # ignore EOFError with sections
lineno = self.state_machine.abs_line_number() - 1
transition = nodes.transition(rawsource=context[0])
transition.line = lineno
self.parent += transition
self.eofcheck = 1
return []
def blank(self, match, context, next_state):
"""Transition marker."""
src, srcline = self.state_machine.get_source_and_line()
marker = context[0].strip()
if len(marker) < 4:
self.state_correction(context)
transition = nodes.transition(rawsource=marker)
transition.source = src
transition.line = srcline - 1
self.parent += transition
return [], 'Body', []
def text(self, match, context, next_state):
"""Potential over- & underlined title."""
lineno = self.state_machine.abs_line_number() - 1
overline = context[0]
title = match.string
underline = ''
try:
underline = self.state_machine.next_line()
except EOFError:
blocktext = overline + '\n' + title
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Incomplete section title.',
nodes.literal_block(blocktext, blocktext),
line=lineno)
self.parent += msg
return [], 'Body', []
source = '%s\n%s\n%s' % (overline, title, underline)
overline = overline.rstrip()
underline = underline.rstrip()
if not self.transitions['underline'][0].match(underline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Missing matching underline for section title overline.',
nodes.literal_block(source, source),
line=lineno)
self.parent += msg
return [], 'Body', []
elif overline != underline:
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.severe(
'Title overline & underline mismatch.',
nodes.literal_block(source, source),
line=lineno)
self.parent += msg
return [], 'Body', []
title = title.rstrip()
messages = []
if column_width(title) > len(overline):
blocktext = overline + '\n' + title + '\n' + underline
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 2)
else:
msg = self.reporter.warning(
'Title overline too short.',
nodes.literal_block(source, source),
line=lineno)
messages.append(msg)
style = (overline[0], underline[0])
self.eofcheck = 0 # @@@ not sure this is correct
self.section(title.lstrip(), source, style, lineno + 1, messages)
self.eofcheck = 1
return [], 'Body', []
indent = text # indented title
def underline(self, match, context, next_state):
overline = context[0]
blocktext = overline + '\n' + self.state_machine.line
lineno = self.state_machine.abs_line_number() - 1
if len(overline.rstrip()) < 4:
self.short_overline(context, blocktext, lineno, 1)
msg = self.reporter.error(
'Invalid section title or transition marker.',
nodes.literal_block(blocktext, blocktext),
line=lineno)
self.parent += msg
return [], 'Body', []
def short_overline(self, context, blocktext, lineno, lines=1):
msg = self.reporter.info(
'Possible incomplete section title.\nTreating the overline as '
"ordinary text because it's so short.",
line=lineno)
self.parent += msg
self.state_correction(context, lines)
def state_correction(self, context, lines=1):
self.state_machine.previous_line(lines)
context[:] = []
raise statemachine.StateCorrection('Body', 'text')
class QuotedLiteralBlock(RSTState):
"""
Nested parse handler for quoted (unindented) literal blocks.
Special-purpose. Not for inclusion in `state_classes`.
"""
patterns = {'initial_quoted': r'(%(nonalphanum7bit)s)' % Body.pats,
'text': r''}
initial_transitions = ('initial_quoted', 'text')
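    # Example input (added for illustration): after a paragraph ending in
    # "::", an unindented block quoted with one consistent character is
    # taken as a literal block::
    #
    #   > quoted line one
    #   > quoted line two
    #
    # initial_quoted() fixes the quote character from the first line;
    # text() reports any inconsistently quoted follow-up line.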
def __init__(self, state_machine, debug=False):
RSTState.__init__(self, state_machine, debug)
self.messages = []
self.initial_lineno = None
def blank(self, match, context, next_state):
if context:
raise EOFError
else:
return context, next_state, []
def eof(self, context):
if context:
src, srcline = self.state_machine.get_source_and_line(
self.initial_lineno)
text = '\n'.join(context)
literal_block = nodes.literal_block(text, text)
literal_block.source = src
literal_block.line = srcline
self.parent += literal_block
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
line=self.state_machine.abs_line_number())
# src not available, because statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
def indent(self, match, context, next_state):
assert context, ('QuotedLiteralBlock.indent: context should not '
'be empty!')
self.messages.append(
self.reporter.error('Unexpected indentation.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
def initial_quoted(self, match, context, next_state):
"""Match arbitrary quote character on the first line only."""
self.remove_transition('initial_quoted')
quote = match.string[0]
pattern = re.compile(re.escape(quote), re.UNICODE)
# New transition matches consistent quotes only:
self.add_transition('quoted',
(pattern, self.quoted, self.__class__.__name__))
self.initial_lineno = self.state_machine.abs_line_number()
return [match.string], next_state, []
def quoted(self, match, context, next_state):
"""Match consistent quotes on subsequent lines."""
context.append(match.string)
return context, next_state, []
def text(self, match, context, next_state):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
state_classes = (Body, BulletList, DefinitionList, EnumeratedList, FieldList,
OptionList, LineBlock, ExtensionOptions, Explicit, Text,
Definition, Line, SubstitutionDef, RFC2822Body, RFC2822List)
"""Standard set of State classes used to start `RSTStateMachine`."""
| {
"repo_name": "neumerance/deploy",
"path": ".venv/lib/python2.7/site-packages/docutils/parsers/rst/states.py",
"copies": "4",
"size": "128822",
"license": "apache-2.0",
"hash": -4708381589462039000,
"line_mean": 41.2505739587,
"line_max": 93,
"alpha_frac": 0.5474297868,
"autogenerated": false,
"ratio": 4.396955423578401,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00037527489010945493,
"num_lines": 3049
} |
"""A widget class, which can display information of any kind using a
stack technique.
"""
from datetime import datetime
from Container import Container
from Constants import *
from StyleInformation import StyleInformation
import base
_TIMER = 50
class StatusBar (Container):
"""StatusBar () -> StatusBar
A widget class, which can display information using a stack.
The StatusBar widget class can display the current date and info
messages using a message stack.
Info messages can be enabled or disabled using the 'tips' attribute
or show_tips() method. If the tips are enabled, only the topmost
    (most recently added) tip will be displayed.
statusbar.tips = True
statusbar.show_tips (False)
The push_tip() method will put the message on the top of the stack,
while the pop_tip() method will remove the topmost message. If there
are several messages on the stack, pop_tip() will reveal previously
added messages, which will be displayed then.
statusbar.push_tip ('This is a message') # 'This is a message' is shown
statusbar.push_tip ('Another message') # 'Another message' is shown
statusbar.pop_tip () # 'This is a message' is shown
To get the currently displayed message, the 'current_tip' attribute
or get_current_tip() method can be used.
current = statusbar.current_tip
current = statusbar.get_current_tip ()
The width of the area, that displays the tips can be adjusted using
the 'tip_width' attribute or set_tip_width () method.
If the rendered text exceeds the width of the area, it will be cut down
to its width.
statusbar.tip_width = 50
statusbar.set_tip_width (50)
Displaying the actual date can be enabled or disabled with the
'date' attribute or show_date() method.
statusbar.date = True
statusbar.show_date (False)
The display format of it can be adjusted through the 'date_format'
attribute or set_date_format() method. The format has to be a string
in a format, which can be understood by the strftime() method.
statusbar.date_format = '%x'
statusbar.set_date_format ('%H: %M')
The current date (which can differ from the displayed one) can be
fetched through the 'current_date' attribute. Usually the update resolution
for the date is set to one second. Due to the internal drawing and update
    code it can happen that the displayed and current date differ by one
second.
The width of the area, that displays the date can be adjusted using
the 'date_width' attribute or set_date_width () method.
If the rendered date exceeds the width of the area, it will be cut
down to its width.
statusbar.date_width = 50
statusbar.set_date_width (50)
Additionally you are able to place widgets on the StatusBar through
the usual Container methods. The widgets will be placed between the
info message text and the date.
Note:
    The StatusBar updates itself every 500 ms, so the date display is
    not a real-time clock that ticks reliably once per second.
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
tips - Indicates, whether the info messages should be shown.
current_tip - The current tooltip to display.
tip_width - The width of the tip area. Default is 80.
date - Indicates, whether the date should be shown.
current_date - The current date to display.
date_format - The display format of the date. Default is '%x %H:%M'
date_width - The width of the date area. Default is 90.
"""
def __init__ (self):
Container.__init__ (self)
# Info values.
self._showtips = True
self._tips = []
self._tipwidth = 80
self._timer = _TIMER
self._xoffset = 0
# Date.
self._showdate = True
self._dateformat = "%x %H:%M:%S"
self._datewidth = 90
self._signals[SIG_TICK] = None
# Default size.
self.minsize = 204, 24
def set_focus (self, focus=True):
"""S.set_focus (focus=True) -> None
Overrides the set_focus() behaviour for the StatusBar.
The StatusBar class is not focusable by default.
"""
return False
def set_tip_width (self, width):
"""S.set_tip_width (...) -> None
Sets the width of the tip area.
Raises a TypeError, if the passed argument is not a positive
integer.
"""
if (type (width) != int) or (width < 0):
raise TypeError ("width must be a positive integer")
self._tipwidth = width
self.dirty = True
def set_date_width (self, width):
"""S.set_date_width (...) -> None
Sets the width of the date area.
Raises a TypeError, if the passed argument is not a positive
integer.
"""
if (type (width) != int) or (width < 0):
raise TypeError ("width must be a positive integer")
self._datewidth = width
self.dirty = True
def show_tips (self, show):
"""S.show_tips (...) -> None
Shows or hides the info message display of the StatusBar.
"""
if show != self._showtips:
self._showtips = show
self.dirty = True
def show_date (self, show):
"""S.show_date (...) -> None
Shows or hides the date display of the StatusBar.
"""
if show != self._showdate:
self._showdate = show
self.dirty = True
def set_date_format (self, format):
"""S.set_date_format (...) -> None
Sets the display format for the date.
The passed format string has to be in a format, which can be
understood by the strftime() method.
"""
self._dateformat = format
self.dirty = True
def get_current_tip (self):
"""S.get_current_tip () -> string or unicode
Gets the current info message to display.
"""
length = len (self._tips)
if length > 0:
return self._tips[length - 1]
return ""
def push_tip (self, tip):
"""S.push_tip (...) -> None
        Puts an info message on the stack of the StatusBar.
Raises a TypeError, if the passed argument is not a string or
unicode.
"""
if type (tip) not in (str, unicode):
raise TypeError ("tip must be a string or unicode")
self._tips.append (tip)
self.dirty = True
def pop_tip (self):
"""S.pop_tip () -> None
        Removes the topmost info message from the StatusBar stack.
"""
if len (self._tips) > 0:
self._tips.pop ()
self.dirty = True
def calculate_size (self):
"""S.calculate_size () -> int, int.
Calculates the size needed by the children.
Calculates the size needed by the children and returns the
resulting width and height.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("STATUSBAR_BORDER"))
width = 2 * (border + self.padding)
height = 0
spacing = self.spacing
for widget in self.children:
width += widget.width + spacing
if widget.height > height:
height = widget.height
height += 2 * (border + self.padding)
# Add width for the tips and date.
if self.tips:
width += self.tip_width
if self.date:
width += self.spacing
if self.date:
width += self.date_width
if not self.tips and not self.date:
width -= self.spacing
return width, height
def dispose_widgets (self):
"""S.dispose_widgets (...) -> None
Moves the children of the StatusBar to their correct positions.
"""
border = base.GlobalStyle.get_border_size \
(self.__class__, self.style,
StyleInformation.get ("STATUSBAR_BORDER"))
padding = self.padding
spacing = self.spacing
diff = (self.height - 2 * (border + padding)) / 2
x = self._xoffset
addy = border + padding + diff
for widget in self.children:
            y = addy - widget.height / 2
widget.topleft = (x, y)
x += widget.width + spacing
def notify (self, event):
"""S.notify (...) -> None
Notifies the StatusBar about an event.
"""
if event.signal == SIG_TICK:
if self._timer == 0:
self._timer = _TIMER
if self.date:
self.dirty = True
self._timer -= 1
def draw_bg (self):
"""S.draw_bg () -> Surface
Draws the background surface of the StatusBar and returns it.
Creates the visible surface of the StatusBar and returns it to
the caller.
"""
return base.GlobalStyle.engine.draw_statusbar (self)
def draw (self):
"""S.draw () -> None
Draws the StatusBar surface and places its children on it.
"""
Container.draw (self)
cls = self.__class__
style = base.GlobalStyle
border = style.get_border_size \
(cls, self.style, StyleInformation.get ("STATUSBAR_BORDER"))
blit = self.image.blit
rect = self.image.get_rect()
self._xoffset = self.padding + border
# Draw the current tip.
if self.tips:
surface_tip = style.engine.draw_string (self.current_tip,
self.state, cls,
self.style)
r = surface_tip.get_rect ()
r.x = self._xoffset
r.centery = rect.centery
blit (surface_tip, r, (0, 0, self.tip_width, r.height))
self._xoffset += self.tip_width + self.spacing
# Place the children of the StatusBar and blit them.
self.dispose_widgets ()
for widget in self.children:
blit (widget.image, widget.rect)
# Draw the date.
if self.date:
surface_date = style.engine.draw_string (self.current_date,
self.state, cls,
self.style)
r = surface_date.get_rect ()
r.right = rect.right - border - self.padding
r.centery = rect.centery
blit (surface_date, r, (0, 0, self._datewidth, r.height))
tips = property (lambda self: self._showtips,
lambda self, var: self.show_tips (var),
doc = "Indicates, whether the tooltips should be shown.")
tip_width = property (lambda self: self._tipwidth,
lambda self, var: self.set_tip_width (var),
doc = "The width of the tip area.")
date = property (lambda self: self._showdate,
lambda self, var: self.show_date (var),
doc = "Indicates, whether the date should be shown.")
date_width = property (lambda self: self._datewidth,
lambda self, var: self.set_date_width (var),
doc = "The width of the date area.")
current_tip = property (lambda self: self.get_current_tip (),
doc = "The current tooltip to display.")
current_date = property (lambda self: datetime.now ().strftime \
(self._dateformat),
doc = "The current date to display.")
date_format = property (lambda self: self._dateformat,
lambda self, var: self.set_date_format (var),
doc = "Sets the display format of the date.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/StatusBar.py",
"copies": "1",
"size": "13618",
"license": "bsd-2-clause",
"hash": 4715660061696632000,
"line_mean": 34.7427821522,
"line_max": 79,
"alpha_frac": 0.5925245998,
"autogenerated": false,
"ratio": 4.356365962891875,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5448890562691875,
"avg_score": null,
"num_lines": null
} |
# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Spanning Tree Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class STP(dpkt.Packet):
"""Spanning Tree Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of STP.
TODO.
"""
__hdr__ = (
('proto_id', 'H', 0),
('v', 'B', 0),
('type', 'B', 0),
('flags', 'B', 0),
('root_id', '8s', ''),
('root_path', 'I', 0),
('bridge_id', '8s', ''),
('port_id', 'H', 0),
('_age', 'H', 0),
('_max_age', 'H', 0),
('_hello', 'H', 0),
('_fd', 'H', 0)
)
@property
def age(self):
return self._age >> 8
@age.setter
def age(self, age):
self._age = age << 8
@property
def max_age(self):
return self._max_age >> 8
@max_age.setter
def max_age(self, max_age):
self._max_age = max_age << 8
@property
def hello(self):
return self._hello >> 8
@hello.setter
def hello(self, hello):
self._hello = hello << 8
@property
def fd(self):
return self._fd >> 8
@fd.setter
def fd(self, fd):
self._fd = fd << 8
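# Note (added commentary): the 802.1D BPDU timer fields are 16-bit values
# in units of 1/256 second, so shifting by 8 bits converts between the
# wire format kept in _age/_max_age/_hello/_fd and whole seconds.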
def test_stp():
buf = b'\x00\x00\x02\x02\x3e\x80\x00\x08\x00\x27\xad\xa3\x41\x00\x00\x00\x00\x80\x00\x08\x00\x27\xad\xa3\x41\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x02\x00\x14\x00'
stp = STP(buf)
assert stp.proto_id == 0
assert stp.port_id == 0x8001
assert stp.age == 0
assert stp.max_age == 20
assert stp.hello == 2
assert stp.fd == 15
assert bytes(stp) == buf
stp.fd = 100
assert stp.pack_hdr()[-2:] == b'\x64\x00' # 100 << 8
if __name__ == '__main__':
# Runs all the test associated with this class/file
test_stp()
print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/stp.py",
"copies": "3",
"size": "1938",
"license": "bsd-3-clause",
"hash": 2394489361608326700,
"line_mean": 21.0227272727,
"line_max": 185,
"alpha_frac": 0.5092879257,
"autogenerated": false,
"ratio": 2.841642228739003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4850930154439003,
"avg_score": null,
"num_lines": null
} |
# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Spanning Tree Protocol."""
import dpkt
class STP(dpkt.Packet):
__hdr__ = (
('proto_id', 'H', 0),
('v', 'B', 0),
('type', 'B', 0),
('flags', 'B', 0),
('root_id', '8s', ''),
('root_path', 'I', 0),
('bridge_id', '8s', ''),
('port_id', 'H', 0),
('_age', 'H', 0),
('_max_age', 'H', 0),
('_hello', 'H', 0),
('_fd', 'H', 0)
)
@property
def age(self):
return self._age >> 8
@age.setter
def age(self, age):
self._age = age << 8
@property
def max_age(self):
return self._max_age >> 8
@max_age.setter
def max_age(self, max_age):
self._max_age = max_age << 8
@property
def hello(self):
return self._hello >> 8
@hello.setter
def hello(self, hello):
self._hello = hello << 8
@property
def fd(self):
return self._fd >> 8
@fd.setter
def fd(self, fd):
self._fd = fd << 8
def test_stp():
buf = '\x00\x00\x02\x02\x3e\x80\x00\x08\x00\x27\xad\xa3\x41\x00\x00\x00\x00\x80\x00\x08\x00\x27\xad\xa3\x41\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x02\x00\x14\x00'
stp = STP(buf)
assert stp.proto_id == 0
assert stp.port_id == 0x8001
assert stp.age == 0
assert stp.max_age == 20
assert stp.hello == 2
assert stp.fd == 15
assert str(stp) == buf
stp.fd = 100
assert stp.pack_hdr()[-2:] == '\x64\x00' # 100 << 8
if __name__ == '__main__':
# Runs all the test associated with this class/file
test_stp()
print 'Tests Successful...'
| {
"repo_name": "lkash/test",
"path": "dpkt/stp.py",
"copies": "6",
"size": "1695",
"license": "bsd-3-clause",
"hash": -9215599178636895000,
"line_mean": 21.012987013,
"line_max": 184,
"alpha_frac": 0.4955752212,
"autogenerated": false,
"ratio": 2.707667731629393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6203242952829393,
"avg_score": null,
"num_lines": null
} |
# TODO: make font caching much more elegant
"""String drawing functions with font caching capabilities."""
from pygame import font as PygameFont
from Constants import *
__font_cache = {}
def create_file_font (fontfile, size, style=FONT_STYLE_NORMAL):
"""create_file_font (...) -> Font
Creates a new font from a given font file.
The 'fontfile' is the path to a font file or a python file-like
object. The resulting pygame.Font will have the specified height
'size' in pixels. Additional styles can be passed to give the font a
    bold, italic, or underlined style. The 'style' argument must be a
valid combination of the FONT_STYLE_TYPES.
The font will be cached internally, so it can be reused without
creating it again and again. This also means, that any operation,
which will modify the pygame.Font() object, will also be applied to
the cached one. It is possible to avoid this by copying the font
object using font.copy().
The following example will try to load a font with a height of 14
pixels from a subdirectory:
create_file_font ('fonts/MyFont.ttf', 14)
Raises a TypeError, if the 'style' argument is not a valid FONT_STYLE_TYPES
value.
"""
global __font_cache
retval = None
if (style != FONT_STYLE_NORMAL) and not constants_is_font_style (style):
raise TypeError ("style must be a value from FONT_STYLE_TYPES")
# Try to clean up after 30 fonts, so we get rid of the old ones.
if len (__font_cache.items ()) > 30:
__font_cache.clear ()
if (fontfile, size, style) not in __font_cache:
retval = PygameFont.Font (fontfile, size)
__font_cache[(fontfile, size, style)] = retval
apply_font_style (retval, style)
return retval
else:
return __font_cache[(fontfile, size, style)]
def create_system_font (fontname, size, style=FONT_STYLE_NORMAL):
"""create_system_font (...) -> Font
Creates a new font from a given font name of the system fonts.
The 'fontname' is a valid system font name of an installed font.
The resulting pygame.Font will have the specified height 'size' in
    pixels. Additional styles can be passed to give the font a bold,
    italic, or underlined look. The 'style' argument must be a valid
    combination of the FONT_STYLE_TYPES.
    The font will be cached internally, so it can be reused without
    being created again and again. This also means that any operation
    which modifies the pygame.Font() object will also affect the
    cached one. It is possible to avoid this by copying the font
    object using font.copy().
This will always return a valid Font object, and will fallback on
the builtin pygame font if the given font is not found.
The following example will try to load a font with a height of 14:
create_system_font ('Helvetica', 14)
Raises a TypeError, if the 'style' argument is not a valid FONT_STYLE_TYPES
value.
"""
global __font_cache
retval = None
if (style != FONT_STYLE_NORMAL) and not constants_is_font_style (style):
raise TypeError ("style must be a value from FONT_STYLE_TYPES")
bold = style & FONT_STYLE_BOLD == FONT_STYLE_BOLD
italic = style & FONT_STYLE_ITALIC == FONT_STYLE_ITALIC
underline = style & FONT_STYLE_UNDERLINE == FONT_STYLE_UNDERLINE
# Try to clean up after 30 fonts, so we get rid of the old ones.
if len (__font_cache.items ()) > 30:
__font_cache.clear ()
if (fontname, size, style) not in __font_cache:
retval = PygameFont.SysFont (fontname, size, bold, italic)
__font_cache[(fontname, size, style)] = retval
retval.set_underline (underline)
return retval
else:
return __font_cache[(fontname, size, style)]
def create_font (font, size, style=FONT_STYLE_NORMAL):
"""create_font (...) -> Font
Wraps the create_file_font() and create_system_font() methods.
This function tries to create a font using create_file_font() and
calls create_system_font() upon failure.
"""
fnt = None
try:
fnt = create_file_font (font, size, style)
except IOError: # Could not find font file.
fnt = create_system_font (font, size, style)
return fnt
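# A usage sketch (assumes pygame.font has been initialized and that
# 'Helvetica' resolves as a font file or an installed system font):
#
#     fnt = create_font ('Helvetica', 14, FONT_STYLE_BOLD)
#     surface = fnt.render ('Hello', 1, (0, 0, 0))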
def draw_string (text, font, size, antialias, color, style=FONT_STYLE_NORMAL):
"""draw_string (...) -> Surface
Creates a surface displaying a string.
    'text' is the text which should be rendered to a surface. 'font'
    can be any valid font name of the system fonts or a font file (see
    the note). The 'size' is the height of the font in pixels,
    'antialias' is an integer value which enables or disables
    antialiasing of the font, and 'color' is a pygame color style
    argument for the text. Additional styles can be passed to give the
    font a bold, italic, or underlined look. The 'style' argument must
    be a valid combination of the FONT_STYLE_TYPES.
The following example will create an antialiased string surface
with a black font color and tries to use an installed 'Helvetica'
font:
draw_string ('Test', 'Helvetica', 14, 1, (0, 0, 0))
Note: The function first tries to resolve the font as font file.
If that fails, it looks for a system font name, which matches the
font argument and returns a Font object based on those information
(or the fallback font of pygame, see pygame.font.SysFont() for
more information).
    Besides this, the function simply calls the pygame Font.render()
    method, thus all of its documentation can be applied to this
    function, too.
"""
fnt = create_font (font, size, style)
if not text:
text = ""
return fnt.render (text, antialias, color)
def draw_string_with_bg (text, font, size, antialias, color, bgcolor,
style=FONT_STYLE_NORMAL):
"""draw_string_with_bg (...) -> Surface
    Creates a surface displaying a string.
    'text' is the text which should be rendered to a surface. 'font'
    can be any valid font name of the system fonts or a font file (see
    the note). The 'size' is the height of the font in pixels,
    'antialias' is an integer value which enables or disables
    antialiasing of the font, and 'color' is a pygame color style
    argument for the text. The Surface will be filled with the passed
    background color 'bgcolor'. Additional styles can be passed to give
    the font a bold, italic, or underlined look. The 'style' argument
    must be a valid combination of the FONT_STYLE_TYPES.
The following example will create an antialiased string surface with
a black font color and white background color and tries to use an
installed 'Helvetica' font:
    draw_string_with_bg ('Test', 'Helvetica', 14, 1, (0, 0, 0), (255, 255, 255))
Note: The function first tries to resolve the font as font file.
If that fails, it looks for a system font name, which matches the
font argument and returns a Font object based on those information
(or the fallback font of pygame, see pygame.font.SysFont() for
more information).
    Besides this, the function simply calls the pygame Font.render()
    method, thus all of its documentation can be applied to this
    function, too.
"""
fnt = create_font (font, size, style)
if not text:
text = ""
return fnt.render (text, antialias, color, bgcolor)
def apply_font_style (font, style):
"""apply_font_style (font, style) -> None
Applies font rendering styles to a Font.
Raises a TypeError, if the 'style' argument is not a valid FONT_STYLE_TYPES
value.
"""
if style == FONT_STYLE_NORMAL:
return
    if not constants_is_font_style (style):
        raise TypeError ("style must be a value of FONT_STYLE_TYPES")
bold = style & FONT_STYLE_BOLD == FONT_STYLE_BOLD
italic = style & FONT_STYLE_ITALIC == FONT_STYLE_ITALIC
underline = style & FONT_STYLE_UNDERLINE == FONT_STYLE_UNDERLINE
font.set_bold (bold)
font.set_italic (italic)
font.set_underline (underline)
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/draw/String.py",
"copies": "1",
"size": "9513",
"license": "bsd-2-clause",
"hash": -5981090804328193000,
"line_mean": 40.7236842105,
"line_max": 79,
"alpha_frac": 0.6959949543,
"autogenerated": false,
"ratio": 3.9687108886107634,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5164705842910764,
"avg_score": null,
"num_lines": null
} |
""" Useful structures and classes to replace or supplement
core Python structures.
"""
import types
from optparse import OptionParser
class ReadOnlyDict(dict):
""" A dictionary that has entries that may not be
overwritten once assigned UNLESS the key has been explicitly
defined as re-writeable by adding it to the rewritable list
with setRewriteable. """
def __init__(self, *args, **kargs):
dict.__init__(self, *args, **kargs)
self.__REWRITEABLE = []
def setRewriteable(self, keys=[]):
""" Make names in the 'keys' list re-bindable in this
dictionary."""
if type(keys) == types.StringType:
self.__REWRITEABLE.append(keys)
elif type(keys) == types.ListType:
self.__REWRITEABLE.extend(keys)
def __setitem__(self, key, value):
if dict.has_key(self, key) and key not in self.__REWRITEABLE:
raise KeyError("This key is already assigned and may not be modified")
dict.__setitem__(self, key, value)
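    # Example sketch of the intended behaviour:
    #
    #     d = ReadOnlyDict()
    #     d['a'] = 1
    #     d.setRewriteable('a')
    #     d['a'] = 2        # allowed: 'a' was made re-bindable
    #     d['b'] = 1
    #     d['b'] = 2        # raises KeyError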
class FilteredOptions(object):
""" Takes options from OptionParser and provides a similar access
interface but one which performs on-the-fly translation of values.
Values to substitute are written as $NAME where NAME is one of:
- the uppercase version of a key in the options dictionary
- the uppercase version of a key in the optional list of values
supplied to the constructor as the 'data' argument
Substitutions supplied in the data argument override those in the command
line so this is not a means to add defaults - they should be set with the
add_option method.
As with options returned by OptionParser, you reference an option as::
options.<key>
"""
def __init__(self, options, data={}):
self.substitutions = {}
self.options = options
self.data = data
for k,v in self.options.__dict__.items():
if type(v) == types.StringType:
self.substitutions[k.upper()] = v
for k,v in self.data.items():
if type(v) == types.StringType:
self.substitutions[k.upper()] = v
else:
raise TypeError("Can only provide strings as substitutions.")
self.substKeys = self.substitutions.keys()
def __getattr__(self, attr):
""" First check to see if the attribute is in the override
list so that overides always get returned. If not, get from the underlying
options then apply substitutions """
if self.data.has_key(attr):
return self.__substitute__(self.data[attr])
v = getattr(self.options, attr)
return self.__substitute__(v)
def __substitute__(self, v):
""" Substitute all $<KEY> values in v for which we have a KEY"""
if type(v) == types.StringType:
for k in self.substKeys:
v = v.replace('$%s'%k, self.substitutions[k])
return v
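    # Example sketch (assumed values): with options.host == 'example.com',
    # a value such as 'http://$HOST/api' is rewritten on access to
    # 'http://example.com/api'.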
def __str__(self):
return "Options: %s\nOveride: %s " % (str(self.options), str(self.data) )
def filterList(self, l):
""" Helper utility, this method will take a list of
values and apply the substitution to all values in it. """
return [self.__substitute__(v) for v in l]
class FilteredOptionParser(OptionParser):
def __init__(self, *args, **kargs):
OptionParser.__init__(self, *args, **kargs)
self.substitutions = {}
def setSubstitutions(self, **kargs):
self.substitutions = kargs
def parse_args(self, *args, **kargs):
opts, argList = OptionParser.parse_args(self, *args, **kargs)
fo = FilteredOptions(opts, self.substitutions)
fa = fo.filterList(argList)
return (fo,fa)
class RoundRobinList(list):
""" A list that has handy utility methods for iterating over in a round
robin manner. This handles changing list length and constantly provides
values, looping to the start once it hits the end. """
def rrnext(self):
if not self.__dict__.has_key('__ix'):
self.__dict__['__ix']=0
__ix = self.__dict__['__ix']
_len = self.__len__()
        # check things in advance: changes may have occurred since
        # the last call
if _len == 0:
return None
if _len <= __ix:
__ix=0
self.__dict__['__ix']=0
v = self[__ix]
self.__dict__['__ix'] = (__ix+1) % _len
return v
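    # Example sketch:
    #
    #     rr = RoundRobinList(['a', 'b'])
    #     rr.rrnext()   # -> 'a'
    #     rr.rrnext()   # -> 'b'
    #     rr.rrnext()   # -> 'a' again, wrapping to the start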
def __getslice__(self, i, j):
""" Ensure a RoundRobinList is returned from a slice.
Returned object has index re-set to zero."""
slc = list.__getslice__(self, i, j)
        return RoundRobinList(slc)
| {
"repo_name": "aquamatt/Peloton",
"path": "src/peloton/utils/structs.py",
"copies": "1",
"size": "4879",
"license": "bsd-3-clause",
"hash": 3196404754636769300,
"line_mean": 35.1481481481,
"line_max": 82,
"alpha_frac": 0.6081164173,
"autogenerated": false,
"ratio": 4.096557514693535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020235630502009112,
"num_lines": 135
} |
# $Id: stun.py 47 2008-05-27 02:10:00Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple Traversal of UDP through NAT."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
# STUN - RFC 3489
# http://tools.ietf.org/html/rfc3489
# Each packet has a 20 byte header followed by 0 or more attribute TLVs.
# Message Types
BINDING_REQUEST = 0x0001
BINDING_RESPONSE = 0x0101
BINDING_ERROR_RESPONSE = 0x0111
SHARED_SECRET_REQUEST = 0x0002
SHARED_SECRET_RESPONSE = 0x0102
SHARED_SECRET_ERROR_RESPONSE = 0x0112
# Message Attributes
MAPPED_ADDRESS = 0x0001
RESPONSE_ADDRESS = 0x0002
CHANGE_REQUEST = 0x0003
SOURCE_ADDRESS = 0x0004
CHANGED_ADDRESS = 0x0005
USERNAME = 0x0006
PASSWORD = 0x0007
MESSAGE_INTEGRITY = 0x0008
ERROR_CODE = 0x0009
UNKNOWN_ATTRIBUTES = 0x000a
REFLECTED_FROM = 0x000b
class STUN(dpkt.Packet):
"""Simple Traversal of UDP through NAT.
STUN - RFC 3489
http://tools.ietf.org/html/rfc3489
Each packet has a 20 byte header followed by 0 or more attribute TLVs.
Attributes:
__hdr__: Header fields of STUN.
TODO.
"""
__hdr__ = (
('type', 'H', 0),
('len', 'H', 0),
('xid', '16s', 0)
)
def tlv(buf):
n = 4
t, l = struct.unpack('>HH', buf[:n])
v = buf[n:n + l]
pad = (n - l % n) % n
buf = buf[n + l + pad:]
return t, l, v, buf
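# A worked sketch of the alignment above (hypothetical buffer): a
# 23-byte USERNAME value is followed by one pad byte, so the next TLV
# starts on a 4-byte boundary:
#
#     t, l, v, rest = tlv(b'\x00\x06\x00\x17' + b'x' * 23 + b'\x00')
#     assert (t, l, rest) == (USERNAME, 23, b'')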
def parse_attrs(buf):
"""Parse STUN.data buffer into a list of (attribute, data) tuples."""
attrs = []
while buf:
t, _, v, buf = tlv(buf)
attrs.append((t, v))
return attrs
def test_stun_response():
s = b'\x01\x01\x00\x0c\x21\x12\xa4\x42\x53\x4f\x70\x43\x69\x69\x35\x4a\x66\x63\x31\x7a\x00\x01\x00\x08\x00\x01\x11\x22\x33\x44\x55\x66'
m = STUN(s)
assert m.type == BINDING_RESPONSE
assert m.len == 12
attrs = parse_attrs(m.data)
assert attrs == [(MAPPED_ADDRESS, b'\x00\x01\x11\x22\x33\x44\x55\x66'), ]
def test_stun_padded():
s = (b'\x00\x01\x00\x54\x21\x12\xa4\x42\x35\x59\x53\x6e\x42\x71\x70\x56\x77\x61\x39\x4f\x00\x06'
b'\x00\x17\x70\x4c\x79\x5a\x48\x52\x3a\x47\x77\x4c\x33\x41\x48\x42\x6f\x76\x75\x62\x4c\x76'
b'\x43\x71\x6e\x00\x80\x2a\x00\x08\x18\x8b\x10\x4c\x69\x7b\xf6\x5b\x00\x25\x00\x00\x00\x24'
b'\x00\x04\x6e\x00\x1e\xff\x00\x08\x00\x14\x60\x2b\xc7\xfc\x0d\x10\x63\xaa\xc5\x38\x1c\xcb'
b'\x96\xa9\x73\x08\x73\x9a\x96\x0c\x80\x28\x00\x04\xd1\x62\xea\x65')
m = STUN(s)
assert m.type == BINDING_REQUEST
assert m.len == 84
attrs = parse_attrs(m.data)
assert len(attrs) == 6
assert attrs[0] == (USERNAME, b'pLyZHR:GwL3AHBovubLvCqn')
assert attrs[4][0] == MESSAGE_INTEGRITY
if __name__ == '__main__':
test_stun_response()
test_stun_padded()
print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/stun.py",
"copies": "3",
"size": "2822",
"license": "bsd-3-clause",
"hash": -3040369323651185000,
"line_mean": 26.1346153846,
"line_max": 139,
"alpha_frac": 0.635719348,
"autogenerated": false,
"ratio": 2.3615062761506276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9484190188504409,
"avg_score": 0.0026070871292436138,
"num_lines": 104
} |
# $Id: stun.py 47 2008-05-27 02:10:00Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple Traversal of UDP through NAT."""
import struct
import dpkt
# STUN - RFC 3489
# http://tools.ietf.org/html/rfc3489
# Each packet has a 20 byte header followed by 0 or more attribute TLVs.
# Message Types
BINDING_REQUEST = 0x0001
BINDING_RESPONSE = 0x0101
BINDING_ERROR_RESPONSE = 0x0111
SHARED_SECRET_REQUEST = 0x0002
SHARED_SECRET_RESPONSE = 0x0102
SHARED_SECRET_ERROR_RESPONSE = 0x0112
# Message Attributes
MAPPED_ADDRESS = 0x0001
RESPONSE_ADDRESS = 0x0002
CHANGE_REQUEST = 0x0003
SOURCE_ADDRESS = 0x0004
CHANGED_ADDRESS = 0x0005
USERNAME = 0x0006
PASSWORD = 0x0007
MESSAGE_INTEGRITY = 0x0008
ERROR_CODE = 0x0009
UNKNOWN_ATTRIBUTES = 0x000a
REFLECTED_FROM = 0x000b
class STUN(dpkt.Packet):
__hdr__ = (
('type', 'H', 0),
('len', 'H', 0),
('xid', '16s', 0)
)
def tlv(buf):
n = 4
t, l = struct.unpack('>HH', buf[:n])
v = buf[n:n + l]
buf = buf[n + l:]
return t, l, v, buf
def parse_attrs(buf):
"""Parse STUN.data buffer into a list of (attribute, data) tuples."""
attrs = []
while buf:
t, _, v, buf = tlv(buf)
attrs.append((t, v))
return attrs
def test_stun_response():
s = '\x01\x01\x00\x0c\x21\x12\xa4\x42\x53\x4f\x70\x43\x69\x69\x35\x4a\x66\x63\x31\x7a\x00\x01\x00\x08\x00\x01\x11\x22\x33\x44\x55\x66'
m = STUN(s)
assert m.type == BINDING_RESPONSE
assert m.len == 12
attrs = parse_attrs(m.data)
assert attrs == [(MAPPED_ADDRESS, '\x00\x01\x11\x22\x33\x44\x55\x66'), ]
if __name__ == '__main__':
test_stun_response()
print 'Tests Successful...'
| {
"repo_name": "bpanneton/dpkt",
"path": "dpkt/stun.py",
"copies": "6",
"size": "1666",
"license": "bsd-3-clause",
"hash": -7848825107995579000,
"line_mean": 22.1388888889,
"line_max": 138,
"alpha_frac": 0.6368547419,
"autogenerated": false,
"ratio": 2.6236220472440945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6260476789144095,
"avg_score": null,
"num_lines": null
} |
"""Style class for widgets."""
import os.path, copy
from UserDict import IterableUserDict
from Constants import *
from StyleInformation import StyleInformation
# Import the default theme engine path.
import sys
sys.path.append (DEFAULTDATADIR)
from themes import default
class WidgetStyle (IterableUserDict):
"""WidgetStyle (dict=None) -> WidgetStyle
A style dictionary that tracks changes of its items.
The WidgetStyle dictionary class allows one to let a bound method be
executed, whenever a value of the dictionary changed. Additionally,
the WidgetStyle class supports recursive changes, so that a
WidgetStyle object within this WidgetStyle can escalate the value
change and so on.
To set up the value change handler you can bind it using the
set_value_changed() method of the WidgetStyle class:
style.set_value_changed (callback_method)
This will cause it and all values of it, which are WidgetStyle
objects to invoke the callback_method on changes.
To get the actually set value change handler, the
get_value_changed() method can be used:
callback = style.get_value_changed ()
"""
def __init__ (self, dict=None):
# Notifier slot.
self._valuechanged = None
IterableUserDict.__init__ (self, dict)
def __copy__ (self):
"""W.__copy__ () -> WidgetStyle
Creates a shallow copy of the WidgetStyle dictionary.
"""
newdict = WidgetStyle ()
keys = self.keys ()
for k in keys:
newdict[k] = self[k]
newdict.set_value_changed (self._valuechanged)
return newdict
def __deepcopy__(self, memo={}):
"""W.__deepcopy__ (...) -> WidgetStyle.
Creates a deep copy of the WidgetStyle dictionary.
"""
newdict = WidgetStyle ()
keys = self.keys ()
for k in keys:
newdict[copy.deepcopy (k, memo)] = copy.deepcopy (self[k], memo)
newdict.set_value_changed (self._valuechanged)
memo [id (self)] = newdict
return newdict
def __setitem__ (self, i, y):
"""W.__setitem__ (i, y) <==> w[i] = y
"""
IterableUserDict.__setitem__ (self, i, y)
if isinstance (y, WidgetStyle):
y.set_value_changed (self._valuechanged)
if self._valuechanged:
self._valuechanged ()
def __delitem__ (self, y):
"""W.__delitem__ (i, y) <==> del w[y]
"""
IterableUserDict.__delitem__ (self, y)
if self._valuechanged:
self._valuechanged ()
def __repr__ (self):
"""W.__repr__ () <==> repr (W)
"""
return "WidgetStyle %s" % IterableUserDict.__repr__ (self)
    def clear (self):
"""W.clear () -> None
Remove all items from the WidgetStyle dictionary.
"""
changed = self._valuechanged
self.set_value_changed (None)
IterableUserDict.clear (self)
self.set_value_changed (changed)
if changed:
changed ()
def pop (self, k, d=None):
"""W.pop (k, d=None) -> object
Remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError
is raised.
"""
changed = IterableUserDict.has_key (self, k)
v = IterableUserDict.pop (self, k, d)
if changed and self._valuechanged:
self._valuechanged ()
return v
def popitem (self):
"""W.popitem () -> (k, v)
Remove and return some (key, value) pair as a 2-tuple
Raises a KeyError if D is empty.
"""
v = IterableUserDict.popitem (self)
if self._valuechanged:
self._valuechanged ()
return v
def setdefault (self, k, d=None):
"""W.setdefault (k,d=None) -> W.get (k, d), also set W[k] = d if k not in W
"""
changed = not IterableUserDict.has_key (self, k)
v = IterableUserDict.setdefault (self, k, d)
if changed and self._valuechanged:
self._valuechanged ()
return v
def update (self, E, **F):
"""W.update (E, **F) -> None
Update W from E and F.
for k in E: W[k] = E[k] (if E has keys else: for (k, v) in E:
W[k] = v) then: for k in F: W[k] = F[k]
"""
amount = len (self)
IterableUserDict.update (self, E, **F)
if self._valuechanged and (len (self) != amount):
self._valuechanged ()
def get_value_changed (self):
"""W.get_value_changed (...) -> callable
Gets the set callback method for the dictionary changes.
"""
return self._valuechanged
def set_value_changed (self, method):
"""W.set_value_changed (...) -> None
Connects a method to invoke, when an item of the dict changes.
Raises a TypeError, if the passed argument is not callable.
"""
if method and not callable (method):
raise TypeError ("method must be callable")
values = self.values ()
for val in values:
if isinstance (val, WidgetStyle):
val.set_value_changed (method)
self._valuechanged = method
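    # Example sketch: any assignment invokes the connected callback,
    # including assignments to nested WidgetStyle values.
    #
    #     style = WidgetStyle ({ 'shadow' : 2 })
    #     style.set_value_changed (lambda: sys.stdout.write ('changed\n'))
    #     style['shadow'] = 4    # invokes the callback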
class Style (object):
"""Style () -> Style
Style class for drawing objects.
Style definitions
-----------------
Styles are used to change the appearance of the widgets. The drawing
methods of the Style class will use the specific styles of the
'styles' attribute to change the fore- and background colors, font
information and border settings of the specific widgets. The styles
are set using the lower lettered classname of the widget to draw.
Additionally the Style class supports cascading styles by falling
back to the next available class name in the widget its __mro__
list. If no specific set style could be found for the specific
widget type, the Style class will use the 'default' style entry of
its 'styles' dictionary.
Any object can register its own style definition and request it
using the correct key. A Button widget for example, could register
its style definition this way:
Style.styles['button'] = WidgetStyle ({ .... })
Any other button widget then will use this style by default, so
their look is all the same.
It is also possible to pass an own type dictionary to the drawing
methods of the attached engine class in order to get a surface,
which can be shown on the screen then:
own_style = WidgetStyle ({ ... })
surface = style.engine.draw_rect (width, height, own_style)
If the style should be based on an already existing one, the
copy_style() method can be used to retrieve a copy to work with
    without touching the original one:
own_style = style.copy_style (my_button.__class__)
The BaseWidget class offers a get_style() method, which will copy
the style of the widget class to the specific widget instance. Thus
you can safely modify the instance specific style for the widget
without touching the style for all widgets of that class.
The style dictionaries registered within the 'styles' dictionary of
the Style class need to match some prerequisites to be useful. On
the one hand they need to incorporate specific key-value pairs,
which can be evaluated by the various drawing functions, on the
other they need to have some specific key-value pairs, which are
evaluated by the Style class.
The following lists give an overview about the requirements the
style dictionaries have to match, so that the basic Style class can
work with them as supposed.
Style entries
-------------
The registered style WidgetStyle dictionaries for the widgets need
to contain key-value pairs required by the functions of the
referenced modules. The following key-value pairs are needed to
create the surfaces:
bgcolor = WidgetStyle ({ STATE_TYPE : color, ... })
The background color to use for the widget surface.
fgcolor = WidgetStyle ({ STATE_TYPE : color, ... })
The foreground color to use for the widget. This is also the text
color for widgets, which will display text.
lightcolor = WidgetStyle ({ STATE_TYPE : color, ... })
darkcolor = WidgetStyle ({ STATE_TYPE : color, ... })
Used to create shadow border effects on several widgets. The color
values usually should be a bit brighter or darker than the bgcolor
values.
bordercolor = WidgetStyle ({ STATE_TYPE : color, ... })
Also used to create border effects on several widgets. In contrast
to the lightcolor and darkcolor entries, this is used, if flat
borders (BORDER_FLAT) have to drawn.
shadowcolor = (color1, color2)
The colors to use for dropshadow effects. The first tuple entry will
be used as inner shadow (near by the widget), the second as outer
shadow value.
image = WidgetStyle ({ STATE_TYPE : string })
Pixmap files to use instead of the background color. If the file is
not supplied or cannot be loaded, the respective bgcolor value is
used.
font = WidgetStyle ({ 'name' : string, 'size' : integer,
'alias' : integer, 'style' : integer })
Widgets, which support the display of text, make use of this
key-value pair. The 'name' key denotes the font name ('Helvetica')
or the full path to a font file ('/path/to/Helvetica.ttf'). 'size'
is the font size to use. 'alias' is interpreted as boolean value for
antialiasing. 'style' is a bit-wise combination of FONT_STYLE_TYPES
values as defined in ocempgui.draw.Constants and denotes additional
rendering styles to use.
shadow = integer
The size of the 3D border effect for a widget.
IMPORTANT: It is important to know, that the Style class only
supports a two-level hierarchy for styles. Especially the
copy_style() method is not aware of style entries of more than two
levels. This means, that a dictionary in a style dictionary is
possible (as done in the 'font' or the various color style entries),
but more complex style encapsulations are unlikely to work
correctly.
Some legal examples for user defined style entries:
style['ownentry'] = 99 # One level
style['ownentry'] = { 'foo' : 1, 'bar' : 2 } # Two levels: level1[level2]
This one however is not guaranteed to work correctly and thus should
be avoided:
style['ownentry'] = { 'foo' : { 'bar' : 1, 'baz' : 2 }, 'foobar' : { ... }}
Dicts and WidgetStyle
---------------------
OcempGUI uses a WidgetStyle dictionary class to keep track of
    changes within styles and to update widgets on the fly when changing
those styles. When you are creating new style files (as explained in
the next section) you do not need to explicitly use the
WidgetStyle() dictionary, but can use plain dicts instead. Those
will be automatically replaced by a WidgetStyle on calling load().
You should however avoid using plain dicts if you are modifying
styles of widgets or the Style class at run time and use a
WidgetStyle instead:
# generate_new_style() returns a plain dict.
style = generate_new_style ()
button.style = WidgetStyle (style)
Style files
-----------
A style file is a key-value pair association of style entries for
widgets. It can be loaded and used by the Style.load() method to set
specific themes and styles for the widgets. The style files use the
python syntax and contain key-value pairs of style information for
the specific widgets. The general syntax looks like follows:
widgetclassname = WidgetStyle ({ style_entry : { value } })
An example style file entry for the Button widget class can look
like the following:
button = { 'bgcolor' :{ STATE_NORMAL : (200, 100, 0) },
'fgcolor' : { STATE_NORMAL : (255, 0, 0) },
'shadow' : 5 }
The above example will set the bgcolor[STATE_NORMAL] color style
entry for the button widget class to (200, 100, 0), the
fgcolor[STATE_NORMAL] color style entry to (255, 0, 0) and the
'shadow' value for the border size to 5. Any other value of the
style will remain untouched.
Loading a style while running an application does not have any
effect on widgets
* with own styles set via the BaseWidget.get_style() method,
* already drawn widgets using the default style.
The latter ones need to be refreshed via the set_dirty()/update()
methods explicitly to make use of the new style.
Style files allow user-defined variables, which are prefixed with an
underscore. A specific color thus can be stored in a variable to allow
easier access of it.
_red = (255, 0, 0)
___myown_font = 'foo/bar/font.ttf'
Examples
--------
The OcempGUI module contains a file named 'default.rc' in the themes
directory of the installation, which contains several style values
for the default appearance of the widgets. Additional information
can be found in the manual of OcempGUI, too.
Attributes:
styles - A dictionary with the style definitions of various elements.
engine - The drawing engine, which takes care of drawing elements.
"""
__slots__ = ["styles", "_engine"]
def __init__ (self):
# Initialize the default style.
self.styles = {
"default" : WidgetStyle ({
"bgcolor" : WidgetStyle ({ STATE_NORMAL : (234, 228, 223),
STATE_ENTERED : (239, 236, 231),
STATE_ACTIVE : (205, 200, 194),
STATE_INSENSITIVE : (234, 228, 223) }),
"fgcolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (204, 192, 192) }),
"lightcolor" : WidgetStyle ({ STATE_NORMAL : (245, 245, 245),
STATE_ENTERED : (245, 245, 245),
STATE_ACTIVE : (30, 30, 30),
STATE_INSENSITIVE : (240, 240, 240)
}),
"darkcolor" : WidgetStyle ({ STATE_NORMAL : (30, 30, 30),
STATE_ENTERED : (30, 30, 30),
STATE_ACTIVE : (245, 245, 245),
STATE_INSENSITIVE : (204, 192, 192)
}),
"bordercolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (204, 192, 192)
}),
"shadowcolor" : ((100, 100, 100), (130, 130, 130)),
"image" : WidgetStyle ({ STATE_NORMAL : None,
STATE_ENTERED : None,
STATE_ACTIVE : None,
STATE_INSENSITIVE : None }),
"font" : WidgetStyle ({ "name" : None,
"size" : 16,
"alias" : True,
"style" : 0 }),
"shadow" : 2 })
}
# Load the default style and theme engine.
self.load (os.path.join (DEFAULTDATADIR, "themes", "default",
default.RCFILE))
self.set_engine (default.DefaultEngine (self))
def get_style (self, cls):
"""S.get_style (...) -> WidgetStyle
Returns the style for a specific widget class.
Returns the style for a specific widget class. If no matching
entry was found, the method searches for the next upper
entry of the class's __mro__. If it reaches the end of the
__mro__ list without finding a matching entry, the
default style will be returned.
"""
classes = [c.__name__.lower () for c in cls.__mro__]
for name in classes:
if name in self.styles:
return self.styles[name]
return self.styles.setdefault (cls.__name__.lower (), WidgetStyle ())
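    # Example sketch (hypothetical classes): for a ToggleButton(Button)
    # widget class, the lookup tries 'togglebutton', then 'button', and
    # so on along the __mro__; if nothing matches, an empty style is
    # registered for the class and returned:
    #
    #     style.get_style (ToggleButton)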
def get_style_entry (self, cls, style, key, subkey=None):
"""S.get_style_entry (...) -> value
Gets a style entry from the style dictionary.
        Gets an entry from the style dictionary. If the entry could not
be found, the method searches for the next upper entry of the
__mro__. If it reaches the end of the __mro__ list without
finding a matching entry, it will try to return the entry from
the 'default' style dictionary.
"""
deeper = subkey != None
if key in style:
if deeper:
if subkey in style[key]:
return style[key][subkey]
else:
return style[key]
styles = self.styles
classes = [c.__name__.lower () for c in cls.__mro__]
for name in classes:
if name in styles:
style = styles[name]
# Found a higher level class style, check it.
if key in style:
if deeper:
if subkey in style[key]:
return style[key][subkey]
else:
return style[key]
# None found, refer to the default.
if deeper:
return styles["default"][key][subkey]
return styles["default"][key]
def copy_style (self, cls):
"""S.copy_style (...) -> WidgetStyle
Creates a plain copy of a specific style.
Due to the cascading ability of the Style class, an existing
style will be filled with the entries of the 'default' style
dictionary which do not exist in it.
"""
style = copy.deepcopy (self.get_style (cls))
default = self.styles["default"]
for key in default:
if key not in style:
style[key] = copy.deepcopy (default[key])
            else:
                sub = default[key]
                # Only fill in missing sub-entries of dict-like values;
                # plain values and entries the style already defines
                # stay untouched.
                if isinstance (sub, (dict, WidgetStyle)):
                    for subkey in sub:
                        if subkey not in style[key]:
                            style[key][subkey] = copy.deepcopy (sub[subkey])
return style
def load (self, file):
"""S.load (...) -> None
Loads style definitions from a file.
Loads style definitions from a file and adds them to the
'styles' attribute. Already set values in this dictionary will
be overwritten.
"""
glob_dict = {}
loc_dict = {}
execfile (file, glob_dict, loc_dict)
setdefault = self.styles.setdefault
for key in loc_dict:
# Skip the Constants import directive and
# any user-defined variable.
if key.startswith ("_") or (key == "Constants"):
continue
# Search the style or create a new one from scratch.
entry = setdefault (key, WidgetStyle ())
# Look up all entries of our style keys and add them to the
# style.
widget = loc_dict[key]
for key in widget:
if type (widget[key]) == dict:
if key not in entry:
entry[key] = WidgetStyle ()
for subkey in widget[key]:
entry[key][subkey] = widget[key][subkey]
else:
entry[key] = widget[key]
def create_style_dict (self):
"""Style.create_style_dict () -> dict
Creates a new style dictionary.
Creates a new unfilled style dictionary with the most necessary
entries needed by the Style class specifications.
"""
style = WidgetStyle ({
"bgcolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (0, 0, 0) }),
"fgcolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (0, 0, 0) }),
"lightcolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (0, 0, 0) }),
"darkcolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (0, 0, 0) }),
"bordercolor" : WidgetStyle ({ STATE_NORMAL : (0, 0, 0),
STATE_ENTERED : (0, 0, 0),
STATE_ACTIVE : (0, 0, 0),
STATE_INSENSITIVE : (0, 0, 0) }),
"shadowcolor": ((0, 0, 0), (0, 0, 0)),
"image" : WidgetStyle ({ STATE_NORMAL : None,
STATE_ENTERED : None,
STATE_ACTIVE : None,
STATE_INSENSITIVE : None }),
"font" : WidgetStyle ({ "name" : None,
"size" : 0,
"alias" : False,
"style" : 0 }),
"shadow" : 0
})
return style
def get_border_size (self, cls=None, style=None, bordertype=BORDER_FLAT):
"""S.get_border_size (...) -> int
Gets the border size for a specific border type and style.
Gets the size of a border in pixels for the specific border type
        and style. For BORDER_NONE the value will be 0 by
default. BORDER_FLAT will always return a size of 1.
The sizes of other border types depend on the passed style.
If no style is passed, the method will try to retrieve a style
using the get_style() method.
Raises a ValueError, if the passed bordertype argument is
not a value of the BORDER_TYPES tuple.
"""
if bordertype not in BORDER_TYPES:
raise ValueError ("bordertype must be a value from BORDER_TYPES")
if not style:
style = self.get_style (cls)
if bordertype == BORDER_FLAT:
return 1
elif bordertype in (BORDER_SUNKEN, BORDER_RAISED):
return self.get_style_entry (cls, style, "shadow")
elif bordertype in (BORDER_ETCHED_IN, BORDER_ETCHED_OUT):
return self.get_style_entry (cls, style, "shadow") * 2
return 0
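    # Example sketch (assuming an ocempgui Button class and the default
    # style, where 'shadow' == 2):
    #
    #     style.get_border_size (Button, bordertype=BORDER_FLAT)      # -> 1
    #     style.get_border_size (Button, bordertype=BORDER_SUNKEN)    # -> 2
    #     style.get_border_size (Button, bordertype=BORDER_ETCHED_IN) # -> 4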
def set_engine (self, engine):
"""S.set_engine (...) -> None
Sets the drawing engine to use for drawing elements.
"""
        if engine is None:
raise TypeError ("engine must not be None")
self._engine = engine
engine = property (lambda self: self._engine,
lambda self, var: self.set_engine (var),
doc = "The drawing engine, which draws elements.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Style.py",
"copies": "1",
"size": "25289",
"license": "bsd-2-clause",
"hash": -3234399560509615000,
"line_mean": 39.0142405063,
"line_max": 83,
"alpha_frac": 0.57499308,
"autogenerated": false,
"ratio": 4.482275788727402,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.010569768320905611,
"num_lines": 632
} |
"""Simple observer subject class."""
from IObserver import IObserver
class Subject (object):
"""Subject (name) -> Subject
A class matching the subscriptable Subject of the observer pattern.
The Subject class allows other classes to be notified whenever its
state(s) change. It has an unique name which identifies itself upon
notification of the registered classes.
The unique name of the concrete subject implementation can be
adjusted using the 'name' attribute and set_name() method.
subject.name = 'MySubject'
subject.set_name ('AnotherSubject')
Observers will be notified about state changes through an update()
method, they have to implement. The IObserver interface class
contains details about its signature.
To notify observers about a state change, the Subject implementation
must invoke its notify() method with the state, that changed, its
old state value and the newly set value.
class MySubject (Subject):
def __init__ (self):
Subject.__init__ (self, 'UniqueSubjectName')
self._value = None
def set_value (self, value):
# Preserve old value.
oldval = self._value
# Set new value.
self._value = value
# Notify observers
            self.notify ('value', oldval, value)
A listening observer which should react upon changes of that value
could implement the reaction the following way:
class MyObserver (IObserver):
...
def update (self, subject, attribute, oldval, newval):
# Look for the certain subject.
if subject == 'UniqueSubjectName':
# Which state changed?
if attribute == 'value':
self.do_some_action (oldval, newval)
...
Attributes:
name - The name of the Subject.
listeners - List containing the attached observers.
"""
def __init__ (self, name):
self._name = None
self._listeners = []
self.set_name (name)
def set_name (self, name):
"""S.set_name (...) -> None
Sets the name of the Subject.
Raises a TypeError, if the passed argument is not a string or
unicode.
"""
if type (name) not in (str, unicode):
raise TypeError ("name must be a string or unicode")
self._name = name
def add (self, *observers):
"""S.add (observers) -> None
Adds one or more observers to the Subject.
Raises an AttributeError, if one of the passed 'observers'
arguments does not have a callable update() attribute.
Raises a ValueError, if one of the passed 'observers' arguments
is already registered as listener.
"""
for obj in observers:
if not isinstance (obj, IObserver):
print "Warning: observer should inherit from IObserver"
if not hasattr (obj, "update") or not callable (obj.update):
raise AttributeError ("update() method not found in"
"observer %s" % obj)
if obj in self._listeners:
raise ValueError ("observer %s already added" % obj)
self._listeners.append (obj)
def remove (self, observer):
"""S.remove (observer) -> None
Removes an observer from the Subject.
"""
self._listeners.remove (observer)
def notify (self, prop=None, oldval=None, newval=None):
"""S.notify (...) -> None
Notifies all observers about a state change.
"""
for obj in self._listeners:
obj.update (self.name, str (prop), oldval, newval)
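    # Example sketch:
    #
    #     subject = Subject ('UniqueSubjectName')
    #     subject.add (observer)          # observer implements update()
    #     subject.notify ('value', 0, 1)  # invokes observer.update ()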
name = property (lambda self: self._name,
                     lambda self, var: self.set_name (var),
doc = "The name of the Subject.")
listeners = property (lambda self: self._listeners,
doc = "The observers for the Subject.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/events/Subject.py",
"copies": "1",
"size": "5465",
"license": "bsd-2-clause",
"hash": 98646055335057380,
"line_mean": 37.7588652482,
"line_max": 78,
"alpha_frac": 0.6386093321,
"autogenerated": false,
"ratio": 4.627434377646063,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5766043709746063,
"avg_score": null,
"num_lines": null
} |
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
class TableMarkupError(DataError): pass
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
double_width_pad_char = '\x00'
"""Padding character for East Asian double-width text."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators in table (at line '
'offset %s and %s); only one allowed.'
% (self.head_body_sep, i))
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.')
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return None
return 1
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = self.colseps.keys() # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
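# A usage sketch (hedged): the parsers expect a
# docutils.statemachine.StringList rather than a plain list of strings,
# since setup() calls block.disconnect() and get_2D_block():
#
#     from docutils.statemachine import StringList
#     parser = GridTableParser()
#     colspecs, headrows, bodyrows = parser.parse(StringList(lines))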
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while 1:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete at line '
'offset %s.' % offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem at '
'line offset %s.' % (offset + 1))
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
        adjust for insignificant whitespace.
"""
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin at line '
'offset %s.' % (first_line + offset))
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in newdata.items():
master.setdefault(key, []).extend(values)
| {
"repo_name": "amith01994/intellij-community",
"path": "python/helpers/docutils/parsers/rst/tableparser.py",
"copies": "60",
"size": "20328",
"license": "apache-2.0",
"hash": 2825528233025261600,
"line_mean": 37.72,
"line_max": 80,
"alpha_frac": 0.5160369933,
"autogenerated": false,
"ratio": 4.137594138001221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""
This module defines table parser classes,which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
class TableMarkupError(DataError): pass
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
double_width_pad_char = '\x00'
"""Padding character for East Asian double-width text."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators in table (at line '
'offset %s and %s); only one allowed.'
% (self.head_body_sep, i))
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.')
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return None
return 1
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = self.rowseps.keys() # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = self.colseps.keys() # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while 1:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete at line '
'offset %s.' % offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
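    # Worked example (a sketch, not from the source): after setup() has
    # converted the '=' border to '-', parse_columns('----- -----', 0)
    # returns [(0, 5), (6, 11)] -- one (begin, end) pair per column.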
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem at '
'line offset %s.' % (offset + 1))
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
        adjust for insignificant whitespace.
"""
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxint, None))
lastcol = len(columns) - 2
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
columns[i] = (start, new_end)
main_start, main_end = self.columns[-1]
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin at line '
'offset %s.' % (first_line + offset))
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in newdata.items():
master.setdefault(key, []).extend(values)
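# Worked example (a sketch, not from the source):
#   master = {0: [0]}
#   update_dict_of_lists(master, {0: [3], 5: [2]})
#   # master == {0: [0, 3], 5: [2]}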
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/parsers/rst/tableparser.py",
"copies": "2",
"size": "20853",
"license": "bsd-3-clause",
"hash": 2542611895427078000,
"line_mean": 37.72,
"line_max": 80,
"alpha_frac": 0.5030451254,
"autogenerated": false,
"ratio": 4.228959643074427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5732004768474427,
"avg_score": null,
"num_lines": null
} |
"""
This module defines table parser classes, which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__ = 'reStructuredText'
import re
import sys
from docutils import DataError
from docutils.utils import strip_combining_chars
class TableMarkupError(DataError):
"""
Raise if there is any problem with table markup.
The keyword argument `offset` denotes the offset of the problem
from the table's start line.
"""
def __init__(self, *args, **kwargs):
self.offset = kwargs.pop('offset', 0)
DataError.__init__(self, *args)
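# Hedged caller-side sketch for the `offset` attribute (the surrounding
# names are illustrative only):
#
#   try:
#       structure = parser.parse(block)
#   except TableMarkupError as detail:
#       problem_line = table_start_line + detail.offset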
class TableParser:
"""
Abstract superclass for the common parts of the syntax-specific parsers.
"""
head_body_separator_pat = None
"""Matches the row separator between head rows and body rows."""
double_width_pad_char = '\x00'
"""Padding character for East Asian double-width text."""
def parse(self, block):
"""
Analyze the text `block` and return a table data structure.
Given a plaintext-graphic table in `block` (list of lines of text; no
whitespace padding), parse the table, construct and return the data
necessary to construct a CALS table or equivalent.
Raise `TableMarkupError` if there is any problem with the markup.
"""
self.setup(block)
self.find_head_body_sep()
self.parse_table()
structure = self.structure_from_cells()
return structure
def find_head_body_sep(self):
"""Look for a head/body row separator line; store the line index."""
for i in range(len(self.block)):
line = self.block[i]
if self.head_body_separator_pat.match(line):
if self.head_body_sep:
raise TableMarkupError(
'Multiple head/body row separators '
'(table lines %s and %s); only one allowed.'
% (self.head_body_sep+1, i+1), offset=i)
else:
self.head_body_sep = i
self.block[i] = line.replace('=', '-')
if self.head_body_sep == 0 or self.head_body_sep == (len(self.block)
- 1):
raise TableMarkupError('The head/body row separator may not be '
'the first or last line of the table.',
offset=i)
class GridTableParser(TableParser):
"""
Parse a grid table using `parse()`.
Here's an example of a grid table::
+------------------------+------------+----------+----------+
| Header row, column 1 | Header 2 | Header 3 | Header 4 |
+========================+============+==========+==========+
| body row 1, column 1 | column 2 | column 3 | column 4 |
+------------------------+------------+----------+----------+
| body row 2 | Cells may span columns. |
+------------------------+------------+---------------------+
| body row 3 | Cells may | - Table cells |
+------------------------+ span rows. | - contain |
| body row 4 | | - body elements. |
+------------------------+------------+---------------------+
Intersections use '+', row separators use '-' (except for one optional
head/body row separator, which uses '='), and column separators use '|'.
Passing the above table to the `parse()` method will result in the
following data structure::
([24, 12, 10, 10],
[[(0, 0, 1, ['Header row, column 1']),
(0, 0, 1, ['Header 2']),
(0, 0, 1, ['Header 3']),
(0, 0, 1, ['Header 4'])]],
[[(0, 0, 3, ['body row 1, column 1']),
(0, 0, 3, ['column 2']),
(0, 0, 3, ['column 3']),
(0, 0, 3, ['column 4'])],
[(0, 0, 5, ['body row 2']),
(0, 2, 5, ['Cells may span columns.']),
None,
None],
[(0, 0, 7, ['body row 3']),
(1, 0, 7, ['Cells may', 'span rows.', '']),
(1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
None],
[(0, 0, 9, ['body row 4']), None, None, None]])
The first item is a list containing column widths (colspecs). The second
item is a list of head rows, and the third is a list of body rows. Each
row contains a list of cells. Each cell is either None (for a cell unused
because of another cell's span), or a tuple. A cell tuple contains four
items: the number of extra rows used by the cell in a vertical span
(morerows); the number of extra columns used by the cell in a horizontal
span (morecols); the line offset of the first line of the cell contents;
and the cell contents, a list of lines of text.
"""
head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')
def setup(self, block):
self.block = block[:] # make a copy; it may be modified
self.block.disconnect() # don't propagate changes to parent
self.bottom = len(block) - 1
self.right = len(block[0]) - 1
self.head_body_sep = None
self.done = [-1] * len(block[0])
self.cells = []
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
Start with a queue of upper-left corners, containing the upper-left
corner of the table itself. Trace out one rectangular cell, remember
it, and add its upper-right and lower-left corners to the queue of
potential upper-left corners of further cells. Process the queue in
top-to-bottom order, keeping track of how much of each text column has
been seen.
We'll end up knowing all the row and column boundaries, cell positions
and their dimensions.
"""
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
if top == self.bottom or left == self.right \
or top <= self.done[left]:
continue
result = self.scan_cell(top, left)
if not result:
continue
bottom, right, rowseps, colseps = result
update_dict_of_lists(self.rowseps, rowseps)
update_dict_of_lists(self.colseps, colseps)
self.mark_done(top, left, bottom, right)
cellblock = self.block.get_2D_block(top + 1, left + 1,
bottom, right)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
self.cells.append((top, left, bottom, right, cellblock))
corners.extend([(top, right), (bottom, left)])
corners.sort()
if not self.check_parse_complete():
raise TableMarkupError('Malformed table; parse incomplete.')
def mark_done(self, top, left, bottom, right):
"""For keeping track of how much of each text column has been seen."""
before = top - 1
after = bottom - 1
for col in range(left, right):
assert self.done[col] == before
self.done[col] = after
def check_parse_complete(self):
"""Each text column should have been completely seen."""
last = self.bottom - 1
for col in range(self.right):
if self.done[col] != last:
return False
return True
def scan_cell(self, top, left):
"""Starting at the top-left corner, start tracing out a cell."""
assert self.block[top][left] == '+'
result = self.scan_right(top, left)
return result
def scan_right(self, top, left):
"""
Look for the top-right corner of the cell, and make note of all column
boundaries ('+').
"""
colseps = {}
line = self.block[top]
for i in range(left + 1, self.right + 1):
if line[i] == '+':
colseps[i] = [top]
result = self.scan_down(top, left, i)
if result:
bottom, rowseps, newcolseps = result
update_dict_of_lists(colseps, newcolseps)
return bottom, i, rowseps, colseps
elif line[i] != '-':
return None
return None
def scan_down(self, top, left, right):
"""
Look for the bottom-right corner of the cell, making note of all row
boundaries.
"""
rowseps = {}
for i in range(top + 1, self.bottom + 1):
if self.block[i][right] == '+':
rowseps[i] = [right]
result = self.scan_left(top, left, i, right)
if result:
newrowseps, colseps = result
update_dict_of_lists(rowseps, newrowseps)
return i, rowseps, colseps
elif self.block[i][right] != '|':
return None
return None
def scan_left(self, top, left, bottom, right):
"""
Noting column boundaries, look for the bottom-left corner of the cell.
It must line up with the starting point.
"""
colseps = {}
line = self.block[bottom]
for i in range(right - 1, left, -1):
if line[i] == '+':
colseps[i] = [bottom]
elif line[i] != '-':
return None
if line[left] != '+':
return None
result = self.scan_up(top, left, bottom, right)
if result is not None:
rowseps = result
return rowseps, colseps
return None
def scan_up(self, top, left, bottom, right):
"""
Noting row boundaries, see if we can return to the starting point.
"""
rowseps = {}
for i in range(bottom - 1, top, -1):
if self.block[i][left] == '+':
rowseps[i] = [left]
elif self.block[i][left] != '|':
return None
return rowseps
def structure_from_cells(self):
"""
From the data collected by `scan_cell()`, convert to the final data
structure.
"""
rowseps = list(self.rowseps.keys()) # list of row boundaries
rowseps.sort()
rowindex = {}
for i in range(len(rowseps)):
rowindex[rowseps[i]] = i # row boundary -> row number mapping
colseps = list(self.colseps.keys()) # list of column boundaries
colseps.sort()
colindex = {}
for i in range(len(colseps)):
colindex[colseps[i]] = i # column boundary -> col number map
colspecs = [(colseps[i] - colseps[i - 1] - 1)
for i in range(1, len(colseps))] # list of column widths
# prepare an empty table with the correct number of rows & columns
onerow = [None for i in range(len(colseps) - 1)]
rows = [onerow[:] for i in range(len(rowseps) - 1)]
# keep track of # of cells remaining; should reduce to zero
remaining = (len(rowseps) - 1) * (len(colseps) - 1)
for top, left, bottom, right, block in self.cells:
rownum = rowindex[top]
colnum = colindex[left]
assert rows[rownum][colnum] is None, (
'Cell (row %s, column %s) already used.'
% (rownum + 1, colnum + 1))
morerows = rowindex[bottom] - rownum - 1
morecols = colindex[right] - colnum - 1
remaining -= (morerows + 1) * (morecols + 1)
# write the cell into the table
rows[rownum][colnum] = (morerows, morecols, top + 1, block)
assert remaining == 0, 'Unused cells remaining.'
if self.head_body_sep: # separate head rows from body rows
numheadrows = rowindex[self.head_body_sep]
headrows = rows[:numheadrows]
bodyrows = rows[numheadrows:]
else:
headrows = []
bodyrows = rows
return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):
"""
Parse a simple table using `parse()`.
Here's an example of a simple table::
===== =====
col 1 col 2
===== =====
1 Second column of row 1.
2 Second column of row 2.
Second line of paragraph.
3 - Second column of row 3.
- Second item in bullet
list (row 3, column 2).
4 is a span
------------
5
===== =====
Top and bottom borders use '=', column span underlines use '-', column
separation is indicated with spaces.
Passing the above table to the `parse()` method will result in the
following data structure, whose interpretation is the same as for
`GridTableParser`::
([5, 25],
[[(0, 0, 1, ['col 1']),
(0, 0, 1, ['col 2'])]],
[[(0, 0, 3, ['1']),
(0, 0, 3, ['Second column of row 1.'])],
[(0, 0, 4, ['2']),
(0, 0, 4, ['Second column of row 2.',
'Second line of paragraph.'])],
[(0, 0, 6, ['3']),
(0, 0, 6, ['- Second column of row 3.',
'',
'- Second item in bullet',
' list (row 3, column 2).'])],
[(0, 1, 10, ['4 is a span'])],
[(0, 0, 12, ['5']),
(0, 0, 12, [''])]])
"""
head_body_separator_pat = re.compile('=[ =]*$')
span_pat = re.compile('-[ -]*$')
def setup(self, block):
self.block = block[:] # make a copy; it will be modified
self.block.disconnect() # don't propagate changes to parent
# Convert top & bottom borders to column span underlines:
self.block[0] = self.block[0].replace('=', '-')
self.block[-1] = self.block[-1].replace('=', '-')
self.head_body_sep = None
self.columns = []
self.border_end = None
self.table = []
self.done = [-1] * len(block[0])
self.rowseps = {0: [0]}
self.colseps = {0: [0]}
def parse_table(self):
"""
First determine the column boundaries from the top border, then
process rows. Each row may consist of multiple lines; accumulate
lines until a row is complete. Call `self.parse_row` to finish the
job.
"""
# Top border must fully describe all table columns.
self.columns = self.parse_columns(self.block[0], 0)
self.border_end = self.columns[-1][1]
firststart, firstend = self.columns[0]
offset = 1 # skip top border
start = 1
text_found = None
while offset < len(self.block):
line = self.block[offset]
if self.span_pat.match(line):
# Column span underline or border; row is complete.
self.parse_row(self.block[start:offset], start,
(line.rstrip(), offset))
start = offset + 1
text_found = None
elif line[firststart:firstend].strip():
# First column not blank, therefore it's a new row.
if text_found and offset != start:
self.parse_row(self.block[start:offset], start)
start = offset
text_found = 1
elif not text_found:
start = offset + 1
offset += 1
def parse_columns(self, line, offset):
"""
Given a column span underline, return a list of (begin, end) pairs.
"""
cols = []
end = 0
while True:
begin = line.find('-', end)
end = line.find(' ', begin)
if begin < 0:
break
if end < 0:
end = len(line)
cols.append((begin, end))
if self.columns:
if cols[-1][1] != self.border_end:
raise TableMarkupError('Column span incomplete in table '
'line %s.' % (offset+1),
offset=offset)
# Allow for an unbounded rightmost column:
cols[-1] = (cols[-1][0], self.columns[-1][1])
return cols
def init_row(self, colspec, offset):
i = 0
cells = []
for start, end in colspec:
morecols = 0
try:
assert start == self.columns[i][0]
while end != self.columns[i][1]:
i += 1
morecols += 1
except (AssertionError, IndexError):
raise TableMarkupError('Column span alignment problem '
'in table line %s.' % (offset+2),
offset=offset+1)
cells.append([0, morecols, offset, []])
i += 1
return cells
def parse_row(self, lines, start, spanline=None):
"""
Given the text `lines` of a row, parse it and append to `self.table`.
The row is parsed according to the current column spec (either
`spanline` if provided or `self.columns`). For each column, extract
text from each line, and check for text in column margins. Finally,
adjust for insignificant whitespace.
"""
if not (lines or spanline):
# No new row, just blank lines.
return
if spanline:
columns = self.parse_columns(*spanline)
span_offset = spanline[1]
else:
columns = self.columns[:]
span_offset = start
self.check_columns(lines, start, columns)
row = self.init_row(columns, start)
for i in range(len(columns)):
start, end = columns[i]
cellblock = lines.get_2D_block(0, start, len(lines), end)
cellblock.disconnect() # lines in cell can't sync with parent
cellblock.replace(self.double_width_pad_char, '')
row[i][3] = cellblock
self.table.append(row)
def check_columns(self, lines, first_line, columns):
"""
Check for text in column margins and text overflow in the last column.
Raise TableMarkupError if anything but whitespace is in column margins.
Adjust the end value for the last column if there is text overflow.
"""
# "Infinite" value for a dummy last column's beginning, used to
# check for text overflow:
columns.append((sys.maxsize, None))
lastcol = len(columns) - 2
# combining characters do not contribute to the column width
lines = [strip_combining_chars(line) for line in lines]
for i in range(len(columns) - 1):
start, end = columns[i]
nextstart = columns[i+1][0]
offset = 0
for line in lines:
if i == lastcol and line[end:].strip():
text = line[start:].rstrip()
new_end = start + len(text)
main_start, main_end = self.columns[-1]
columns[i] = (start, max(main_end, new_end))
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
raise TableMarkupError('Text in column margin '
'in table line %s.' % (first_line+offset+1),
offset=first_line+offset)
offset += 1
columns.pop()
def structure_from_cells(self):
colspecs = [end - start for start, end in self.columns]
first_body_row = 0
if self.head_body_sep:
for i in range(len(self.table)):
if self.table[i][0][2] > self.head_body_sep:
first_body_row = i
break
return (colspecs, self.table[:first_body_row],
self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
"""
Extend the list values of `master` with those from `newdata`.
Both parameters must be dictionaries containing list values.
"""
for key, values in list(newdata.items()):
master.setdefault(key, []).extend(values)
| {
"repo_name": "LEXmono/q",
"path": "docutils/parsers/rst/tableparser.py",
"copies": "4",
"size": "21041",
"license": "apache-2.0",
"hash": 2748175589781108000,
"line_mean": 37.6783088235,
"line_max": 79,
"alpha_frac": 0.5171807424,
"autogenerated": false,
"ratio": 4.147644391878573,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0001648933618243081,
"num_lines": 544
} |
"""Widget class, which places its children in a table grid"""
from Container import Container
from Constants import *
import base
class Table (Container):
"""Table (rows, cols) -> Table
    A container widget, which packs its children in a table-like manner.
    The Table class is a layout container, which packs its children in a
    regular, table-like manner and allows each widget to be aligned
    within its table cell. The table uses 0-based indexing, which means
    that if 4 rows are created, they can be accessed using a row value
    ranging from 0 to 3. The same applies to the columns.
The Table provides read-only 'columns' and 'rows' attributes, which
are the amount of columns and rows within that Table.
totalr = table.rows
totalc = table.columns
To access the children of the Table the 'grid' attribute can be
used. It is a dictionary containing the widgets as values. To access
a widget, a tuple containing the row and column is used as the
dictionary key.
widget = table.grid[(0, 3)]
widget = table.grid[(7, 0)]
The above examples will get the widget located at the first row,
fourth column (0, 3) and the eighth row, first column (7, 0).
The layout for each widget within the table can be set individually
using the set_align() method. Alignments can be combined, which
means, that a ALIGN_TOP | ALIGN_LEFT would align the widget at the
topleft corner of its cell.
    However, not every alignment makes sense, so an ALIGN_TOP | ALIGN_BOTTOM
    would cause the widget to be placed at the top. The priority
    order for the alignments follows; the lower the value, the higher the
    priority.
Alignment Priority
-----------------------
ALIGN_TOP 0
ALIGN_BOTTOM 1
ALIGN_LEFT 0
ALIGN_RIGHT 1
ALIGN_NONE 2
Default action (invoked by activate()):
None
Mnemonic action (invoked by activate_mnemonic()):
None
Attributes:
columns - The column amount of the Table.
rows - The row amount of the Table.
grid - Grid to hold the children of the Table.
"""
def __init__ (self, rows, cols):
Container.__init__ (self)
if (type (rows) != int) or (type (cols) != int):
raise TypeError ("Arguments must be positive integers")
if (rows <= 0) or (cols <= 0):
raise ValueError ("Arguments must be positive integers")
self._cols = cols
self._rows = rows
# The grid for the children.
self._grid = {}
for i in xrange (self._rows):
for j in xrange (self._cols):
self._grid[(i, j)] = None # None means unused, !None is used.
# Grid for the layout.
self._layout = {}
for i in xrange (self._rows):
for j in xrange (self._cols):
self._layout[(i, j)] = ALIGN_NONE
# Width and height grids.
self._colwidth = {}
self._rowheight = {}
for i in xrange (self._cols):
self._colwidth[i] = 0
for i in xrange (self._rows):
self._rowheight[i] = 0
self.dirty = True # Enforce creation of the internals.
def add_child (self, row, col, widget):
"""T.add_child (...) -> None
Adds a widget into the cell located at (row, col) of the Table.
Raises a ValueError, if the passed row and col arguments are not
within the cell range of the Table.
Raises an Exception, if the cell at the passed row and col
coordinates is already occupied.
"""
if (row, col) not in self.grid:
raise ValueError ("Cell (%d, %d) out of range" % (row, col))
if self.grid[(row, col)] != None:
raise Exception ("Cell (%d, %d) already occupied" % (row, col))
self.grid[(row, col)] = widget
Container.add_child (self, widget)
def remove_child (self, widget):
"""T.remove_widget (...) -> None
Removes a widget from the Table.
"""
Container.remove_child (self, widget)
for i in xrange (self._rows):
for j in xrange (self._cols):
if self.grid[(i, j)] == widget:
self.grid[(i, j)] = None
def set_children (self, children):
"""T.set_children (...) -> None
Sets the children of the Table.
        When setting the children of the Table, keep in mind that the
        children will be added row by row, causing the Table to fill
        its first row, then the second and so on (see the worked example
        after this method).
Raises a ValueError, if the passed amount of children exceeds
the cell amount of the Table.
"""
if children != None:
if len (children) > (self.columns * self.rows):
raise ValueError ("children exceed the Table size.")
# Remove all children first.
for i in xrange (self._rows):
for j in xrange (self._cols):
self.grid[(i, j)] = None
Container.set_children (self, children)
if children == None:
return
cells = len (children)
for i in xrange (self._rows):
for j in xrange (self._cols):
self.grid[(i, j)] = children[-cells]
cells -= 1
if cells == 0:
return
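    # Worked example (a sketch, not from the source): for a Table (2, 2),
    # set_children ([a, b, c]) fills grid[(0, 0)] = a, grid[(0, 1)] = b,
    # grid[(1, 0)] = c and leaves grid[(1, 1)] as None.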
def insert_child (self, pos, *children):
"""C.insert_child (...) -> None
Inserts one or more children at the desired position.
Raises a NotImplementedError, as this method cannot be applied
to the Table.
"""
raise NotImplementedError
def set_focus (self, focus=True):
"""T.set_focus (focus=True) -> None
Overrides the set_focus() behaviour for the Table.
The Table class is not focusable by default. It is a layout
class for other widgets, so it does not need to get the input
focus and thus it will return false without doing anything.
"""
return False
def set_align (self, row, col, align=ALIGN_NONE):
"""T.set_align (...) -> None
Sets the alignment for a specific cell.
Raises a ValueError, if the passed row and col arguments are not
within the rows and columns of the Table.
Raises a TypeError, if the passed align argument is not a value
from ALIGN_TYPES.
"""
if (row, col) not in self._layout:
raise ValueError ("Cell (%d, %d) out of range" % (row, col))
if not constants_is_align (align):
raise TypeError ("align must be a value from ALIGN_TYPES")
self._layout[(row, col)] = align
self.dirty = True
def set_column_align (self, col, align=ALIGN_NONE):
"""T.set_column_align (...) -> None
Sets the alignment for a whole column range.
Raises a ValueError, if the passed col argument is not within
the column range of the Table.
Raises a TypeError, if the passed align argument is not a value from
ALIGN_TYPES.
"""
if (0, col) not in self._layout:
raise ValueError ("Column %d out of range" % col)
if not constants_is_align (align):
raise TypeError ("align must be a value from ALIGN_TYPES")
for i in xrange (self.rows):
self._layout[(i, col)] = align
self.dirty = True
def set_row_align (self, row, align=ALIGN_NONE):
"""T.set_row_align (...) -> None
Sets the alignment for a whole row.
Raises a ValueError, if the passed row argument is not within
the row range of the Table.
Raises a TypeError, if the passed align argument is not a value
from ALIGN_TYPES.
"""
if (row, 0) not in self._layout:
raise ValueError ("Row %d out of range" % row)
if not constants_is_align (align):
raise TypeError ("align must be a value from ALIGN_TYPES")
for i in xrange (self.columns):
self._layout[(row, i)] = align
self.dirty = True
def destroy (self):
"""T.destroy () -> None
        Destroys the Table and all its children and schedules them for
deletion by the renderer.
"""
Container.destroy (self)
del self._grid
del self._layout
del self._colwidth
del self._rowheight
def calculate_size (self):
"""T.calculate_size () -> int, int
Calculates the size needed by the children.
Calculates the size needed by the children and returns the
resulting width and height.
"""
for i in xrange (self._cols):
self._colwidth[i] = 0
for i in xrange (self._rows):
self._rowheight[i] = 0
spacing = self.spacing
# Fill the width and height grids with correct values.
for row in xrange (self._rows):
actheight = 0
for col in xrange (self._cols):
widget = self.grid[(row, col)]
if not widget: # No child here.
continue
cw = widget.width + spacing
ch = widget.height + spacing
if self._colwidth[col] < cw:
self._colwidth[col] = cw
if actheight < ch:
actheight = ch
if self._rowheight[row] < actheight:
self._rowheight[row] = actheight
height = reduce (lambda x, y: x + y, self._rowheight.values (), 0)
height += 2 * self.padding - spacing
width = reduce (lambda x, y: x + y, self._colwidth.values (), 0)
width += 2 * self.padding - spacing
return max (width, 0), max (height, 0)
def dispose_widgets (self):
"""T.dispose_widgets (...) -> None
Sets the children to their correct positions within the Table.
"""
# Move all widgets to their correct position.
spacing = self.spacing
padding = self.padding
x = padding
y = padding
for row in xrange (self._rows):
for col in xrange (self._cols):
widget = self.grid[(row, col)]
if not widget: # no child here
x += self._colwidth[col]
continue
                # Depending on the cell layout, move the widget to the
# desired position.
align = self._layout[(row, col)]
# Default align is centered.
posx = x + (self._colwidth[col] - widget.width - spacing) / 2
posy = y + (self._rowheight[row] - widget.height - spacing) / 2
if align & ALIGN_LEFT == ALIGN_LEFT:
posx = x
elif align & ALIGN_RIGHT == ALIGN_RIGHT:
posx = x + self._colwidth[col] - widget.width - spacing
if align & ALIGN_TOP == ALIGN_TOP:
posy = y
elif align & ALIGN_BOTTOM == ALIGN_BOTTOM:
posy = y + self._rowheight[row] - widget.height - spacing
widget.topleft = (posx, posy)
x += self._colwidth[col]
y += self._rowheight[row]
x = padding
def draw_bg (self):
"""T.draw_bg () -> Surface
Draws the background surface of the Table and returns it.
Creates the visible surface of the Table and returns it to the
caller.
"""
return base.GlobalStyle.engine.draw_table (self)
def draw (self):
"""T.draw () -> None
Draws the Table surface and places its children on it.
"""
Container.draw (self)
# Draw all children.
self.dispose_widgets ()
blit = self.image.blit
for widget in self.children:
blit (widget.image, widget.rect)
columns = property (lambda self: self._cols,
doc = "The column amount of the Table.")
rows = property (lambda self: self._rows,
doc = "The row amount of the Table.")
grid = property (lambda self: self._grid, doc = "The grid of the Table.")
| {
"repo_name": "prim/ocempgui",
"path": "ocempgui/widgets/Table.py",
"copies": "1",
"size": "13705",
"license": "bsd-2-clause",
"hash": -8869222119175977000,
"line_mean": 35.5466666667,
"line_max": 79,
"alpha_frac": 0.5842393287,
"autogenerated": false,
"ratio": 4.330173775671406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5414413104371405,
"avg_score": null,
"num_lines": null
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
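    # Hedged reST usage sketch ("csv-table" is the name this class is
    # registered under elsewhere in docutils):
    #
    #   .. csv-table:: Frozen Delights!
    #      :header-rows: 1
    #
    #      "Treat", "Quantity"
    #      "Albatross", 2.99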
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.'
% (self.name, error), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], error),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
@staticmethod
def decode_from_csv(s):
return s.decode('utf-8')
@staticmethod
def encode_for_csv(s):
return s.encode('utf-8')
else:
@staticmethod
def decode_from_csv(s):
return s
@staticmethod
def encode_for_csv(s):
return s
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "spreeker/democracygame",
"path": "external_apps/docutils-snapshot/build/lib/docutils/parsers/rst/directives/tables.py",
"copies": "2",
"size": "19722",
"license": "bsd-3-clause",
"hash": -2562131729814650400,
"line_mean": 42.9242761693,
"line_max": 87,
"alpha_frac": 0.5469019369,
"autogenerated": false,
"ratio": 4.293925538863488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5840827475763488,
"avg_score": null,
"num_lines": null
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import csv
import os.path
import sys
from docutils import io, nodes, statemachine, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.utils import SystemMessagePropagation
class Table(Directive):
"""
Generic table base class.
"""
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.'
% (self.name, error), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], error),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "Soya93/Extract-Refactoring",
"path": "python/helpers/py2only/docutils/parsers/rst/directives/tables.py",
"copies": "5",
"size": "19735",
"license": "apache-2.0",
"hash": -7019802616764362000,
"line_mean": 43.2488789238,
"line_max": 87,
"alpha_frac": 0.5478591335,
"autogenerated": false,
"ratio": 4.279982650184341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7327841783684342,
"avg_score": null,
"num_lines": null
} |
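
A minimal usage sketch for the CSVTable directive implemented in the entry
above, assuming docutils is importable; the table title and data below are
illustrative only.

# Render a small csv-table to docutils' pseudo-XML for inspection.
from docutils.core import publish_string

rst_source = """\
.. csv-table:: Frozen Delights
   :header: "Treat", "Quantity", "Description"
   :widths: 15, 10, 30

   "Albatross", 2.99, "On a stick!"
   "Popcorn", 1.99, "Straight from the oven"
"""

print(publish_string(rst_source, writer_name='pseudoxml').decode('utf-8'))
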
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.'
% (self.name, error), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], error),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "gnuhub/intellij-community",
"path": "python/helpers/docutils/parsers/rst/directives/tables.py",
"copies": "49",
"size": "19736",
"license": "apache-2.0",
"hash": 6214422261960931000,
"line_mean": 43.1521252796,
"line_max": 87,
"alpha_frac": 0.5478313741,
"autogenerated": false,
"ratio": 4.2801995228800696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
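
The ListTable directive in the entry above consumes a uniform two-level
bullet list. A minimal sketch of its input format, assuming docutils is
importable; the data is illustrative only.

# Render a small list-table to docutils' pseudo-XML for inspection.
from docutils.core import publish_string

rst_source = """\
.. list-table:: Frozen Delights
   :widths: 15 10
   :header-rows: 1
   :stub-columns: 1

   * - Treat
     - Quantity
   * - Albatross
     - 2.99
"""

print(publish_string(rst_source, writer_name='pseudoxml').decode('utf-8'))
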
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.'
% (self.name, error), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], error),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "rimbalinux/MSISDNArea",
"path": "docutils/parsers/rst/directives/tables.py",
"copies": "2",
"size": "20183",
"license": "bsd-3-clause",
"hash": -2367258288781750300,
"line_mean": 43.1521252796,
"line_max": 87,
"alpha_frac": 0.53569836,
"autogenerated": false,
"ratio": 4.33483676975945,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.587053512975945,
"avg_score": null,
"num_lines": null
} |
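
The DocutilsDialect class in the entry above maps the directive's
``delim``/``quote``/``escape`` options onto attributes of Python's
``csv.Dialect``. The following standalone sketch shows the same mapping with
illustrative option values; it relies only on the standard library.

import csv

class DemoDialect(csv.Dialect):
    # Mirrors DocutilsDialect's defaults, but with ';' as the field
    # delimiter and a single quote as the quote character (as if the
    # directive had been given :delim: ; and :quote: ').
    delimiter = ';'
    quotechar = "'"
    doublequote = True
    skipinitialspace = True
    lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL

# The quoted field keeps its embedded delimiter, just as in the directive.
for row in csv.reader(["a;'b;c';d\n"], dialect=DemoDialect()):
    print(row)  # -> ['a', 'b;c', 'd']
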
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "paaschpa/badcomputering",
"path": "docutils/parsers/rst/directives/tables.py",
"copies": "6",
"size": "20048",
"license": "bsd-3-clause",
"hash": -1399652706849805000,
"line_mean": 43.1585903084,
"line_max": 87,
"alpha_frac": 0.5478850758,
"autogenerated": false,
"ratio": 4.283760683760684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001003034341060516,
"num_lines": 454
} |
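
Unlike the earlier copies, the entry above also registers a "name" option
and calls self.add_name(), so the generated table can carry an explicit
reference name. A minimal sketch, assuming a docutils version that supports
the :name: option is importable; names and data are illustrative only.

from docutils.core import publish_string

rst_source = """\
.. csv-table:: Inventory
   :name: inventory-table
   :header-rows: 1

   "Item", "Qty"
   "Widget", 4
   "Gadget", 12
"""

print(publish_string(rst_source, writer_name='pseudoxml').decode('utf-8'))
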
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
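            # e.g. max_cols == 3 gives [33, 33, 33]; docutils treats these as
            # relative column weights, so they need not sum to exactly 100.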
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
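                    # Each appended cell is the 4-tuple shape the table
                    # builder expects: three bookkeeping fields (no extra
                    # row/column spans, zero line offset) plus the cell body;
                    # an empty list produces an empty cell.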
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
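    # For reference (illustrative sketch, not tested here): a reST invocation
    # exercising the dialect options above might read
    #
    #   .. csv-table:: Inventory
    #      :header-rows: 1
    #      :delim: ;
    #      :quote: '
    #
    #      'Name';'Qty'
    #      'Albatross';2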
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
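    # Shape of the return value, traced from the loop above: for the single
    # input line 'a,b' it is rows == [[(0, 0, 0, StringList(['a'])),
    # (0, 0, 0, StringList(['b']))]] with max_cols == 2.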
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
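if __name__ == '__main__':
    # Illustrative addition: DocutilsDialect is a plain csv.Dialect, so the
    # stdlib csv module can use it directly; the ':delim:' option is
    # simulated here with a bare dict. Prints [['a', 'b', 'c;d']].
    dialect = CSVTable.DocutilsDialect({'delim': u';'})
    print(list(csv.reader(['a;b;"c;d"\n'], dialect=dialect)))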
| {
"repo_name": "ajaxsys/dict-admin",
"path": "docutils/parsers/rst/directives/tables.py",
"copies": "2",
"size": "20502",
"license": "bsd-3-clause",
"hash": 7466090107343006000,
"line_mean": 43.1585903084,
"line_max": 87,
"alpha_frac": 0.5357526095,
"autogenerated": false,
"ratio": 4.3353774582364135,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5871130067736413,
"avg_score": null,
"num_lines": null
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = str(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = str(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, detail), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(
source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler),
handle_io_errors=None)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
csv_file = io.StringInput(
source=csv_text, source_path=source, encoding=encoding,
error_handler=(self.state.document.settings.\
input_encoding_error_handler))
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
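if __name__ == '__main__':
    # Illustrative addition: docutils wires these directives up itself, but
    # register_directive is the public hook for exposing one under a custom
    # name, after which the reST parser resolves ".. my-csv-table::" to it.
    directives.register_directive('my-csv-table', CSVTable)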
| {
"repo_name": "mcr/ietfdb",
"path": "docutils/parsers/rst/directives/tables.py",
"copies": "4",
"size": "20048",
"license": "bsd-3-clause",
"hash": -5380220328328392000,
"line_mean": 43.1585903084,
"line_max": 87,
"alpha_frac": 0.5478850758,
"autogenerated": false,
"ratio": 4.283760683760684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6831645759560684,
"avg_score": null,
"num_lines": null
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import csv
import os.path
import sys
from docutils import io, nodes, statemachine, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.utils import SystemMessagePropagation
from docutils.utils.error_reporting import SafeString
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = CSVTable.encode_for_csv(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = CSVTable.encode_for_csv(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = CSVTable.encode_for_csv(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation as detail:
return [detail.args[0]]
except csv.Error as detail:
message = str(detail)
if sys.version_info < (3,) and '1-character string' in message:
message += '\nwith Python 2.x this must be an ASCII character.'
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, message), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError as error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
            # Do not import the urllib modules at the top of this module:
            # the import may fail due to broken SSL dependencies, and it
            # takes about 0.15 seconds to load.
import urllib.request, urllib.error, urllib.parse
source = self.options['url']
try:
csv_text = urllib.request.urlopen(source).read()
except (urllib.error.URLError, IOError, OSError, ValueError) as error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
            csv_file = io.StringInput(
                source=csv_text, source_path=source, encoding=encoding,
                error_handler=error_handler)
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation as detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
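if __name__ == '__main__':
    # Hedged experiment (illustrative addition): build_table_from_list never
    # reads `self`, so it can be exercised standalone with None in that slot;
    # that is a detail of this implementation, not a supported API.
    def cell(text):
        return [nodes.paragraph(text=text)]
    data = [[cell('Name'), cell('Value')],
            [cell('alpha'), cell('1')]]
    table = ListTable.build_table_from_list(None, data, [50, 50], 1, 0)
    print(table.pformat())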
| {
"repo_name": "GunoH/intellij-community",
"path": "python/helpers/py3only/docutils/parsers/rst/directives/tables.py",
"copies": "44",
"size": "20399",
"license": "apache-2.0",
"hash": 8683139622190201000,
"line_mean": 43.636761488,
"line_max": 87,
"alpha_frac": 0.5500759841,
"autogenerated": false,
"ratio": 4.2810073452256034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010516376863683318,
"num_lines": 457
} |
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
import csv
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
class Table(Directive):
"""
Generic table base class.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
def make_title(self):
if self.arguments:
title_text = self.arguments[0]
text_nodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
def process_header_option(self):
source = self.state_machine.get_source(self.lineno - 1)
table_head = []
max_header_cols = 0
if 'header' in self.options: # separate table header in option
rows, max_header_cols = self.parse_csv_data_into_rows(
self.options['header'].split('\n'), self.HeaderDialect(),
source)
table_head.extend(rows)
return table_head, max_header_cols
def check_table_dimensions(self, rows, header_rows, stub_columns):
if len(rows) < header_rows:
error = self.state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data '
'supplied ("%s" directive).'
% (header_rows, len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(rows) == header_rows > 0:
error = self.state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(rows), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
for row in rows:
if len(row) < stub_columns:
error = self.state_machine.reporter.error(
                    '%s stub column(s) specified but only %s column(s) of '
'data supplied ("%s" directive).' %
(stub_columns, len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
if len(row) == stub_columns > 0:
error = self.state_machine.reporter.error(
                    'Insufficient data supplied (%s column(s)); no data remaining '
'for table body, required by "%s" directive.'
% (len(row), self.name), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
def get_column_widths(self, max_cols):
if 'widths' in self.options:
col_widths = self.options['widths']
if len(col_widths) != max_cols:
error = self.state_machine.reporter.error(
'"%s" widths do not match the number of columns in table '
'(%s).' % (self.name, max_cols), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif max_cols:
col_widths = [100 // max_cols] * max_cols
else:
error = self.state_machine.reporter.error(
'No table data detected in CSV file.', nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return col_widths
def extend_short_rows_with_empty_cells(self, columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
class RSTTable(Table):
def run(self):
if not self.content:
warning = self.state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: exactly '
'one table expected.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table_node = node[0]
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
class CSVTable(Table):
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.uri,
'encoding': directives.encoding,
'class': directives.class_option,
'name': directives.unchanged,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if 'delim' in options:
self.delimiter = CSVTable.encode_for_csv(options['delim'])
if 'keepspace' in options:
self.skipinitialspace = False
if 'quote' in options:
self.quotechar = CSVTable.encode_for_csv(options['quote'])
if 'escape' in options:
self.doublequote = False
self.escapechar = CSVTable.encode_for_csv(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
strict = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def check_requirements(self):
pass
def run(self):
try:
if (not self.state.document.settings.file_insertion_enabled
and ('file' in self.options
or 'url' in self.options)):
warning = self.state_machine.reporter.warning(
'File and URL access deactivated; ignoring "%s" '
'directive.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [warning]
self.check_requirements()
title, messages = self.make_title()
csv_data, source = self.get_csv_data()
table_head, max_header_cols = self.process_header_option()
rows, max_cols = self.parse_csv_data_into_rows(
csv_data, self.DocutilsDialect(self.options), source)
max_cols = max(max_cols, max_header_cols)
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(rows, header_rows, stub_columns)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = self.get_column_widths(max_cols)
self.extend_short_rows_with_empty_cells(max_cols,
(table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
message = str(detail)
if sys.version_info < (3,) and '1-character string' in message:
message += '\nwith Python 2.x this must be an ASCII character.'
error = self.state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s'
% (self.name, message), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = self.state.build_table(table, self.content_offset,
stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def get_csv_data(self):
"""
Get CSV data from the directive content, from an external
file, or from a URL reference.
"""
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
error_handler = self.state.document.settings.input_encoding_error_handler
if self.content:
# CSV data is from directive content.
if 'file' in self.options or 'url' in self.options:
error = self.state_machine.reporter.error(
'"%s" directive may not both specify an external file and'
' have content.' % self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
source = self.content.source(0)
csv_data = self.content
elif 'file' in self.options:
# CSV data is from an external file.
if 'url' in self.options:
error = self.state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously'
' specified for the "%s" directive.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(self.state.document.current_source))
source = os.path.normpath(os.path.join(source_dir,
self.options['file']))
source = utils.relative_path(None, source)
try:
self.state.document.settings.record_dependencies.add(source)
csv_file = io.FileInput(source_path=source,
encoding=encoding,
error_handler=error_handler)
csv_data = csv_file.read().splitlines()
except IOError, error:
severe = self.state_machine.reporter.severe(
u'Problems with "%s" directive path:\n%s.'
% (self.name, SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
elif 'url' in self.options:
# CSV data is from a URL.
# Do not import urllib2 at the top of the module because
# it may fail due to broken SSL dependencies, and it takes
# about 0.15 seconds to load.
import urllib2
source = self.options['url']
try:
csv_text = urllib2.urlopen(source).read()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = self.state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (self.name, self.options['url'], SafeString(error)),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(severe)
            csv_file = io.StringInput(
                source=csv_text, source_path=source, encoding=encoding,
                error_handler=error_handler)
csv_data = csv_file.read().splitlines()
else:
error = self.state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.'
% self.name, nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
return csv_data, source
if sys.version_info < (3,):
# 2.x csv module doesn't do Unicode
def decode_from_csv(s):
return s.decode('utf-8')
def encode_for_csv(s):
return s.encode('utf-8')
else:
def decode_from_csv(s):
return s
def encode_for_csv(s):
return s
decode_from_csv = staticmethod(decode_from_csv)
encode_for_csv = staticmethod(encode_for_csv)
def parse_csv_data_into_rows(self, csv_data, dialect, source):
# csv.py doesn't do Unicode; encode temporarily as UTF-8
csv_reader = csv.reader([self.encode_for_csv(line + '\n')
for line in csv_data],
dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
# decode UTF-8 back to Unicode
cell_text = self.decode_from_csv(cell)
cell_data = (0, 0, 0, statemachine.StringList(
cell_text.splitlines(), source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
class ListTable(Table):
"""
Implement tables whose data is encoded as a uniform two-level bullet list.
For further ideas, see
http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables
"""
option_spec = {'header-rows': directives.nonnegative_int,
'stub-columns': directives.nonnegative_int,
'widths': directives.positive_int_list,
'class': directives.class_option,
'name': directives.unchanged}
def run(self):
if not self.content:
error = self.state_machine.reporter.error(
'The "%s" directive is empty; content required.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return [error]
title, messages = self.make_title()
node = nodes.Element() # anonymous container for parsing
self.state.nested_parse(self.content, self.content_offset, node)
try:
num_cols, col_widths = self.check_list_content(node)
table_data = [[item.children for item in row_list[0]]
for row_list in node[0]]
header_rows = self.options.get('header-rows', 0)
stub_columns = self.options.get('stub-columns', 0)
self.check_table_dimensions(table_data, header_rows, stub_columns)
except SystemMessagePropagation, detail:
return [detail.args[0]]
table_node = self.build_table_from_list(table_data, col_widths,
header_rows, stub_columns)
table_node['classes'] += self.options.get('class', [])
self.add_name(table_node)
if title:
table_node.insert(0, title)
return [table_node] + messages
def check_list_content(self, node):
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.name,
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
list_node = node[0]
# Check for a uniform two-level bullet list:
for item_index in range(len(list_node)):
item = list_node[item_index]
if len(item) != 1 or not isinstance(item[0], nodes.bullet_list):
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.name, item_index + 1), nodes.literal_block(
self.block_text, self.block_text), line=self.lineno)
raise SystemMessagePropagation(error)
elif item_index:
# ATTN pychecker users: num_cols is guaranteed to be set in the
# "else" clause below for item_index==0, before this branch is
# triggered.
if len(item[0]) != num_cols:
error = self.state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'uniform two-level bullet list expected, but row %s '
'does not contain the same number of items as row 1 '
'(%s vs %s).'
% (self.name, item_index + 1, len(item[0]), num_cols),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
raise SystemMessagePropagation(error)
else:
num_cols = len(item[0])
col_widths = self.get_column_widths(num_cols)
return num_cols, col_widths
def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns):
table = nodes.table()
tgroup = nodes.tgroup(cols=len(col_widths))
table += tgroup
for col_width in col_widths:
colspec = nodes.colspec(colwidth=col_width)
if stub_columns:
colspec.attributes['stub'] = 1
stub_columns -= 1
tgroup += colspec
rows = []
for row in table_data:
row_node = nodes.row()
for cell in row:
entry = nodes.entry()
entry += cell
row_node += entry
rows.append(row_node)
if header_rows:
thead = nodes.thead()
thead.extend(rows[:header_rows])
tgroup += thead
tbody = nodes.tbody()
tbody.extend(rows[header_rows:])
tgroup += tbody
return table
| {
"repo_name": "gauribhoite/personfinder",
"path": "env/site-packages/docutils/parsers/rst/directives/tables.py",
"copies": "86",
"size": "20344",
"license": "apache-2.0",
"hash": -8469382722651313000,
"line_mean": 43.4192139738,
"line_max": 87,
"alpha_frac": 0.549252851,
"autogenerated": false,
"ratio": 4.2793437105595284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009878192088235342,
"num_lines": 458
} |
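Editor's sketch (not part of the file above): the ListTable directive consumes a uniform two-level bullet list -- first-level items are rows, second-level items are cells. Assuming a standard docutils installation, a minimal round-trip looks like this; the table content is invented for illustration.

from docutils.core import publish_string

source = """\
.. list-table:: Frozen Delights
   :header-rows: 1

   * - Treat
     - Quantity
   * - Albatross
     - 2.99
"""

# Pseudo-XML output exposes the table/tgroup/thead/tbody node tree
# that build_table_from_list() assembles.
print(publish_string(source, writer_name='pseudoxml').decode('utf-8'))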
# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Transmission Control Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from .decorators import deprecated
from .compat import compat_ord
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
class TCP(dpkt.Packet):
"""Transmission Control Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of TCP.
TODO.
"""
__hdr__ = (
('sport', 'H', 0xdead),
('dport', 'H', 0),
('seq', 'I', 0xdeadbeef),
('ack', 'I', 0),
('_off', 'B', ((5 << 4) | 0)),
('flags', 'B', TH_SYN),
('win', 'H', TCP_WIN_MAX),
('sum', 'H', 0),
('urp', 'H', 0)
)
opts = b''
@property
def off(self):
return self._off >> 4
@off.setter
def off(self, off):
self._off = (off << 4) | (self._off & 0xf)
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __bytes__(self):
return self.pack_hdr() + bytes(self.opts) + bytes(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self._off >> 4) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError('invalid header length')
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27
def parse_opts(buf):
"""Parse TCP option buffer into a list of (option, data) tuples."""
opts = []
while buf:
o = compat_ord(buf[0])
if o > TCP_OPT_NOP:
try:
# advance buffer at least 2 bytes = 1 type + 1 length
l = max(2, compat_ord(buf[1]))
d, buf = buf[2:l], buf[l:]
except (IndexError, ValueError):
# print 'bad option', repr(str(buf))
opts.append(None) # XXX
break
else:
# options 0 and 1 are not followed by length byte
d, buf = b'', buf[1:]
opts.append((o, d))
return opts
def test_parse_opts():
# normal scenarios
buf = b'\x02\x04\x23\x00\x01\x01\x04\x02'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, b'\x23\x00'),
(TCP_OPT_NOP, b''),
(TCP_OPT_NOP, b''),
(TCP_OPT_SACKOK, b'')
]
buf = b'\x01\x01\x05\x0a\x37\xf8\x19\x70\x37\xf8\x29\x78'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_NOP, b''),
(TCP_OPT_NOP, b''),
(TCP_OPT_SACK, b'\x37\xf8\x19\x70\x37\xf8\x29\x78')
]
# test a zero-length option
buf = b'\x02\x00\x01'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, b''),
(TCP_OPT_NOP, b'')
]
# test a one-byte malformed option
buf = b'\xff'
opts = parse_opts(buf)
assert opts == [None]
def test_offset():
tcpheader = TCP(b'\x01\xbb\xc0\xd7\xb6\x56\xa8\xb9\xd1\xac\xaa\xb1\x50\x18\x40\x00\x56\xf8\x00\x00')
assert tcpheader.off == 5
# test setting header offset
tcpheader.off = 8
assert bytes(tcpheader) == b'\x01\xbb\xc0\xd7\xb6\x56\xa8\xb9\xd1\xac\xaa\xb1\x80\x18\x40\x00\x56\xf8\x00\x00'
if __name__ == '__main__':
# Runs all the test associated with this class/file
test_parse_opts()
test_offset()
print('Tests Successful...')
| {
"repo_name": "dimagol/trex-core",
"path": "scripts/external_libs/dpkt-1.9.1/dpkt/tcp.py",
"copies": "3",
"size": "4972",
"license": "apache-2.0",
"hash": -8298814106276147000,
"line_mean": 29.1333333333,
"line_max": 114,
"alpha_frac": 0.5808527755,
"autogenerated": false,
"ratio": 2.854190585533869,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9930331831855066,
"avg_score": 0.0009423058357605641,
"num_lines": 165
} |
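For orientation, here is a minimal sketch of the API the file above exposes (assuming dpkt >= 1.9 is installed); the header bytes are the same twenty used in test_offset().

from dpkt.tcp import TCP, parse_opts

buf = b'\x01\xbb\xc0\xd7\xb6\x56\xa8\xb9\xd1\xac\xaa\xb1\x50\x18\x40\x00\x56\xf8\x00\x00'
tcp = TCP(buf)                # unpack() fills the header fields from buf
print(tcp.sport, tcp.dport)   # 443 49367
print(tcp.off)                # 5 -> 5 * 4 bytes = 20-byte header
print(parse_opts(tcp.opts))   # [] -- opts is empty when off == 5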
# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Transmission Control Protocol."""
import dpkt
from decorators import deprecated
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
class TCP(dpkt.Packet):
__hdr__ = (
('sport', 'H', 0xdead),
('dport', 'H', 0),
('seq', 'I', 0xdeadbeefL),
('ack', 'I', 0),
('_off', 'B', ((5 << 4) | 0)),
('flags', 'B', TH_SYN),
('win', 'H', TCP_WIN_MAX),
('sum', 'H', 0),
('urp', 'H', 0)
)
opts = ''
@property
def off(self):
return self._off >> 4
@off.setter
def off(self, off):
self._off = (off << 4) | (self._off & 0xf)
# Deprecated methods, will be removed in the future
# =================================================
@deprecated('off')
def _get_off(self): return self.off
@deprecated('off')
def _set_off(self, off): self.off = off
# =================================================
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __str__(self):
return self.pack_hdr() + str(self.opts) + str(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self._off >> 4) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError, 'invalid header length'
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27
def parse_opts(buf):
"""Parse TCP option buffer into a list of (option, data) tuples."""
opts = []
while buf:
o = ord(buf[0])
if o > TCP_OPT_NOP:
try:
# advance buffer at least 2 bytes = 1 type + 1 length
l = max(2, ord(buf[1]))
d, buf = buf[2:l], buf[l:]
except (IndexError, ValueError):
# print 'bad option', repr(str(buf))
opts.append(None) # XXX
break
else:
# options 0 and 1 are not followed by length byte
d, buf = '', buf[1:]
opts.append((o, d))
return opts
def test_parse_opts():
# normal scenarios
buf = '\x02\x04\x23\x00\x01\x01\x04\x02'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, '\x23\x00'),
(TCP_OPT_NOP, ''),
(TCP_OPT_NOP, ''),
(TCP_OPT_SACKOK, '')
]
buf = '\x01\x01\x05\x0a\x37\xf8\x19\x70\x37\xf8\x29\x78'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_NOP, ''),
(TCP_OPT_NOP, ''),
(TCP_OPT_SACK, '\x37\xf8\x19\x70\x37\xf8\x29\x78')
]
# test a zero-length option
buf = '\x02\x00\x01'
opts = parse_opts(buf)
assert opts == [
(TCP_OPT_MSS, ''),
(TCP_OPT_NOP, '')
]
# test a one-byte malformed option
buf = '\xff'
opts = parse_opts(buf)
assert opts == [None]
# @profile
def test_packet():
# import tcp
# import ip
# import ethernet
srcmac = '\x0c\xdaAw{\x06'
dstmac = '\xac\x87\xa3<}\x88'
srcip = 'z\xe1\xe3\xa8'
dstip = '\nG1\xe4'
sport = 80
dport = 8080
for i in xrange(10000):
t = TCP(sport=sport,dport=dport,flags=0x02,seq=0,ack=0,win=0x2000)
# i = ip.IP(dst=dnet.ip_aton(dstip),
# src=dnet.ip_aton(Localip),
# p=ip.IP_PROTO_TCP,
# data=t)
# i.len = i.__len__()
#
# e = ethernet.Ethernet(
# dst=dstmac,
# src=srcmac,
# data=i)
if __name__ == '__main__':
# Runs all the test associated with this class/file
# test_parse_opts()
# print 'Tests Successful...'
    test_packet()
| {
"repo_name": "yangbh/dpkt",
"path": "dpkt/tcp.py",
"copies": "1",
"size": "4732",
"license": "bsd-3-clause",
"hash": 7463971067034633000,
"line_mean": 25.1491712707,
"line_max": 69,
"alpha_frac": 0.6090448014,
"autogenerated": false,
"ratio": 2.482686253934942,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8408633542187064,
"avg_score": 0.0366195026295756,
"num_lines": 181
} |
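Both this Python 2 variant and the Python 3 one above pack the TCP data offset into the high nibble of a single header byte; a small worked example of that bit arithmetic (editor's sketch):

_off = (5 << 4) | 0              # header default: 0x50
assert _off >> 4 == 5            # the off getter: 5 words = 20 bytes
_off = (8 << 4) | (_off & 0xf)   # the off setter with off = 8
assert _off == 0x80              # 8 words = 32 bytes -> 12 bytes of options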
# $Id: tcp.py 42 2007-08-02 22:38:47Z jon.oberheide $
"""Transmission Control Protocol."""
import dpkt
# TCP control flags
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
TCP_PORT_MAX = 65535 # maximum port
TCP_WIN_MAX = 65535 # maximum (unscaled) window
class TCP(dpkt.Packet):
__hdr__ = (
('sport', 'H', 0xdead),
('dport', 'H', 0),
('seq', 'I', 0xdeadbeefL),
('ack', 'I', 0),
('off_x2', 'B', ((5 << 4) | 0)),
('flags', 'B', TH_SYN),
('win', 'H', TCP_WIN_MAX),
('sum', 'H', 0),
('urp', 'H', 0)
)
opts = ''
def _get_off(self): return self.off_x2 >> 4
def _set_off(self, off): self.off_x2 = (off << 4) | (self.off_x2 & 0xf)
off = property(_get_off, _set_off)
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __str__(self):
return self.pack_hdr() + self.opts + str(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self.off_x2 >> 4) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError, 'invalid header length'
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
self.data = buf[self.__hdr_len__ + ol:]
# Options (opt_type) - http://www.iana.org/assignments/tcp-parameters
TCP_OPT_EOL = 0 # end of option list
TCP_OPT_NOP = 1 # no operation
TCP_OPT_MSS = 2 # maximum segment size
TCP_OPT_WSCALE = 3 # window scale factor, RFC 1072
TCP_OPT_SACKOK = 4 # SACK permitted, RFC 2018
TCP_OPT_SACK = 5 # SACK, RFC 2018
TCP_OPT_ECHO = 6 # echo (obsolete), RFC 1072
TCP_OPT_ECHOREPLY = 7 # echo reply (obsolete), RFC 1072
TCP_OPT_TIMESTAMP = 8 # timestamp, RFC 1323
TCP_OPT_POCONN = 9 # partial order conn, RFC 1693
TCP_OPT_POSVC = 10 # partial order service, RFC 1693
TCP_OPT_CC = 11 # connection count, RFC 1644
TCP_OPT_CCNEW = 12 # CC.NEW, RFC 1644
TCP_OPT_CCECHO = 13 # CC.ECHO, RFC 1644
TCP_OPT_ALTSUM = 14 # alt checksum request, RFC 1146
TCP_OPT_ALTSUMDATA = 15 # alt checksum data, RFC 1146
TCP_OPT_SKEETER = 16 # Skeeter
TCP_OPT_BUBBA = 17 # Bubba
TCP_OPT_TRAILSUM = 18 # trailer checksum
TCP_OPT_MD5 = 19 # MD5 signature, RFC 2385
TCP_OPT_SCPS = 20 # SCPS capabilities
TCP_OPT_SNACK = 21 # selective negative acks
TCP_OPT_REC = 22 # record boundaries
TCP_OPT_CORRUPT = 23 # corruption experienced
TCP_OPT_SNAP = 24 # SNAP
TCP_OPT_TCPCOMP = 26 # TCP compression filter
TCP_OPT_MAX = 27
def parse_opts(buf):
"""Parse TCP option buffer into a list of (option, data) tuples."""
opts = []
while buf:
o = ord(buf[0])
if o > TCP_OPT_NOP:
try:
l = ord(buf[1])
d, buf = buf[2:l], buf[l:]
except ValueError:
#print 'bad option', repr(str(buf))
opts.append(None) # XXX
break
else:
d, buf = '', buf[1:]
opts.append((o,d))
return opts
| {
"repo_name": "somethingnew2-0/CS642-HW2",
"path": "dpkt/tcp.py",
"copies": "15",
"size": "3226",
"license": "mit",
"hash": 329827176222244740,
"line_mean": 31.9183673469,
"line_max": 75,
"alpha_frac": 0.5790452573,
"autogenerated": false,
"ratio": 2.6705298013245033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
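Unlike the newer copies above, this version's parse_opts() takes the option length straight from ord(buf[1]) with no max(2, ...) clamp, so a zero length byte appears to leave buf unchanged and loop forever; its except clause also catches only ValueError, not the IndexError a truncated one-byte option raises. A guarded re-implementation, as an editor's sketch in Python 3 terms:

def parse_opts_guarded(buf):
    """Parse a TCP options buffer into (kind, data) tuples, defensively."""
    opts = []
    while buf:
        o = buf[0]                   # option kind (bytes indexing -> int)
        if o > 1:                    # anything above NOP carries a length byte
            try:
                l = max(2, buf[1])   # always advance at least kind + length
                d, buf = buf[2:l], buf[l:]
            except IndexError:       # truncated single-byte option
                opts.append(None)
                break
        else:                        # EOL (0) / NOP (1): no length byte
            d, buf = b'', buf[1:]
        opts.append((o, d))
    return opts

assert parse_opts_guarded(b'\x02\x00\x01') == [(2, b''), (1, b'')]
assert parse_opts_guarded(b'\xff') == [None]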
# $Id: telnet.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Telnet."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from .compat import compat_ord
IAC = 255 # interpret as command:
DONT = 254 # you are not to use option
DO = 253 # please, you use option
WONT = 252 # I won't use option
WILL = 251 # I will use option
SB = 250 # interpret as subnegotiation
GA = 249 # you may reverse the line
EL = 248 # erase the current line
EC = 247 # erase the current character
AYT = 246 # are you there
AO = 245 # abort output--but let prog finish
IP = 244 # interrupt process--permanently
BREAK = 243 # break
DM = 242 # data mark--for connect. cleaning
NOP = 241 # nop
SE = 240 # end sub negotiation
EOR = 239 # end of record (transparent mode)
ABORT = 238 # Abort process
SUSP = 237 # Suspend process
xEOF = 236 # End of file: EOF is already used...
SYNCH = 242 # for telfunc calls
def strip_options(buf):
"""Return a list of lines and dict of options from telnet data."""
l = buf.split(struct.pack("B", IAC))
# print l
b = []
d = {}
subopt = False
for w in l:
if not w:
continue
o = compat_ord(w[0])
if o > SB:
# print 'WILL/WONT/DO/DONT/IAC', `w`
w = w[2:]
elif o == SE:
# print 'SE', `w`
w = w[1:]
subopt = False
elif o == SB:
# print 'SB', `w`
subopt = True
for opt in (b'USER', b'DISPLAY', b'TERM'):
p = w.find(opt + b'\x01')
if p != -1:
d[opt] = w[p + len(opt) + 1:].split(b'\x00', 1)[0]
w = None
elif subopt:
w = None
if w:
w = w.replace(b'\x00', b'\n').splitlines()
if not w[-1]: w.pop()
b.extend(w)
return b, d
def test_telnet():
l = []
s = b"\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfd\x01fugly\r\x00yoda\r\x00bashtard\r\x00"
l.append(s)
s = b'\xff\xfd\x01\xff\xfd\x03\xff\xfb\x18\xff\xfb\x1f\xff\xfa\x1f\x00X\x002\xff\xf0admin\r\x00\xff\xfa\x18\x00LINUX\xff\xf0foobar\r\x00enable\r\x00foobar\r\x00\r\x00show ip int Vlan 666\r\x00'
l.append(s)
s = b'\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb&\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb\'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa\'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb"\xff\xfa"\x03\x01\x03\x00\x03b\x03\x04\x02\x0f\x05\x00\xff\xff\x07b\x1c\x08\x02\x04\tB\x1a\n\x02\x7f\x0b\x02\x15\x0c\x02\x17\r\x02\x12\x0e\x02\x16\x0f\x02\x11\x10\x02\x13\x11\x00\xff\xff\x12\x00\xff\xff\xff\xf0\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfa"\x01\x0f\xff\xf0\xff\xfd\x01\xff\xfe\x01\xff\xfa"\x03\x01\x80\x00\xff\xf0\xff\xfd\x01werd\r\n\xff\xfe\x01yoda\r\n\xff\xfd\x01darthvader\r\n\xff\xfe\x01'
l.append(s)
exp = [([b'fugly', b'yoda', b'bashtard'], {b'USER': b'dugsong', b'DISPLAY': b'doughboy.citi.umich.edu:0.0'}),
([b'admin', b'foobar', b'enable', b'foobar', b'', b'show ip int Vlan 666'], {}),
([b'werd', b'yoda', b'darthvader'], {b'USER': b'dugsong', b'DISPLAY': b'doughboy.citi.umich.edu:0.0'})]
assert (list(map(strip_options, l)) == exp)
if __name__ == '__main__':
test_telnet()
print('Tests Successful...')
| {
"repo_name": "smutt/dpkt",
"path": "dpkt/telnet.py",
"copies": "3",
"size": "3943",
"license": "bsd-3-clause",
"hash": 5144583564762358000,
"line_mean": 44.8488372093,
"line_max": 824,
"alpha_frac": 0.6134922648,
"autogenerated": false,
"ratio": 2.3085480093676813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9411494601970438,
"avg_score": 0.00210913443944866,
"num_lines": 86
} |
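A minimal sketch of strip_options() in use (assuming dpkt >= 1.9 is installed): IAC negotiation bytes are discarded and the NUL-terminated line data is kept.

from dpkt.telnet import strip_options

# b'\xff\xfd\x01' is IAC DO 1 (echo); the login line follows in the clear.
lines, options = strip_options(b'\xff\xfd\x01login\r\x00')
print(lines)    # [b'login']
print(options)  # {} -- no USER/DISPLAY/TERM suboptions in this capture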
# $Id: telnet.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Telnet."""
IAC = 255 # interpret as command:
DONT = 254 # you are not to use option
DO = 253 # please, you use option
WONT = 252 # I won't use option
WILL = 251 # I will use option
SB = 250 # interpret as subnegotiation
GA = 249 # you may reverse the line
EL = 248 # erase the current line
EC = 247 # erase the current character
AYT = 246 # are you there
AO = 245 # abort output--but let prog finish
IP = 244 # interrupt process--permanently
BREAK = 243 # break
DM = 242 # data mark--for connect. cleaning
NOP = 241 # nop
SE = 240 # end sub negotiation
EOR = 239 # end of record (transparent mode)
ABORT = 238 # Abort process
SUSP = 237 # Suspend process
xEOF = 236 # End of file: EOF is already used...
SYNCH = 242 # for telfunc calls
def strip_options(buf):
"""Return a list of lines and dict of options from telnet data."""
l = buf.split(chr(IAC))
# print l
b = []
d = {}
subopt = False
for w in l:
if not w:
continue
o = ord(w[0])
if o > SB:
# print 'WILL/WONT/DO/DONT/IAC', `w`
w = w[2:]
elif o == SE:
# print 'SE', `w`
w = w[1:]
subopt = False
elif o == SB:
# print 'SB', `w`
subopt = True
for opt in ('USER', 'DISPLAY', 'TERM'):
p = w.find(opt + '\x01')
if p != -1:
d[opt] = w[p + len(opt) + 1:].split('\x00', 1)[0]
w = None
elif subopt:
w = None
if w:
w = w.replace('\x00', '\n').splitlines()
if not w[-1]: w.pop()
b.extend(w)
return b, d
def test_telnet():
l = []
s = "\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfd\x01fugly\r\x00yoda\r\x00bashtard\r\x00"
l.append(s)
s = '\xff\xfd\x01\xff\xfd\x03\xff\xfb\x18\xff\xfb\x1f\xff\xfa\x1f\x00X\x002\xff\xf0admin\r\x00\xff\xfa\x18\x00LINUX\xff\xf0foobar\r\x00enable\r\x00foobar\r\x00\r\x00show ip int Vlan 666\r\x00'
l.append(s)
s = '\xff\xfb%\xff\xfa%\x00\x00\x00\xff\xf0\xff\xfd&\xff\xfa&\x05\xff\xf0\xff\xfa&\x01\x01\x02\xff\xf0\xff\xfb&\xff\xfb\x18\xff\xfb \xff\xfb#\xff\xfb\'\xff\xfc$\xff\xfa \x0038400,38400\xff\xf0\xff\xfa#\x00doughboy.citi.umich.edu:0.0\xff\xf0\xff\xfa\'\x00\x00DISPLAY\x01doughboy.citi.umich.edu:0.0\x00USER\x01dugsong\xff\xf0\xff\xfa\x18\x00XTERM\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb"\xff\xfa"\x03\x01\x03\x00\x03b\x03\x04\x02\x0f\x05\x00\xff\xff\x07b\x1c\x08\x02\x04\tB\x1a\n\x02\x7f\x0b\x02\x15\x0c\x02\x17\r\x02\x12\x0e\x02\x16\x0f\x02\x11\x10\x02\x13\x11\x00\xff\xff\x12\x00\xff\xff\xff\xf0\xff\xfb\x1f\xff\xfa\x1f\x00P\x00(\xff\xf0\xff\xfd\x05\xff\xfb!\xff\xfa"\x01\x0f\xff\xf0\xff\xfd\x01\xff\xfe\x01\xff\xfa"\x03\x01\x80\x00\xff\xf0\xff\xfd\x01werd\r\n\xff\xfe\x01yoda\r\n\xff\xfd\x01darthvader\r\n\xff\xfe\x01'
l.append(s)
exp = [(['fugly', 'yoda', 'bashtard'], {'USER': 'dugsong', 'DISPLAY': 'doughboy.citi.umich.edu:0.0'}),
(['admin', 'foobar', 'enable', 'foobar', '', 'show ip int Vlan 666'], {}),
(['werd', 'yoda', 'darthvader'], {'USER': 'dugsong', 'DISPLAY': 'doughboy.citi.umich.edu:0.0'})]
assert (map(strip_options, l) == exp)
if __name__ == '__main__':
test_telnet()
    print 'Tests Successful...'
| {
"repo_name": "yangbh/dpkt",
"path": "dpkt/telnet.py",
"copies": "6",
"size": "3761",
"license": "bsd-3-clause",
"hash": -2542551538582526000,
"line_mean": 46.025,
"line_max": 823,
"alpha_frac": 0.6048923159,
"autogenerated": false,
"ratio": 2.303123086344152,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5908015402244152,
"avg_score": null,
"num_lines": null
} |