"""Classes for interacting with Python as a base language.

Includes
 - classes for parsing python source code into constituent parts so that
   we can convert python code into meta code.
 - ...
"""

import collections
import os
import pprint
import re
import subprocess
import sys
import tempfile


class Error(Exception):
  """Base class for all errors raised by this module."""


class InternalError(Error):
  """Raised when an internal consistency check fails."""


# Global debugging flag for this module.
DEBUG = False


class File(object):
  """Represents a single python file.

  On construction, the file is read and every literal string is replaced
  with a placeholder token (<@Sxxx@> for triple-quoted, potentially
  multi-line strings; <@sxxx@> for single-line strings), producing an
  'escaped' copy of the source that is easier to parse with regexps.
  The original text can be recovered via source()/srcToSource().
  """

  # Triple-quoted (potentially multi-line) string literals.
  MULTILINE_STR_RE = re.compile(r'((\"\"\"|\'\'\').*?\2)', re.S)
  # Single-line single-quoted string literals (backslash escapes allowed).
  SINGLELINE_SGLSTR_RE = re.compile(r"('(?:[^\\'\n]|\\.)+')")
  # Single-line double-quoted string literals (backslash escapes allowed).
  SINGLELINE_DBLSTR_RE = re.compile(r'("(?:[^\\"\n]|\\.)+")')
  # A single placeholder token.
  HOLDER_RE = re.compile(r'(<@[sS]\d+@>)')
  # Splits text into (prefix, first placeholder token, suffix).
  HOLDER_INFIX_RE = re.compile(r'(.*?)(<@[sS]\d+@>)(.*)', re.S)

  def __init__(self, filename):
    """Read filename and build the escaped (placeholder) representation.

    Args:
      filename: str
        Path of the python source file to load.
    """
    with open(filename, 'r') as fp:
      source = fp.read()

    strs = {}
    # NOTE(review): 'positions' is collected by MultiReplace but never
    # stored on self; kept for parity with the original code.
    positions = {}
    # One-element lists are used (rather than plain ints) so the nested
    # replacement functions can mutate the counters; 'nonlocal' is not
    # available in python 2.
    multi_ptr = [0]
    def MultiReplace(obj):
      multi_ptr[0] += 1
      rep = '<@S%03d@>' % multi_ptr[0]
      strs[rep] = obj.group(1)
      positions[rep] = obj.span(1)
      return rep

    single_ptr = [0]
    def SingleReplace(obj):
      single_ptr[0] += 1
      rep = '<@s%03d@>' % single_ptr[0]
      strs[rep] = obj.group(1)
      return rep

    # Multi-line strings must be substituted first so that quote characters
    # inside them are not misread as single-line strings.
    src = File.MULTILINE_STR_RE.sub(MultiReplace, source)
    src = File.SINGLELINE_SGLSTR_RE.sub(SingleReplace, src)
    src = File.SINGLELINE_DBLSTR_RE.sub(SingleReplace, src)

    # field filename: str
    #   Where the source code resides.
    self._filename = filename

    # field source_str: str
    #   The entire contents of the file, unmodified.
    self._source_str = source

    # field src_str: str
    #   The entire contents of the file, with literal strings replaced
    #   with <@Sxx@>/<@sxx@> placeholders.
    self._src_str = src

    # field strs: dict
    #   Maps placeholder strings to the literal strings they replace.
    self._strs = strs

    # Establish the line numbers within src that have placeholders.
    src_lines = {}
    for obj in File.HOLDER_RE.finditer(src):
      rep = obj.group(0)
      pos = obj.start(0)
      line = src.count('\n', 0, pos) + 1
      src_lines[rep] = line

    # Establish the mapping from line number in src to line number in source.
    # Each multi-line placeholder collapses several source lines onto one src
    # line, so every line after it is shifted by the number of newlines the
    # placeholder swallowed.
    delta = 0
    source_lines = {}
    for rep in sorted(src_lines, key=lambda k: src_lines[k]):
      src_line = src_lines[rep]
      count = strs[rep].count('\n')
      source_lines[src_line] = src_line + delta
      source_lines[src_line + 1] = src_line + delta + count + 1
      delta += count
    line_map = [0]
    # BUG FIX: 'line' must be initialized before the loop.  Previously, if
    # line 1 of src held no placeholder (the common case), 'line += 1' below
    # raised UnboundLocalError.
    line = 0
    for i in range(1, src.count('\n') + 2):
      if i in source_lines:
        line = source_lines[i]
      else:
        line += 1
      line_map.append(line)

    # field line_map: list of int
    #   Maps line numbers in src to line numbers in source.  1-based; index
    #   0 is a filler so line numbers can be used as direct indices.
    self._line_map = line_map

  def verify(self):
    """Check that expanding the escaped src reproduces the original source.

    On mismatch, writes both versions to temp files, prints a side-by-side
    diff command and its output, and raises InternalError.
    """
    source1 = self._source_str
    source2 = self.source(0, len(self._src_str))
    if source1 != source2:
      fd1, filename1 = tempfile.mkstemp()
      os.write(fd1, source1)
      os.close(fd1)
      fd2, filename2 = tempfile.mkstemp()
      os.write(fd2, source2)
      os.close(fd2)
      print('diff %s %s' % (filename1, filename2))
      p = subprocess.Popen(
        ['diff', '-y', '-W', '160', filename1, filename2],
        stdout=subprocess.PIPE)
      stdout, stderr = p.communicate()
      print(stdout)
      # BUG FIX: corrected typo 'miconfigured' -> 'misconfigured'.
      raise InternalError('Source %s is misconfigured.' % self._filename)

  def srcSize(self):
    """Return the length of the escaped (placeholder) text."""
    return len(self._src_str)

  def sourceSize(self):
    """Return the length of the original, unmodified text."""
    return len(self._source_str)

  def src(self, start, end):
    """Obtain the portion of self._src_str in [start, end).

    Args:
      start: int
      end: int

    Returns: str
    """
    return self._src_str[start:end]

  def source(self, start, end, quote_dent=''):
    """Obtain the portion of self._src_str in [start, end) litstr-expanded.

    Args:
      start: int
      end: int
      quote_dent: str
        Indentation to insert at the beginning of each line except the
        first in multi-line literal string expansions.  Added (with a
        backward-compatible default) because Region.source() forwards this
        keyword; previously that call raised TypeError.

    Returns: str
    """
    return self.srcToSource(self.src(start, end), quote_dent=quote_dent)

  def srcToSource(self, src, quote_dent=''):
    """Convert escaped src to expanded source.

    Args:
      src: str
      quote_dent: str
        See source().

    Returns: str
    """
    strs = self._strs

    def Replace(obj):
      placeholder = obj.group(1)
      text = strs[placeholder]
      if quote_dent:
        # Re-indent continuation lines of multi-line literals.
        text = text.replace('\n', '\n' + quote_dent)
      return text

    # TODO(wmh): We currently need to apply the replacements twice because
    # of the way we are producing the escaped sequences (single-quoted
    # single-line strings before double-quoted single-line strings).  This
    # means that sequences like "a string 'with nested' strings" will create
    # an escaped version of 'with nested', then another escaped string for
    # the entire string.
    #
    # If we can perform the single-line substitutions with a single replacement,
    # this will no longer be necessary.
    result = File.HOLDER_RE.sub(Replace, src)
    result = File.HOLDER_RE.sub(Replace, result)
    return result

  def numToNumber(self, line):
    """Obtain the source line corresponding to src line.

    Args:
      line: int

    Returns: int
    """
    return self._line_map[line]

  def keyToStr(self, key):
    """Convert a literal string key to its contents.

    Args:
      key: str
        Something like <@Sxx@>.

    Returns: str
      The text (with the literal string delimiters intact).
    """
    # Intentionally raising error if not present.
    return self._strs[key]


class Region(object):
  """A contiguous region within a python source file.

  Tracks character positions (keyed by conceptual names such as 'pre',
  'start', 'scope', 'post', 'end') into the owning File's escaped src
  string, and can partition its own text into child Regions via split().
  """

  def __init__(self, file_, kind, name, positions=None):
    """Initializer.

    Args:
      file_: File
        The File that this Region represents a contiguous region of.
      kind: str
        One of 'module', 'class', or 'def'
      name: str
        The name associated with this region.
      positions: dict or None
        Only None if kind == 'module'.  Otherwise, it should contain
        conceptual keys ('pre', 'start', 'sparams', 'eparams', 'scope',
        'comment', 'body', 'post', 'end') whose values are the int char
        positions within File's src of the associated region part.
    """
    if positions is None:
      if kind == 'module':
        # A module spans the entire file; signature-related positions do
        # not apply and are marked with -1.
        end = file_.srcSize()
        positions = {
          'pre': 0,
          'start': 0,
          'sparams': -1,
          'eparams': -1,
          'scope': -1,
          'comment': -1,  # TODO(wmh): Fix this ... see if first line has comment
          'body': 0, # TODO(wmh): Fix this by advancing past comment.
          'post': end,
          'end': end,
        }
      else:
        raise Error('Region of kind %s must be provided with positions' % kind)

    # field file: File
    #   The File this region is a span of.
    self._file = file_

    # field kind: str
    #   One of 'module', 'class' or 'def'
    self._kind = kind

    # field name: str
    #   The name of this Region
    self._name = name

    # field positions: dict
    #   Maps conceptual location strings to integer positions within self._file
    #     pre:    file[pre-1] == '\n' (or beginning of file)
    #     start:  file[start-1] == '\n'
    #     sparams file[paren-1] == '('
    #     eparams file[paren] == ')'
    #     scope
    #     comment comment == -1 or file[comment] == '<'
    #     body    file[body-1] == '\n'
    #     post    file[post-1] == '\n'
    #     end     file[end] == '\n'
    self._positions = positions

    # field children: OrderedDict
    #   Initialized when split() is called; None until then.
    self._children = None

  def file(self):
    """Return the owning File."""
    return self._file

  def kind(self):
    """Return the region kind ('module', 'class' or 'def')."""
    return self._kind

  def name(self):
    """Return the name associated with this region."""
    return self._name

  def pre(self):
    """Return the char position where this region's preamble starts."""
    return self._positions['pre']

  def start(self):
    """Return the char position where this region's definition starts."""
    return self._positions['start']

  def end(self):
    """Return the char position one past the end of this region."""
    return self._positions['end']

  def child(self, name):
    """Return the named child Region, or None (requires split() first)."""
    return self._children.get(name, None)

  def children(self):
    """Return the OrderedDict of child Regions (None before split())."""
    return self._children

  @classmethod
  def GenerateRegexp(cls, keyword, indent, subindent):
    """Generate a regexp to match child regions.

    Args:
      keyword: str
        The keyword that denotes start of a child region.
      indent: str
        The indentation expected before child keywords.
      subindent: str
        The indentation expected within child scope.

    Returns: regexp
      The returned regexp will match an entire child region, except for
      preamble before the keyword, and postamble that should be made preamble
      for the next child.
    """
    return re.compile(
      # Although it feels safest to root things with a newline, this breaks
      # things because the tail end of this regexp grabs empty lines within
      # the body, consuming the newline before the next child.
      # r'(?:^|\n)'
      r'%s(?P<keyword>%s)\s+'
      r'(?P<name>[a-zA-Z0-9_]+)\s*'
      r'\((?P<params>.*?)\):'
      r' *(?:(?P<oneline>[^ \n][^\n]*)|\n'
      r'(?P<indent> *)(?P<comment><@[sS]\d+@>)?(?P<comment_tail>[^\n]*)'
      # The '%s' in the following line should really be '(?P=indent)',
      # but that produces an exception when parsing the regexp:
      #    sre_constants.error: nothing to repeat
      # why?  This also happens when we use \4.
      r'(?P<scope>(?:\n(?:%s[^\n]*)?)*))' %
      (indent, keyword, subindent), re.S)

  def src(self, start=-1, end=-1):
    """Return escaped src text in [start, end); defaults to whole region."""
    if start < 0:
      start = self.start()
    if end < 0:
      end = self.end()
    return self.file().src(start, end)

  def source(self, start=-1, end=-1, quote_dent=''):
    """Return expanded source text in [start, end); defaults to whole region."""
    if start < 0:
      start = self.start()
    if end < 0:
      end = self.end()
    # NOTE(review): this forwards quote_dent to File.source(); confirm that
    # File.source accepts that keyword.
    return self.file().source(start=start, end=end, quote_dent=quote_dent)

  def _substr(self, key1, key2, end_adj=0, sub=False, quote_dent=''):
    """Obtain the text between key1 and key2.

    Args:
      key1: str
        The conceptual key identifying the start of the range desired.
      key2: str
        The conceptual key identifying the character one past the end of
        the range desired.
      end_adj: int
        How much to adjust the end position dictated by key2.
      sub: bool
        If True, return the subbed version (with literal strings replaced
        with placeholders.  If False, return the original source.
      quote_dent: str
        The indentation to put at the beginning of each line except the
        first in multi-line literal string expansions.

    Returns: str
      Empty string if either position is -1 (part not present).
    """
    start = self._positions[key1]
    end = self._positions[key2]
    if start == -1 or end == -1:
      result = ''
    else:
      end += end_adj
      if sub:
        result = self.src(start=start, end=end)
      else:
        result = self.source(start=start, end=end, quote_dent=quote_dent)
    return result

  def preamble(self, sub=False):
    """Return the text before the definition (decorators, comments, ...)."""
    return self._substr('pre', 'start', sub=sub)

  def postamble(self, sub=False):
    """Return the text after the scope that still belongs to this region."""
    return self._substr('post', 'end', sub=sub)

  def argamble(self, sub=False):
    """Return the parameter-list text (between the parens)."""
    return self._substr('sparams', 'eparams', sub=sub)

  def docstr(self, sub=False, indent=False):
    """Return the docstring text, optionally de-indented."""
    result = self._substr('comment', 'scope', sub=sub)
    if not indent:
      # We are to remove indentation
      subdent = '  ' if self.kind() == 'class' else '    '
      result = result.replace('\n' + subdent, '\n')
    return result

  def body(self, sub=False, indent=False, quote_dent=''):
    """Return the scope text (after docstring), optionally de-indented."""
    result = self._substr('scope', 'post', sub=sub, quote_dent=quote_dent)
    if not indent:
      # We are to remove indentation.
      # TODO(wmh): Do we need to avoid removing indentation on lines starting
      # with context.re('quote_dent') aka '.*>|'?
      subdent = '  ' if self.kind() == 'class' else '    '
      result = result.replace('\n' + subdent, '\n')
    return result

  def entityPreamble(self):
    """Obtain the text from the start of self to the first child in self."""
    children = self._children.values()  # not present until split() called.
    end = children[0].pre() if children else self.end()
    return self.src(self.pre(), end)

  def split(self, keyword, indent):
    """Split a subset of my src into child Regions.

    Args:
      keyword: str
        Either 'class' or 'def'
      indent: str
        How much indentation to expect before the keyword.

    Returns: collections.OrderedDict
      Maps child name to child Region (also stored on self._children).
    """
    abs_start = self.start()
    abs_end = self.end()

    # Child positions are computed relative to this region's src; Fix
    # translates them to absolute positions within the File (preserving -1
    # as the 'not present' marker).
    def Fix(val):
      return -1 if val == -1 else val + abs_start

    children = collections.OrderedDict()
    pos_list = []
    splitter_re = self.GenerateRegexp(keyword, indent, indent + '  ')
    post_re = re.compile('((?:\n%s[^\n]+)+)\n$' % indent, re.S)

    # NOTE(review): the result of this compile is discarded — dead code?
    re.compile('^' + indent + '\S')

    src = self.src(start=abs_start, end=abs_end)
    for match in splitter_re.finditer(src):
      # groups:
      #   0 = all
      #   'preamble'     = 
      #   'name'         = name of child
      #   'params'       = parameters
      #   'indent'       = whitespace before optional docstr
      #   'comment'      = docstr placeholder
      #   'comment_tail' = if comment, should always be empty, else first scope line.
      #   'scope'        = newline after comment (start of scope)
      mdict = match.groupdict()
      # NOTE(review): 'ename' is never used ('name' is re-fetched below).
      ename = mdict['name']

      # Back up from the keyword over the indentation to the line start.
      start = match.start('keyword') - len(indent)
      if src[start] == '\n':
        start += 1

      # We currently have the start of this child's definition at 'start',
      # but some of the text at the end of the previous child should be
      # considered 'pre' for this child as well.  In particular, any line
      # indented to the exactly the same level as start.
      pre = start
      if pos_list:
        previous_pos = pos_list[-1]
        post_text = self.file().src(previous_pos['post'], start+abs_start)
        post_match = re.search(post_re, post_text)
        previous_end = start
        if post_match:
          # The text in post_match.groups(1) represents preamble for this
          # child to be removed from the postamble of the previous child.
          post_start, post_end = post_match.span(1)
          post_diff = post_end - post_start
          previous_end -= post_diff
          pre -= post_diff
        previous_pos['end'] = previous_end

      if match.start('oneline') > -1:
        # This is a one-line definition.
        comment = -1
        scope = match.start('oneline')
        post = match.end('oneline')

      else:
        # Multi-line definition (the more traditional kind)
        comment = match.start('comment')
        if comment == -1:
          scope = match.start('indent') - 1
        else:
          scope = match.start('scope')

        # Because the scope consumes properly lines indented *and* empty lines,
        # it consumes empty lines at the end of scope that are better considered
        # part of the postamble. We adjust the start of the postamble
        # accordingly.
        post = match.end('scope') - 1
        nl = 0
        while src[post] == '\n':
          post -= 1
          nl += 1
        post += min(nl, 2)

      pos = {
        'pre': Fix(pre),
        'start': Fix(start),
        'sparams': Fix(match.start('params')) - 1,
        'eparams': Fix(match.end('params')) + 1,
        'comment': Fix(comment),
        'scope': Fix(scope),
        'post': Fix(post),
      }
      if pos_list:
        pos_list[-1]['end'] = pos['pre']

      # Create a child Region.
      name = match.group('name')
      pos_list.append(pos)
      child = Region(self.file(), keyword, name, positions=pos)
      children[name] = child

    # The last child extends to the end of this region.
    if pos_list:
      pos_list[-1]['end'] = abs_end

    # children: The collection of Region instances (delimited by keyword,
    # adjusted for preamble) that make up this source string.
    self._children = children

    # NOTE(review): debugging branch, disabled via 'and False'.
    if self.kind() == 'module' and False:
      with open('./testdata/k.py', 'w') as fp:
        fp.write(src + '\n')

    return children

  def debug(self):
    """Print each inter-position span of this region (python 2 prints)."""
    positions = self._positions
    keys = sorted(positions, key=lambda k: positions[k])
    prefix = '    |'
    for i in range(0, len(keys)-1):
      sk = keys[i]
      ek = keys[i+1]
      s = positions[sk]
      e = positions[ek]
      print '%-7s-%-7s: %d-%d' % (sk, ek, s, e)
      src = prefix + self._file.src(s, e).replace('\n', '\n' + prefix)
      if src.endswith(prefix): src = src[:-len(prefix)]
      print src

# NOTE(review): Entity is not defined earlier in this file chunk; confirm it
# is available (defined or imported) before this class statement executes.
class Module(Entity):
  """Represents a parsed version of a python module definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  # Cached Module instances, keyed by path (see New()).
  # TODO(wmh): Should this go some place else so that it can be used
  # for more than just Class.New()?
  MODULES = {}

  @classmethod
  def New(cls, path):
    """Create a new (interned) Module instance from a file.

    Args:
      path: str
        Path to the module's .py file.

    Returns: Module
      The cached instance for path, creating (and caching) it on first use.

    Raises:
      Error: if the file's basename does not match the parsed module name.
    """
    module = cls.MODULES.get(path, None)
    if not module:
      basename, source, test_source = cls._Load(path)
      test_sources = [test_source]
      if basename != source.name():
        raise Error('Expecting %s == %s' % (basename, source.name()))
      module = cls(
        source, test_sources=test_sources, previous_source=None,
        parent=None, path=path)
      cls.MODULES[path] = module
    return module

  @classmethod
  def _Load(cls, path):
    """Load a module's source (and its test module's source).

    The test module is assumed to live next to the module as
    <basename>_test.py.

    Args:
      path: str

    Returns: three-tuple
     [0] str (basename)
     [1] Region (module)
     [2] Region (test module)

    Raises:
      Error: if path does not end in '.py'.
    """
    basename = os.path.basename(path)
    if basename.endswith('.py'):
      basename = basename[:-3]
    else:
      raise Error('Expecting module paths to end in .py: %s' % path)
    test_path = os.path.join(os.path.dirname(path), '%s_test.py' % basename)
    source = Region(File(path), 'module', basename)
    test_source = Region(File(test_path), 'module', basename + '_test')
    return basename, source, test_source

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None, path=None):
    """Initializer.

    Args:
      source: Region
        The entire code, as structured data.
      test_sources: list of Region
        The entire test code, as structured data.
      previous_source: Source or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources before the test sources of this block.
      parent: Entity or None
        The block that contains this block.
      path: str or None
        The path to the file containing the module.
    """
    super(Module, self).__init__(
      source, test_sources=test_sources,
      previous_source=previous_source,
      previous_test_sources=previous_test_sources,
      parent=parent)
    # NOTE(review): 'name' is unused below; presumably kept for debugging.
    name = self.name()

    # Partition the module into Class instances (and pair up test classes
    # named <Name>Test from the test sources).  parseSource is provided by
    # the Entity base class (not visible in this file chunk).
    classes, unclaimed_test_classes = self.parseSource(
      'module', Class, 'class', '', lambda name: name + 'Test', source,
      test_sources=test_sources)

    # field path: str
    #   The path to the module.
    self._path = path

    # field classes: collections.OrderedDict
    #   Maintains a mapping from class name to Class instance.
    self._classes = classes

    # field unclaimed_test_classes: collections.OrderedDict
    #   The classes in the test module not acting as test cases.
    self._unclaimed_tests = unclaimed_test_classes

  def path(self):
    """Return the path to the module file (or None)."""
    return self._path

  def sourceText(self):
    """The source code for the module.

    Returns: str
      The entire contents of the module as a single string (re-read from
      disk, not from the parsed Region).
    """
    with open(self._path, 'r') as fp:
      return fp.read()

  def classNamed(self, name):
    """Find a class.

    Args:
      name: str
        The name of the class.

    Returns: Class or None
    """
    return self._classes.get(name, None)

  def summarize(self, fp=sys.stdout, indent=''):
    """Write a one-line-per-class summary of this module to fp.

    Note: uses dict.iteritems (python 2 only).
    """
    subindent = indent + '  '
    for name, clazz in self._classes.iteritems():
      assert name == clazz.name()
      fp.write('\n%s%4d: Class %s\n' % (indent, clazz.start(), name))
      clazz.summarize(fp=fp, indent=subindent)

  def toMeta(self, metac, output, indent=''):
    """Convert this Module to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentation before each newly added line.
    """
    rem = '/#'  # TODO(wmh): Get this from Context.tokens('rem').  How?
    subindent = indent + '  '

    output.addPrimary('namespace', self.name())

    classes = self._classes.values()
    classes_output = output.clone(delta_indent='  ')
    tests_output = output.clone(delta_indent='  ')

    # Process the initial text of the module (the docstr and the imports,
    # everything before the first class).
    if classes:
      module_preamble_src = self.source().entityPreamble()
      holder_match = File.HOLDER_INFIX_RE.search(module_preamble_src)
      if holder_match:
        # The module docstring appears as a placeholder token; expand it
        # and emit it as a comment block (stripping the triple quotes).
        pre_text, module_docstr_key, post_text = holder_match.groups()
        module_docstr = self.source().file().keyToStr(module_docstr_key)
        output.addBlock(
          'comment',
          module_docstr.replace('"""', '').rstrip().split('\n'),
          comments=True)
      # NOTE(review): if holder_match is None, post_text is undefined here
      # and the next line would raise NameError — confirm every module is
      # expected to have a docstring.
      self.nativeToMeta(classes_output, post_text.split('\n'))

      # NOTE(review): 'test_source' is assigned but never used.
      test_source = self.testSources()[0]
      # TODO(wmh): Fix this!
      # NOTE(review): guarded by 'False and ...' so this branch never runs
      # (and the undefined names inside are never evaluated).
      if False and first_test_sources:
        print 'Here with %s' % self.name()
        first_test_source = first_test_source[0]
        test_src = first_test_source.src()
        test_holder_match = Source.HOLDER_INFIX_RE.search(src)
        if test_holder_match:
          pre_text, module_docstr_key, post_text = test_holder_match.groups()
          print '#' * 70
          print post_text
          print '#' * 70

    # Now we generate the text for each class.
    for class_ in classes:
      classes_output.reset()
      classes_output.newline()
      class_.toMeta(metac, classes_output, indent=subindent)

    output.addBlock('scope', classes_output.lines(), already_indented=True)

    output.newline()
    output.addTerm('namespace', self.name())


class Class(Entity):
  """Represents a parsed version of a python class definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None):
    """Initializer.

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source
        The entire test code, as structured data.
      previous_source: str or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources before the test sources of this block.
      parent: Entity or None
        The block that contains this block.
    """
    # NOTE(review): previous_test_sources is accepted but not forwarded to
    # the superclass (Module.__init__ does forward it) — confirm intended.
    super(Class, self).__init__(
      source, test_sources=test_sources, previous_source=previous_source,
      parent=parent)
    # NOTE(review): 'name' is unused below.
    name = self.name()

    # Partition the class into Method instances, pairing each with test
    # methods named test_<name> from the test sources.  parseSource is
    # provided by the Entity base class (not visible in this file chunk).
    methods, unclaimed_test_methods = self.parseSource(
      'class', Method, 'def', '  ', lambda name: 'test_' + name,
      source, test_sources=test_sources)

    # Parse the field information for this class.
    # Accessor naming convention: <name> is the getter, <name>Is the setter,
    # <name>Ref the reference accessor.
    acc_types = {'get': '', 'set': 'Is', 'ref': 'Ref'}
    field_map = collections.OrderedDict()
    acc_map = {}
    init_meth = methods.get('__init__', None)
    if init_meth:
      field_order = init_meth.parseFieldInfo()
      for field_info in field_order:
        # field_info contains:
        #    comments: list of str
        #      The description of the field
        #    field: str
        #      The actual field name (with underscore)
        #    name: str
        #      The conceptual field name
        #    type: str
        #      The user-provided type of the field
        #    value: str
        #      ?
        name = field_info['name']
        field_map[name] = field_info
        # Register each potential accessor name for this field so that
        # method parsing can later recognize accessor methods.
        for acctype in acc_types:
          suffix = acc_types[acctype]
          acc_map[name + suffix] = (acctype, field_info)

    # field methods: collections.OrderedDict
    #   Maintains a mapping from method name to Method instance.
    self._methods = methods

    # field unclaimed_test: collections.OrderedDict
    #   The methods in the test class that are not test methods.
    self._unclaimed_tests = unclaimed_test_methods

    # field field_map: collections.OrderedDict
    #   Maintains field info.
    self._field_map = field_map

    # field acc_map: dict
    #   Maps accessor method name to (accessor type, field_info).
    self._acc_map = acc_map

  def methods(self):
    """Return the OrderedDict mapping method name to Method instance."""
    return self._methods

  def fieldMap(self):
    """Return the OrderedDict mapping field name to field info dict."""
    return self._field_map

  def show(self, fp=sys.stdout):
    """Write this class's expanded source text to fp."""
    # TODO(wmh): Fix this.
    fp.write(self.source().source())
      
  def summarize(self, fp=sys.stdout, indent=''):
    """Write a per-method summary (with paired test methods) to fp.

    Each method gets one line showing its start offset and name, plus the
    first paired test method; any additional test methods get their own
    continuation lines.  Note: uses dict.iteritems (python 2 only).
    """
    for name, method in self._methods.iteritems():
      assert name == method.name()
      test_sources = method.testSources()
      if test_sources:
        first_test_summary = '%4d: %s' % (
          test_sources[0].start(), test_sources[0].name())
      else:
        first_test_summary = ''
      fp.write(
        '%s%4d: %-40s | %s\n' %
        (indent, method.start(), name, first_test_summary))
      # Remaining test methods are listed on continuation lines, aligned
      # under the first test column.
      for test_source in test_sources[1:]:
        fp.write(
          '%s%46s | %4d: %s\n' %
          (indent, '', test_source.start(), test_source.name()))

  def methodNamed(self, name):
    """Find a method.

    Args:
      name: str
        The name of the method.

    Returns: Method or None
    """
    try:
      return self._methods[name]
    except KeyError:
      return None

  def methodsStartingWith(self, prefix):
    """Find all methods starting with given prefix.

    Args:
      prefix: str
        A name prefix.

    Returns: list of Method
      In definition order (the order of self._methods).
    """
    return [meth for meth in self._methods.values()
            if meth.name().startswith(prefix)]

  def toMeta(self, metac, output, indent=''):
    """Convert this Class to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentatino before each newly added line.
    """
    debug = False
    source = self.source()
    tests = self.testSources()
    # TODO(wmh): Support multiple test classes?
    test_source = tests[0] if tests else None
    docstr = source.docstr()
    cls_name = source.name()
    field_map = self.fieldMap()
    term = metac.token('term')
    rem = metac.token('rem')

    subindent = indent + '  '

    scope_lines = []
    scope_output = output.clone(output=scope_lines, delta_indent='  ')
    
    test_lines = []
    test_output = output.clone(output=test_lines, delta_indent='  ')
    
    dummy_field_map = {}  # TODO(wmh): Remove this (and from _methodToMeta()
    init_scope_index = -1
    methods = self.methods().values()

    if debug:
      print 'Class %s' % cls_name

    # We first deal with the text within the class before the first method. We
    # add a native block to capture it. We can obtain this text from
    # methods[0].previousSource(), but unfortunately that text represents
    # everything in the class definition up to the first child definition,
    # including the definition of the class itself and everything in between.
    # We want to obtain the part after the class definition and docstr.
    # I'd initial thought I could deal with this in Source.split() by obtaining
    # the offset of the end of docstr using
    #      self._points['src']['body'] - self._points['src']['start']
    # but this produces an offset within the parent's src, which
    # is not necessarily the same offset as the current src (because
    # the current src resubs a smaller set of strings so what looks
    # like <@STRING109@> in the parent becomes <@STRING8@> in current,
    # (producing different offsets))
    #
    # Instead, we apply a regexp to substr.  Something cleaner would
    # be nice.
    if methods:
      sub_src = self.source().entityPreamble()
      init_re = re.compile('(.*\): *\n(?:\s*<@[sS]\d+@> *\n)?)')
      init_match = init_re.search(sub_src)
      if init_match:
        index = len(init_match.group(1))
        inamble = sub_src[index:]
        
        if inamble.strip():
          inamble_lines = inamble.split('\n')

          # The very last index of inamble_lines is always empty. Any lines
          # before that that are not empty appear immediately before the
          # first method, and are thus preamble of that method, NOT the
          # initial class preamble.
          idx = len(inamble_lines) - 2
          while inamble_lines[idx].strip():
            idx -= 1
          self.nativeToMeta(scope_output, inamble_lines[:idx])
      else:
        raise Error('Failed to find init preamble')
      
    for method in methods:
      # Iniital setup for this method
      mname = method.name()
      if debug:
        print '  Method %s' % mname

      # TODO(wmh): If we always arrange for method preambles to contain ONLY
      # text associated with the method def (decorators, etc.), then we should
      # NOT generate a native block ... that will be handled by postamble.
      preamble = method.source().preamble()
      if preamble:
        if False:
          self.nativeToMeta(scope_output, preamble.split('\n'))

      # Establish the test sources for this method. This is a subset of those
      # in method.testSources(), which consists of all test methods with
      # test_<mname> as a prefix. Suppose <mname> is 'delim' and there is also
      # another method named 'delimIs'. Both 'test_delim' and 'test_delimIs'
      # will be in method.testSources(), but we want to remove test_delimIs.
      test_map = {}
      for test_method_source in method.testSources():
        tmsname = test_method_source.name()
        ok = True
        for ometh in self.methodsStartingWith(mname):
          oname = ometh.name()
          if oname == mname:
            continue
          otest_prefix = 'test_' + oname
          if tmsname.startswith(otest_prefix):
            ok = False
            break
        if ok:
          test_map[tmsname] = test_method_source
          if debug:
            print '    Test %s' % tmsname

      acc_map = self._acc_map
      if mname in acc_map:
        # This is an accessor for a field .. we note the kind of accessor
        # (get, set or ref) in field_info['accessor']
        acctype, field_info = acc_map[mname]
        field_info.setdefault('accessors', {})[acctype] = {
          'scope': method, 'tests': test_map.values()}
      else:
        if mname == '__init__':
          init_scope_index = len(scope_lines)
        scope_output.reset()
        method.toMeta(metac, scope_output, indent=subindent)
        postamble = method.source().postamble()
        if postamble:
          self.nativeToMeta(scope_output, postamble.split('\n'))

    # ----------------------------------------------------------------------
    # Compose the field code
    forder = {'get': 1, 'set': 2, 'ref': 3}

    # TODO(wmh): These must be obtained from the schema (the appropriate
    # 'template' attributes of construct field).
    fplates = {
      'get': 'return self._%(field)s',
      'set': 'self._%(field)s = value',
      'ref': 'return self._%(field)s'
    }

    if field_map:
      if init_scope_index < 0:
        raise Error('Found fields but no initializer.')

      # Accumulate all lines representing field definitions into 'field_lines'
      field_lines = []

      for field_info in field_map.values():

        # Define a field from field_info
        field_output = scope_output.clone()
        field_output.newline()

        # Establish the field name
        field_name = field_info['name']
        fullname = '%s.%s' % (cls_name, field_name)
  
        field = field_info.get('field', None)
        if not field:
          print (
            'WARNING: Found documentation for field %s but no self._%s '
            'was found in %s.__init__' % (field_name, field_name, cls_name))
          continue
        if field != '_' + field_name:
          raise Error('Not yet supporting %s vs %s' % (field, field_name))

        # Establish the feature attributes of the field
        accessors = field_info.get('accessors', {})
        accs = sorted(
          accessors.keys(), cmp=lambda a, b: cmp(forder[a], forder[b]))
        field_features = ''.join(accs) or 'raw'
        if field_features == 'getsetref':
          # This is the default.
          field_features = ''

        # Define the field construct
        field_output.addPrimary('field', field_name, features=field_features)

        # Add field typing information if present
        if 'type' in field_info:
          basetype = field_info['type']
          metatype = metac.baselang().baseTypeToMeta(basetype, metac)
          field_output.addSecondary(':', metatype)

        # Add the field comment
        if 'comments' in field_info:
          field_output.addBlock('#', field_info['comments'], comments=True)

        # Define the accessors
        if accs:
          acc_lines = []
          for acc in accs:
            accessor_output = field_output.clone(delta_indent='  ')
            accessor_output.addPrimary('accessor', acc)
            acc_info = accessors[acc]

            acc_scope = acc_info['scope']
            acc_body = acc_scope.source().body()

            # Establish whether the definition for the accessor provided by
            # the user differs from the default definition.  If so, add a
            # scope block to the accessor providing the definition.
            simple_acc_body = fplates[acc] % {'field': field_name}
            if acc_body.strip() != simple_acc_body:
              accessor_output.addBlock('scope', self.splitText(acc_body))

            if 'tests' in acc_info:
              ftests = acc_info['tests']
              if not ftests:
                # No unittests for this field.
                continue
              ftest = ftests[0]
              accessor_output.addBlock('test', self.splitText(ftest.body()))
            if False:
              # This makes the output rather too verbose, but there may be times
              # we want strict generation.
              # TODO(wmh): Add a flag to control this.
              accessor_output.addTerm('accessor', acc)
            if accessor_output.size() > 1:
              acc_lines.extend(accessor_output.lines())
          # Add all accessor lines into the field scope.
          field_output.addBlock('scope', acc_lines, already_indented=True)

        # End the field construct
        field_output.addTerm('field', field_name)
        field_lines.extend(field_output.lines())

      # We insert the fields immediately before the initializer for this
      # class.
      scope_lines = (
        scope_lines[:init_scope_index] +
        field_lines +
        scope_lines[init_scope_index:])

    # ----------------------------------------------------------------------
    # Now process the test code.
    
    # For any method in test source that is not associated with a core method,
    # we insert the method into the test block of the class itself.
    for test_method in self._unclaimed_tests.values():
      test_method.toMeta(metac, test_output, indent=subindent)
      postamble = test_method.source().postamble()
      if postamble:
        self.nativeToMeta(scope_output, postamble.split('\n'))

    # ----------------------------------------------------------------------
    # Put it all together by writing the class definition to 'output'

    parents = source.argamble()[1:-1]
    output.addPrimary('class', cls_name)
    output.addSecondary(':', parents)
    if docstr:
      docstr_match = File.MULTILINE_STR_RE.match(docstr)
      if not docstr_match:
        raise Error('Invalid docstr\n%s' % docstr)
      docstr_text = docstr_match.group(1)
      output.addBlock(
        'comment', docstr_text.strip(docstr_text[0]).split('\n'), comments=True)
    if test_lines:
      output.addBlock('test', test_lines, already_indented=True)
      output.newline()
    if scope_lines:
      # TODO(wmh): switch to scope<python>
      output.addBlock('scope', scope_lines, already_indented=True)
      output.newline()
    output.addTerm('class', cls_name)

    # ----------------------------------------------------------------------
    # Deal with any postamble of the class.

    postamble = source.postamble()
    if postamble.strip():
      self.nativeToMeta(output, postamble.split('\n'), newlines=0)


class Method(Entity):
  """Represents a parsed version of a python method definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  SUPER_RE = re.compile(r"""
    (.*)                # the text before the super call.
    \s*super
    \(
    \s*([a-zA-Z0-9_]+)  # the subclass
    \s*,\s*
    ([a-zA-Z0-9_]+)\s*  # the receiver object
    \)
    \.
    ([a-zA-Z0-9_]+)     # the parent method being invoked
    \s*\(([^\)]*?)\)    # the (possibly multiline) arg list.  Does not work
                        # if any params have ')' in them.
    (.*)                # the text after the super call.
    """,
    re.S | re.X)

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None):
    """Initializer

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source
        The entire test code, as structured data.
      previous_source: str or None
        The source before the source of this block.
      previous_test_sources: list of str or None
        Analogous to previous_source, for the test sources.
      parent: Entity or None
        The block that contains this block.
    """
    super(Method, self).__init__(
      source, test_sources=test_sources, previous_source=previous_source,
      previous_test_sources=previous_test_sources, parent=parent)

    # Check to see if there is a super() call in the body, and if so, whether
    # it is the first statement (post-extend), last statement (pre-extend) or
    # inner statement (super-extend).
    body = self.source().body()
    super_info = None
    super_match = self.SUPER_RE.match(body)
    if super_match:
      # Now parse the super data.
      pre_text, subcls_str, rec_str, parent_meth, super_args, post_text = (
        super_match.groups())
      if not pre_text.strip():
        # no text before the super call, so this is post-extend.
        inheritance = 'post_extend'
      elif not post_text.strip():
        # no text after the super call, so this is pre-extend
        inheritance = 'pre_extend'
      else:
        # super() call is somewhere in the middle of the method body.
        inheritance = 'super_extend'

      # When we generate Meta code, the body should not include the super
      # call, since it is either implicit (pre_extend or post_extend) or
      # specified by special syntax (super_extend), so remember the exact
      # text of the super call for later replacement.
      #  - We slice with match offsets rather than the previous
      #    group(0)[len(g1):-len(g6)] form: when post_text is empty,
      #    -len('') == 0 and the negative-index slice silently produced an
      #    empty string instead of the super-call text.  SUPER_RE.match()
      #    anchors at offset 0, so end(1)/start(6) are valid body offsets.
      text = body[super_match.end(1):super_match.start(6)]
      super_info = {
        'args': super_args.replace('\n', ' ').strip(),
        'inheritance': inheritance,
        'text': text,
      }

    # field level: str
    #   The level of this method ('instance', 'static' or 'meta')
    level = 'instance'
    preamble = self.source().preamble()
    if preamble:
      if '@classmethod' in preamble:
        level = 'meta'
      elif '@staticmethod' in preamble:
        level = 'static'
    self._level = level

    # field super_info: dict
    #   Maintains information about super class.
    self._super_info = super_info

    # field comments: dict
    #   Information from a structured comment.
    self._comments = self._parseComment()

  def superInfo(self):
    """Returns the super()-call info dict, or None if the body has no super().

    When non-None, the dict has keys 'args' (flattened arg string of the
    super call), 'inheritance' ('pre_extend', 'post_extend' or
    'super_extend') and 'text' (the exact super-call text), as built in
    __init__.
    """
    return self._super_info

  def level(self):
    """Returns the method level: 'instance', 'static' or 'meta'."""
    return self._level

  def comments(self):
    """Returns the structured docstring data built by _parseComment()."""
    return self._comments

  def _parseComment(self):
    """Parse my docstr into structured comment data.

    Returns: dict
      Always contains 'comments' (list of str): the top-level comment lines,
      with trailing blank lines removed.  May additionally contain:
        args: dict mapping arg name to {'name', 'type', 'comments'}
        raises: {'list': [{'name', 'first' (optional), 'comments'}, ...]}
        returns: {'type': str, 'comments': list of str}
      when the docstring has the corresponding Args:/Raises:/Returns:
      sections.

    Raises:
      Error: if a docstring line cannot be parsed.
    """
    debug = False
    # Strip the triple-quote delimiters from the raw docstring.
    docstr = self.source().docstr()[3:-3]

    data = {'comments': []}
    if docstr:
      base_indent = 0   # getdoc() cleans the doc so that it always starts at 0
      # Raw string so the \s escapes are not treated as (invalid) string
      # escape sequences under python 3.
      line_re = re.compile(r'^(\s*)(?:(\S+):)?(\s*)(.*)')
      doc_lines = docstr.split('\n')
      # The first docstring line is always a top-level summary comment.
      data['comments'].append(doc_lines[0])
      lineno = 0
      last_was_top = True
      section = None
      key = None  # stays None until we see Args: or Raises: or Returns:
      for line in doc_lines[1:]:
        lineno += 1
        line_match = line_re.match(line)
        if line_match:
          indent_str, kw, ws, text = line_match.groups()
          indent = len(indent_str)
          empty = not kw and not text
          if debug:
            # Call form works under both python 2 and 3 and matches the
            # print(...) style used elsewhere in this file.
            print('%-80s #=%d indent=%d base=%d kw=%s key=%s text=%s' % (
              line, lineno, indent, base_indent, kw, key, text))

          if indent == base_indent and not empty:
            # A section header or a top-level comment.
            if kw in ['Args', 'Raises', 'Returns']:
              key = kw.lower()
              section = {}
              data[key] = section
              if key == 'returns':
                section['comments'] = []
                section['type'] = text or '?'
            else:
              # top-level comment.
              #if not last_was_top:
              #  data['comments'].append('')
              if kw:
                data['comments'].append('%s:%s%s' % (kw, ws, text))
              else:
                data['comments'].append(text)
              last_was_top = True
              key = None

          elif key:
            # A secondary (or lower) comment ... add to section
            if key == 'returns':
              section['comments'].append(' ' * (indent - 2) + text)
            elif indent == base_indent + 2:
              last_was_top = False
              if key == 'args':
                # We expect kw=arg_name, text=arg_type
                entry = {'name': kw, 'type': text, 'comments': []}
                section[kw] = entry
              elif key == 'raises':
                # We expect kw=exception name, text=start of description.
                entry = {'name': kw, 'comments': []}
                if text:
                  entry['first'] = text
                section.setdefault('list', []).append(entry)
              elif key == 'returns':
                # We expect kw=None, text=comment.
                entry = section
                entry['comments'] = []
            elif indent >= base_indent + 4:
              # We have a comment line within a section.
              # NOTE(review): 'entry' is only bound once an indent==base+2
              # line has been seen; a malformed docstring that jumps straight
              # to this depth would raise NameError — confirm inputs.
              last_was_top = False
              entry['comments'].append(' ' * (indent - base_indent - 4) + text)

          else:
            # We have a top-level comment indented some amount of space.
            data['comments'].append(' ' * indent + text)
            last_was_top = True

        else:
          raise Error('Unknown line %d: %s' % (lineno, line))
    # Trim trailing blank comment lines.
    while data['comments'] and not data['comments'][-1]:
      data['comments'].pop()
    return data

  def _parseArgSpec(self, argspec):
    """Parse the argument specification of this method into a four-tuple.

    Args:
      argspec: str
        The string instead the '(' and ')' within a method definition.

    Returns: four-tuple
     [0] list of positional arg names and keyword arg names
     [1] name of * or None
     [2] name of ** or None
     [3] list of the default values of the keyword args from [0]
   """
    names = []
    varargs = None
    varkw = None
    defaults = []

    if argspec:
      argspec = argspec + ','  # avoids special casing.

    n = len(argspec)
    i = 0      # current index within argspec being considered
    s = 0      # index of start of current arg
    eq = None  # index of '=' for current arg (None if not keyword arg)
    while i < n:
      c = argspec[i]
      if c == ',':
        # Found end-of-arg.
        if eq:
          names.append(argspec[s:eq].strip())
          defaults.append(argspec[eq+1:i].strip())
        else:
          nm = argspec[s:i].strip()
          if nm[0] == '*':
            if nm[1] == '*':
              varkw = nm[2:]
            else:
              varargs = nm[1:]
          else:
            names.append(nm)
        # Reset for next arg
        s = i + 1
        eq = None
      elif c == '=':
        # Found start of the value of a keyword arg.
        eq = i
      elif c == "'" or c == '"':
        # Scan forward to end of string.
        i += 1
        while i < n and argspec[i] != c:
          # TODO(wmh): Deal with \' and \".
          i += 1
        # argspec[i] is the end of a literal string - keep parsing.
      elif c == '(':
        # Scan forward to matching ')'
        pcnt = 1
        i += 1
        while i < n:
          if argspec[i] == ')':
            pcnt -= 1
            if pcnt <= 0:
              break
          elif argspec[i] == '(':
            pcnt += 1
          i += 1
        # We are either at the end of argspec, or on a ')', or both.
      i += 1
    
    return names, varargs, varkw, defaults

  def _parseArgInfo(self):
    """Obtain information for each arg.

    Returns: list of dict
      The order of elements matches the order of the args.  Each dict contains:
        name: str
          The name of the arg
        type: str (optional)
          The type of the arg (if known)
        comments: list of str (optional)
          A description of the arg (if known)
        default: str (if a default arg)
          The default value.  If this key does not exist, the arg is a
          positional arg, not a keyword arg.
        prefix: str (optional)
          Not present for normal positional or keyword args.  Has value '*'
          for the variadic list variable, '**' for the variadic keyword
          variable.
    """
    # Process parameters.
    #  - An argspec can look like this
    #       a, b, *args, c=0, d='hello, world', e=f(1,3), **kwds
    #    and thus splitting into components is not as easy as split(',')
    arg_list, varargs, varkw, arg_defs = self._parseArgSpec(
      self.source().argamble()[1:-1])
    # Docstring-derived per-arg info ('type', 'comments'), keyed by arg name.
    arg_docs = self._comments.get('args', {})

    # We skip the first arg in arg_list for 'instance' and 'meta' methods.
    # (level() is always one of 'instance', 'meta' or 'static' — set in
    # __init__ — so 'i' is guaranteed to be bound after this chain.)
    level = self.level()
    if level == 'instance':
      i = 1
      if arg_list[0] != 'self':
        raise Error(
          'Expecting instance method %s to start with self' % self.fullname())
    elif level == 'meta':
      if arg_list[0] != 'cls':
        raise Error('Expecting class method to start with cls')
      i = 1
    elif level == 'static':
      i = 0

    # Index bookkeeping (computed BEFORE the variadic names are appended):
    #   kwd_i: args at index >= kwd_i are *varargs/**varkw entries, which
    #          never carry defaults.
    #   def_i: index of the first arg with a default value (defaults always
    #          belong to the trailing named args).
    kwd_i = len(arg_list)
    def_i = len(arg_list) - len(arg_defs)
    if varargs:
      arg_list.append('*' + varargs)
    if varkw:
      arg_list.append('**' + varkw)

    # Form the return value: one dict for each arg.
    result = []
    for arg in arg_list[i:]:
      arg_info = {'name': arg}
      if i >= def_i and i < kwd_i:
        # This index falls in the defaulted tail of the named args.
        arg_info['default'] = arg_defs[i - def_i]
      if arg in arg_docs:
        # Merge in the docstring-provided type and description.
        arg_doc = arg_docs[arg]
        if 'type' in arg_doc:
          arg_info['type'] = arg_doc['type']
        if 'comments' in arg_doc:
          arg_info['comments'] = arg_doc['comments']
      result.append(arg_info)
      i += 1
    return result
  
  def parseFieldInfo(self):
    """Look for field definitions (possibly with comments) in the method body.

    Returns: list of dict
      The identified fields, in order of first appearance.  Each dict may
      have the keys:
        name: str
          The name of the field (with preceding '_' removed)
        field: str
          The raw name of the field
        type: str (optional)
          The type of the field, if found in a comment.
        comments: list of str (optional)
          The description of the field, if found in a comment.
        value: str (optional)
          The (first line of) the value the field is assigned.
    """
    comstart_re = re.compile('^\s*# field\s+([a-zA-Z0-9_]+)\s*:\s*(.*)')
    com_re = re.compile('^\s*# \s+(.*)')
    field_re = re.compile('.*self\.([a-zA-Z0-9_]+)\s*=\s*(.*)')

    field_order = []        # field names, in order first seen
    fields = {}             # field name -> info dict
    open_comment = None     # info dict of the '# field' comment being parsed

    for src_line in self.source().src().split('\n'):
      start_match = comstart_re.match(src_line)
      if start_match:
        # Start of a structured '# field name: type' comment.
        raw_name, field_type = start_match.groups()
        key = raw_name.lstrip('_')
        open_comment = {'name': key, 'type': field_type, 'comments': []}
        fields[key] = open_comment
        field_order.append(key)
        continue

      cont_match = com_re.match(src_line)
      if open_comment and cont_match:
        # Continuation line of the field comment currently being parsed.
        open_comment['comments'].append(cont_match.group(1).strip())
        continue

      # Anything else ends the current field comment; it may also be an
      # actual 'self._x = ...' assignment (documented or not).
      open_comment = None
      assign_match = field_re.match(src_line)
      if assign_match:
        raw_field, value = assign_match.groups()
        key = raw_field.lstrip('_')
        if key not in fields:
          fields[key] = {'name': key}
          field_order.append(key)
        fields[key]['field'] = raw_field
        fields[key]['value'] = value

    return [fields[key] for key in field_order]

  def _convertDefaultValueToMeta(self, value):
    """Convert a python value to a corresponding Meta value.

    Args:
      value: str
        Valid python that can appear as the default value associated with
        a keyword parameter (this means 'any python expression').  Convert
        to a corresponding Meta syntax.  In situations where there is no such
        syntax, the result is wrapped in {# and #} to indicate that it is
        an inlined baselang string.

    Returns: str
    """
    # TODO(wmh): We need access to the OoplPython instance within this code,
    # so that we can obtain meta-to-python mappings like 'true', 'false',
    # etc.
    config = {
      'self': 'self',
      'cls': 'cls',
      'null': 'None',
      'true': 'True',
      'false': 'False',
    }
    revconfig = {v: k for (k, v) in config.iteritems()}
    if value in revconfig:
      # We have an explicit python literal for which a corresponding Meta
      # literal exists.
      result = revconfig[value]
    elif re.match('^\d+(\.\d+)?$', value):
      # Literal numeric ... pass thru as is.
      result = value
    elif value[0] == '"' and value[-1] == '"':
      result = value
    elif value[0] == "'" and value[-1] == "'":
      # TODO(wmh): Currently, we only parse double-quote strings, so we convert
      # single-quoted strings to doubles. We will probably want Meta to allow
      # both single and double quoted strings though.
      result = value
      #result = '"' + value[1:-1].replace('"', '\\"') + '"'
    else:
      # Do not know how to convert python 'value' to Meta, so we escape it.
      result = '{#%s#}' % value
    return result

  def toMeta(self, metac, output, indent=''):
    """Convert this Method to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentation before each newly added line.
    """
    # NOTE(review): neither 'term' nor the 'indent' parameter is referenced
    # anywhere in this method body — confirm whether they are vestigial.
    term = ';'    # TODO(wmh): Get this from Context.tokens('term').  How?
    rem = '/#'  # TODO(wmh): Get this from Context.tokens('rem').  How?

    mname = self.name()
    cls = self.parent()
    field_map = cls.fieldMap()
    docs = self.comments()

    # ----------------------------------------
    # Establish whether this python method corresponds to a Meta method,
    # field or initializer.
    if mname == '__init__':
      # Initializers are emitted under the class's own name.
      mname = cls.name()
      cons_name = 'initializer'
    elif mname in field_map:
      # This method is a getter for field 'mname'
      # NOTE(review): 'pymeth' and 'test_pymeths' are not defined in this
      # scope (here or in the two branches below); reaching any of these
      # accessor branches raises NameError.  Presumably they should be this
      # method and its test sources — confirm before relying on this path.
      field_map[mname]['accessors']['get'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    elif mname + 'Is' in field_map:
      # This method is a setter for field 'mname'
      # NOTE(review): the guard tests mname + 'Is' in field_map but then
      # indexes field_map[mname] — likely KeyError or inverted logic;
      # confirm the intended key.  Same concern for the 'Ref' branch below.
      # TODO(wmh): Need to support set<Name> too, if we want this to be
      # a general purpose converter.
      field_map[mname]['accessors']['set'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    elif mname + 'Ref' in field_map:
      # This method is a reffer for field 'mname'
      field_map[mname]['accessors']['ref'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    else:
      cons_name = 'method'
    # TODO(wmh): Split out the code that determines if a method is an accessor
    # or not, and do not call this method for accessors.

    # We have a normal method (i.e. not a field accessor)
    if cons_name != 'field':
      # ----------------------------------------
      # Establish the feature attributes.
      features = []

      super_info = self.superInfo()
      if super_info:
        # Inheritance style detected from the super() call in __init__:
        # 'pre_extend', 'post_extend' or 'super_extend'.
        inheritance = super_info['inheritance']
        features.append(inheritance)

      level = self.level()
      if level != 'instance':
        # 'static' and 'meta' are explicit features; 'instance' is the
        # default and is omitted.
        features.append(level)

      output.newline()
      output.addPrimary(cons_name, mname, features=' '.join(features))

      # ----------------------------------------
      # Process parameters.
      args_info = self._parseArgInfo()
      if args_info:
        plist = []
        # Accumulate parameter lines into 'plist' via a cloned output.
        params_output = output.clone(output=plist, delta_indent='  ')
        params_output._newline_blocks = False
        for arg_info in args_info:
          params_output.reset()
          params_output.addPrimary('var', arg_info['name'])
          if 'type' in arg_info:
            basetype = arg_info['type']
            metatype = metac.baselang().baseTypeToMeta(basetype, metac)
            params_output.addSecondary(':', metatype)
          if 'default' in arg_info:
            params_output.addSecondary(
              '=', self._convertDefaultValueToMeta(arg_info['default']))
          comments = arg_info.get('comments', [])
          params_output.addBlock('#', comments, comments=True)
          if params_output.unblocked():
            params_output.addTerm('var', simple=True)
        output.addBlock('params', plist, already_indented=True)

      if super_info and super_info['args']:
        # Record the (escaped) argument string passed to the super() call.
        output.addSecondary(
          'pargs',
          '"%s"' % super_info['args'].replace('"', '\\\"'),
          newline=True)

      # ----------------------------------------
      # Process the 'returns' information
      return_info = docs.get('returns', None)
      if return_info:
        return_type = metac.baselang().baseTypeToMeta(
          return_info['type'], metac)
        output.addSecondary('returns', return_type, newline=True)
        return_comments = return_info['comments']
        if return_comments:
          output.addBlock('returns_', return_comments, comments=True)

      # ----------------------------------------
      # Add the 'comment' block.
      if docs['comments'] or 'raises' in docs:
        clist = []
        c = 0
        for line in docs['comments']:
          c += 1
          if line:
            clist.append('%s %s' % (rem, line))
          else:
            clist.append(rem)
        # TODO(wmh): Introduce syntax into Meta to allow specification of what
        # exceptions a method throws. Probably best if this is a complex-block
        # attribute ('throws', 'raises', etc.) with 'exception' child
        # constructs. This will allow us to specify the exception that is raised
        # AND document the reasons it is raised much more easily than if we use
        # a string-valued attribute that tries to encode name/comment for
        # multiple exceptions.
        if 'raises' in docs:
          if c:
            clist.append(rem)
          clist.append('%s Raises:' % rem)
          exceptions = docs['raises']
          for exception in exceptions.get('list', []):
            err = exception['name']
            clist.append('%s   %s:' % (rem, err))
            if 'first' in exception:
              clist[-1] += ' ' + exception['first']
            try:
              # Presumably guards against exception['comments'] being
              # non-iterable — TODO confirm (a missing key would raise
              # KeyError, which is NOT caught here).
              for line in exception['comments']:
                clist.append('%s     %s' % (rem, line))
            except TypeError:
              print '#' * 80
              print exception
              print '#' * 80
        output.addBlock('comment', clist, already_indented=False)

      # ----------------------------------------
      # Add the 'scope' block.
      #  - there is usually an initial newline (but not if this is a one-line def),
      #    that we want to ignore. More generally, we skip initial blank lines.
      #  - there is always a newline at the end of the body, so we ignore it.
      source = self.source()
      # We cannot simply ask for source.body(sub=False) or source.body(sub=True)
      # because neither source._source nor source._src has indented the 
      # triple-quoted strings with '>|'.
      quote_dent = metac.token('quote_dent') + '  '
      # NOTE(review): 'quote_dent' is computed here but the body() call below
      # passes the literal '>>>>' instead — confirm which is intended.
      # TODO(wmh): We are adding '  ' to quote_dent because of a special case.
      # Suppose we have an inner method with a docstr, like this:
      #   def f(self):
      #     def inner():
      #       """This is a docstr
      #       on multiple lines.
      #       """
      #       pass
      # 
      # This will become meta as:
      #   method f scope:
      #     def inner():
      #       """This is a docstr
      #      >|on multiple lines.
      #      >|"""
      #      pass
      #
      # Which results in an error of the form:
      #    metalib.kernel.parser.SyntaxError:
      #    Line XXX of parser.meta has an invalid indentation.
      #
      # By adding '  ' to quote_dent, it becomes
      #
      #   method f scope:
      #     def inner():
      #       """This is a docstr
      #        >|on multiple lines.
      #        >|"""
      #      pass
      #
      # which avoids the error.  We need a better solution.
      bdy = source.body(sub=False, quote_dent='>>>>')
      #bdy = source.parent().resub(source.body(sub=True), indent='  ' + quote_dent)

      super_match = self.SUPER_RE.match(bdy)
      if super_match:
        # Strip the super() call out of the emitted body; the inheritance
        # feature added above conveys it instead.
        # NOTE(review): 'inheritance' is bound above only when superInfo()
        # was non-None; if this re-match succeeds without that, this raises
        # NameError — confirm the two matches always agree.
        if inheritance == 'post_extend':
          bdy = super_match.groups()[-1]
        elif inheritance == 'pre_extend':
          bdy = super_match.groups()[0]
        elif inheritance == 'super_extend':
          raise Error('Not yet supporting super_extend inheritance')

      if not bdy.strip():
        #bdy = '\n\n'
        #bdy = 'pass\n'
        bdy = '\n'


      body = bdy.split('\n')
      assert body[-1] == '', "Expecting empty last line, found: '%s'" % body[-1]
      # Skip leading blank lines; drop the trailing empty line.
      i = 0
      n = len(body)
      while i < n and not body[i].strip():
        i += 1
      output.addBlock('scope', body[i:-1], force=True)

      # ----------------------------------------
      # Add the 'test' block(s) if there are unit test methods.
      tests = self.testSources()
      if tests:
        test_source = tests[0]
        # A single test method, no parameterization needed.
        body_str = test_source.body().replace('pass\n', '').rstrip()
        if body_str:
          tbody = body_str.split('\n')
          # Skip leading blank lines of the test body.
          i = 0
          n = len(tbody)
          while i < n and not tbody[i].strip():
            i += 1
          output.addBlock('test', tbody[i:])
          if len(tests) > 1:
            # TODO(wmh): we must parameterize the 'test' attribute, but the
            # first parameter is reserved for the baselang, so we need to
            # support multiple-parameters (not yet done).
            print '***** NOT YET supporting multiple test methods (%s)' % (
              self.fullname())

      # ----------------------------------------
      # End the method.
      output.addTerm(cons_name, mname)
