"""Classes for interacting with Python as a base language.

Includes
 - classes for parsing python source code into constituent parts so that
   we can convert python code into meta code.
 - ...
"""

import collections
import os
import pprint
import re
import subprocess
import sys
import tempfile


# Sets up paths to find version-specific meta code.  The env var META_VERSION
# can be used to specify the version (current, beta, previous, ...), namely
# $METAROOT/lib/$version/{meta,python}
import metameta
metameta.Metastrap()

import meta.compiler.errors

class Error(Exception):
  """Base class for all exceptions raised by this module."""


class InternalError(Error):
  """Raised when an internal invariant of this module is violated."""


DEBUG = False


class Source(object):
  """A class representing source code.

  Maintains various versions of the text.
   - the multi-line string
   - the multi-line string with triple-quoted strings replaced with placeholders
  """

  # Matches one triple-double-quoted string (non-greedy, across newlines).
  DOUBLE_RE = re.compile(r'(\"\"\".*?\"\"\")', re.S)
  # Matches one triple-single-quoted string (non-greedy, across newlines).
  SINGLE_RE = re.compile(r"(\'\'\'.*?\'\'\')", re.S)
  # Matches a full triple-quoted string with either quote style (the closing
  # delimiter must match the opening one via the backreference).
  COMMENT_RE = re.compile(r'("""|\'\'\')(.*)(\1)', re.S)
  # Matches a placeholder token as produced by Holder() below.
  HOLDER_RE = re.compile(r'(<@STRING\d+@>)')
  # Matches a placeholder with arbitrary text before and after it.
  HOLDER_INFIX_RE = re.compile(r'(.*)(<@STRING\d+@>)(.*)', re.S)
  # Matches a placeholder that is the first non-whitespace content of a string.
  FHOLDER_RE = re.compile(r'^\s*(<@STRING\d+@>)')

  @classmethod
  def Holder(cls, count):
    """Generate a placeholder string.

    Returns: str
    """
    return '<@STRING%d@>' % count

  def __init__(self, parent, name, start_line, source, points=None, offset=0,
               subdent=''):
    """Initializer.

    Args:
      parent: Source
        The parent Source.  Will be None for Source instances representing
        modules (aka entire files).
      name: str
        The name of this source str.
      start_line: int
        The line number of the first line in 'source'
      source: str
        The text representing the block.
      points: dict
        Specifies the positions of various elements of the entity, as integer
        indices into source.
        # TODO(wmh): Not yet implemented.        
      offset: int
        The position of the start-of-line on which the defining keyword occurs,
        within the source string of the *parent* scope.
        # TODO(wmh): Not yet implemented.
      subdent: str
        The amount of indentation of the body portion of this Source.
    """

    # We first replace all triple-quoted strings with placeholders.
    count = 0
    src = source
    placeholders = {}

    # Process triple-double-quoted strings.
    double_re = self.DOUBLE_RE
    while True:
      double_match = double_re.search(src)
      if double_match:
        count += 1
        placeholder = '<@STRING%d@>' % count
        placeholders[placeholder] = double_match.group(1)
        # TODO(wmh): Is there a more efficient way to replace each string
        # with *different* strings, while keeping track of both the original
        # and replacement strings?
        src = double_re.sub(placeholder, src, count=1)
      else:
        break

    # Process triple-single-quoted strings.
    single_re = self.SINGLE_RE
    while True:
      single_match = single_re.search(src)
      if single_match:
        count += 1
        placeholder = '<@STRING%d@>' % count
        src = single_re.sub(placeholder, src, count=1)
        placeholders[placeholder] = single_match.group(1)
      else:
        break

    def Verify(source, data, key, adj, legal_chars):
      """Ensure that source[data[key] + adj] is one of the chars in legal_chars.

      Args:
        source: str
        data: dict
        key: str
        adj: int
        legal_chars: str
      """
      if key not in data:
        # May no longer be necessary to include this check.
        return
      if data[key] is None:
        # For example, docstr is None if there isn't one.
        return
      index = data[key] + adj
      try:
        if source[index] not in legal_chars:
          print "***** source[data['%s']+%d=%d] == '%s' not in '%s' near '%s'" % (
            key, adj, index, source[index], legal_chars.replace('\n', '\\n'),
            source[index-5:index+5].replace('\n', '\\n'))
      except IndexError:
          print "***** source[data['%s']+%d=%d] invalid (len %d ending '%s')" % (
            key, adj, index, len(source),
            source[-10:len(source)].replace('\n', '\\n'))

    def VerifyStrs(obj, src, source, points, skey, ekey):
      """Ensure that both src and source yield same values.

      Args:
        obj: Source
          The source within which resubs can be performed.
        src: str
          The source string with literals substituted
        source: str
          The original source string.
        points: dict of dicts
          Should contain ['src'][skey] and ['source'][skey] and
          ['src'][ekey] and ['source'][ekey]
        skey: str
          The key to compare.
      """
      ssrc = points['src'].get(skey, None)
      esrc = points['src'].get(ekey, None)
      ssource = points['source'].get(skey, None)
      esource = points['source'].get(ekey, None)
      missing = False
      if ssrc is None:
        #print 'Missing src %s' % skey
        missing = True
      if esrc is None:
        #print 'Missing src %s' % ekey
        missing = True        
      if ssource is None:
        #print 'Missing source %s' % skey
        missing = True        
      if esource is None:
        #print 'Missing source %s' % ekey
        missing = True        
      if missing:
        return
      
      src_str = obj.resub(src[points['src'][skey]:points['src'][ekey]])
      source_str = source[points['source'][skey]:points['source'][ekey]]
      if src_str != source_str:
        print 'MISMATCH between src and source for skey=%s ekey=%s' % (skey, ekey)
        print '#' * 80
        print src_str
        print '-' * 80
        print source_str
        print '#' * 80        

    # Validate points
    if points:
      for stype in ('src', 'source'):
        data = points.get(stype, None)
        if not data:
          continue
        super_source = parent.src() if stype == 'src' else parent.source()
        # We assume there is some preamble before the first class definition,
        # or data['pre'] and data['start'] will be 0.
        Verify(super_source, data, 'pre', -1, '\n')
        Verify(super_source, data, 'start', -1, '\n')
        Verify(super_source, data, 'paren', 0, '(')
        Verify(super_source, data, 'scope', -1, ':')
        Verify(super_source, data, 'scope', -1, ':')
        Verify(super_source, data, 'docstr', 0, '<' if stype == 'src' else '\'"')
        Verify(super_source, data, 'post', -1, '\n')
        Verify(super_source, data, 'end', 0, '\n')

      tsrc = parent.src()
      tsource = parent.source()
      VerifyStrs(parent, tsrc, tsource, points, 'pre', 'start')    # preamble
      VerifyStrs(parent, tsrc, tsource, points, 'post', 'end')     # postamble
      VerifyStrs(parent, tsrc, tsource, points, 'paren', 'scope')  # argamble
      VerifyStrs(parent, tsrc, tsource, points, 'docstr', 'body')  # docstr
      VerifyStrs(parent, tsrc, tsource, points, 'body', 'end')     # body
    else:
      points = {'src': {}, 'source': {}}

    # field name: str
    #   The name of the construct represented by this source block.
    self._name = name

    # field parent: Source or None
    #   The parent Source.  None for modules.
    self._parent = parent

    # field start: int
    #   The line number at which this source starts.  For modules, it should
    #   be 1.  For classes, the linenumber of the 'class' keyword.  For
    #   For methods, the linenumber of the 'def' keyword.
    self._start = start_line

    # field source: str
    #   The entire text of the block as a single string.
    self._source = source

    # field points: dict
    #   Specifies the positions of various elements of the entity, as integer
    #   indices into source.
    self._points = points

    # field offset: int
    #   The position of the start-of-line on which the defining keyword occurs,
    #   within the source string of the *parent* scope.
    self._offset = offset
    # TODO(wmh): When offset is provided, we will be able to map the integers
    # in 'points' above into parental contexts, including being able to find
    # the exact character position within a file (i.e. module) of a particular
    # element of an arbitrary entity.

    # field src: str
    #   The entire text of the block with triple-quoted strings replaced
    #   with placeholders (which reside in self._placeholders).
    self._src = src

    # field placeholders: dict
    #   During parsing, all triple-quoted strings are replaced with
    #   placeholder strings.  This dict maps the placeholder strings to
    #   the triple-quoted text.
    self._placeholders = placeholders

    # field subdent: str
    #   The amount of indentation of the block portion of this Scope.
    self._subdent = subdent

  def name(self):
    """Returns the name of this source block (str, or None for the initial
    pre-keyword block produced by split())."""
    return self._name

  def parent(self):
    """Returns the parent Source, or None for modules."""
    return self._parent

  def source(self):
    """Returns the original text of this block as a single string."""
    return self._source

  def src(self):
    """Returns the text with triple-quoted strings replaced by placeholders."""
    return self._src

  def start(self):
    """Returns the line number at which this source starts."""
    return self._start

  def points(self):
    """Returns the dict of positional indices for this entity (may be None)."""
    return self._points

  def _substr(self, start_key, end_key, end_adj=0, sub=False):
    """Obtain a substr from my parent's source (sub=False) or src (sub=True).

    The self._points dict contains indices into the *parents* 'src' and 'source'
    strings representing the start of various parts of this entity.  For
    example, 'pre' is the start of the preamble, 'start' is the start of the
    entity, 'paren' is the start of the args, 'scope' is the start of the body,
    etc.  This method obtains some portion of the parent's src/source given
    start/end keys.

    Args:
      start_key: str
        One of the secondary keys in self._points ('pre', 'start', 'paren',
        'scope', 'post', 'end')
      end_key: str
        One of the secondary keys in self._points ('pre', 'start', 'paren',
        'scope', 'post', 'end')
      end_adj: int
        How much to adjust the end index by.
      sub: bool
        If True, use self.src(), else self.source().
    """
    parent = self.parent()
    if parent:
      (key, text) = ('src', parent.src()) if sub else ('source', parent.source())
      points = self._points
      if (not points or
          points[key][start_key] is None or points[key][end_key] is None):
        # docstr is one example of where this happens ... None if it doesn't
        # exist, in which case we return an empty substr.
        result = ''
      else:
        result = text[points[key][start_key]:points[key][end_key]+end_adj]
    else:
      result = ''
    return result

  def preamble(self, sub=False):
    """Returns the text between the previous sibling and this entity."""
    return self._substr('pre', 'start', sub=sub)

  def postamble(self, sub=False):
    """Returns the trailing text after this entity's body, newline included."""
    return self._substr('post', 'end', end_adj=1, sub=sub)

  def argamble(self, sub=False):
    """Returns the argument-list text, from '(' up to (not including) ':'."""
    return self._substr('paren', 'scope', end_adj=-1, sub=sub)

  def docstr(self, sub=False, indent=False):
    """Returns the docstring text; if indent is False, body indentation
    (self._subdent) is stripped from each line."""
    result = self._substr('docstr', 'body', sub=sub)
    if not indent:
      result = result.replace('\n' + self._subdent, '\n')
    return result

  def body(self, sub=False, indent=False):
    """Returns the body text; if indent is False, body indentation
    (self._subdent) is stripped from each line."""
    result = self._substr('body', 'post', sub=sub)
    if not indent:
      result = result.replace('\n' + self._subdent, '\n')
    return result

  def placeholders(self):
    """Returns the dict mapping placeholder tokens to triple-quoted text."""
    return self._placeholders

  def split(self, keyword, indent):
    """Split the source on the given keyword.

    Args:
      keyword: str
        Either 'class' (for splitting modules) or '  def' (for splitting
        classes)
      indent: str
        The amount of indentation expected before the keyword (which always
        appears on a new line).

    Returns: dict
      Maps names of keyword constructs to Source instances.  The first entry
      (keyed by None) holds the text appearing before the first construct.
    """
    splitter = '\n' + indent + keyword + ' '
    splitter_re = re.compile(r'%s%s\s+([a-zA-Z0-9_]+)' % (indent, keyword))

    fullsrc = self.source()
    src = self.src()
    max_index = len(src) - 1
    full_max_index = len(fullsrc) - 1
    start = self.start()

    # dentlen: How much indentation appears before the target keywords.  For
    # modules, it will be 0.  For classes it will a number like 2 or 4
    # (depending on style guide conventions for the python code being parsed),
    # and for methods it will usually be 2 times what the number for classes
    # is (again, depending on style guide conventions for the python code
    # in question).
    dentlen = len(indent)

    # subs: The collection of Source instances (delimited by 'keyword') that
    # make up this source string.
    subs = collections.OrderedDict()

    # start_index: the first character in the current "block" of text.
    # The char before start_index should always be a newline (if
    # start_index > 0).
    start_index = 0

    # pre_index: the first character in the "preamble" for the block starting
    # with start_index and ending with end_index. Note that pre_index <=
    # start_index (i.e. it is at the tail end of the *previous* block). It is
    # usually equal to start_index, indicating that there is no preamble for
    # the current block.  The char before pre_index should always be a newline
    # (if pre_index > 0).
    pre_index = 0

    # end_index: the newline that starts the string represented by 'splitter'.
    # That is, it is the newline before indent + keyword.
    end_index = src.find(splitter)

    if end_index == -1:
      # There are none of the desired keywords
      pass

    else:
      while True:
        # One of the primary purposes of this following code is to define a
        # special 'points' dict of dicts. It stores the character positions of
        # various starting points for this entity, within both the full source
        # of the parent entity, and the triple-quote-replaced src of the parent
        # entity. Source instances for modules do not maintain a points
        # data-structure. Furthermore, when parsing a module Source into class
        # Sources, or a class Source into method Sources, we create a special
        # Source instance to represent the text before the first class or
        # method definition. These initial Source instances also do not contain
        # points instances.
        #
        # The keys in points are 'src' and 'source', and each of these is a
        # dict containing the following keys:
        #    pre:
        #     - The first char of the preamble text for this entity.  This
        #       should always be the start of a line.
        #     - src[pre-1] == '\n'
        #    start:
        #     - The first char of the line that the keyword appears on
        #       (keyword will have indent before it).
        #     - src[start-1] == '\n'
        #    paren:
        #     - The char of the '(' in the declaration.
        #     - src[paren] == '('
        #    scope:
        #     - The char after the '):' closing out the signature.
        #     - src[scope] is usually a newline, but isn't if the body is 1 line
        #       and on the same line as the declaration.
        #     - src[scope-1] == ':'
        #    docstr:
        #     - the char starting the docstr
        #     - if there is no docstr, this key exists with value None
        #     - src[docstr] is either a '"' or a "'"
        #    body:
        #     - the char immediately following the docstr (if there is one),
        #       or equal to 'scope' if there is no docstr.
        #    post:
        #     - The char position after the body. The body always includes
        #       the newline at the end of the last line of the body, so
        #       post is the position after that newline
        #     - Is usually another newline (blank lines between entities), but
        #       need not be (if this is the last entity in the parent scope,
        #       or if there is text immediately after the body).
        #     - src[post-1] == '\n'
        #    end:
        #     - The position of the last character in this entity (i.e.
        #       last position of the post amble, NOT including the preamble
        #       of the next entity).
        #     - src[end] == '\n'.
        points = {'src': {}, 'source': {}}

        # start_index is the first character in this entity (either 0, or
        # immediately after the newline ending the previous entity).  However,
        # it is possible that some of the text at the end of the previous
        # entity is more accurately associated with this entity as preamble
        # (as opposed to postamble of that previous entity).
        #
        # The last time through this loop, pre_index will have been set to
        # the position marking the start of preamble text for this entity (i.e.
        # pre_index <= start_index. The first time through the loop, it is set
        # to 0, as is start_index.
        preamble = self.resub(src[pre_index:start_index])
        points['src']['pre'] = pre_index
        points['src']['start'] = start_index

        # Now that we've used pre_index to obtain the preamble for this
        # entity, we want to compute the preamble for the next entity.
        # Starting from end_index, if any line is indented exactly the same
        # amount as we expect from our construct definitions, it is
        # considered preamble for the next entity, not post amble for this
        # entity.  Note that this code applies equally to the first special
        # Source instance and all others, so it appears here, before we
        # special-case the first instance.
        nl_index = end_index
        while True:
          pre_index = nl_index + 1
          nl_index = src.rfind('\n', start_index, nl_index)

          if nl_index == -1:
            # There is no newline before pre_index
            break
          elif nl_index >= max_index:
            # No more lines
            break
          elif src[nl_index + 1:nl_index + dentlen + 1] != indent:
            # The previous line does not have sufficient indentation
            break
          else:
            first_after_indent = src[nl_index+dentlen+1]
            if first_after_indent.isspace():
              # The previous line has too much indentation.
              break

        if len(subs) == 0:
          # This is the first time through the loop, which means this isn't
          # an entity (it is all text of the parent before the first child,
          # including parent preamble, parent declaration, args, docstr and any
          # preamble before the first child).
          #
          # Because this doesn't represent a sub-entity, there is no point
          # in initializing the points datastructure (it is set to None).
          # However, we *do* want to ensure that this text is captured so that
          # it can be added as a native block in generated code.
          #
          # Consider the situation where we are parsing the methods within
          # a class. The previousSource() of the first Method within the class
          # provides access to this text.

          points = None
          preamble = ''
          argamble = ''
          postamble = ''
          body = ''
          sub_src = src[start_index:end_index + 1]
          sub_src_len = len(sub_src)
          name = None
          sub_source = self.resub(sub_src)
          subindent = ''

        else:
          # The starting linenumber of the current block needs to be adjusted
          # forward by the number of newlines in preamble.
          start += preamble.count('\n')

          # At this point, pre_index represents the start of any preamble for
          # the block that comes after this one, and thus pre_index - 1
          # represents the newline of the end of text for the current block.
          # Often, pre_index - 1 == end_index, indicating that there is no
          # preamble for the next block.
          actual_end_index = pre_index - 1

          # Identify where the paren, scope, docstr and body start.
          (close_paren_index, body_end_index,
           docstr, subindent, indent_len, single_line_def,
           sub_src, postamble) = (
            self._identifySrcParts(
              src, indent, start_index, actual_end_index, points))

          # Obtain the portion of 'src' representing this entity.
          sub_match = splitter_re.match(sub_src)
          name = sub_match.group(1) if sub_match else None
          sub_source = self.resub(sub_src)

          # Establish the body of this entity.  It consists of everything from
          # the character after '):' (usually a newline) up to the newline at
          # the end of the last line in the body.  If 'subindent' represents
          # the amount of indentation of the first line in the method body,
          # we remove 'subindent' spaces from the start of every line in the
          # body.
          subindent = indent + '  '  # TODO(wmh): Discover this, not hardcode!
          body = self.resub(src[points['src']['scope']:body_end_index + 1]).replace(
            '\n' + subindent, '\n')

          # Now establish the various indices within fullsrc for
          # points['source'] that correspond to those in points['src'] for src.
          decl_str = src[start_index:close_paren_index]
          full_start_index = fullsrc.find(decl_str)
          if full_start_index == -1:
            raise Error(
              'Failed to find\n-------------\n%s\n-------------\nin full source'
              % decl_str)
          points['source']['start'] = full_start_index
          points['source']['paren'] = fullsrc.find('(', full_start_index)
          points['source']['scope'] = fullsrc.find(
            '):', points['source']['paren']) + 2
          if docstr is None:
            points['source']['docstr'] = None
            points['source']['body'] = points['source']['scope']
          else:
            points['source']['docstr'] = points['source']['scope'] + indent_len
            points['source']['body'] = points['source']['docstr'] + len(docstr)
          # TODO(wmh): Need to establish the index within fullsrc at which this
          # enity ends, and limiting the subsequence search for postamble to
          # be between the start of the body (within fullsrc) and
          # full_end_index, searching from the end backward.  Even that might
          # still be heuristic rather than algorithmic.
          after_index = src.find('\n', end_index + 1)
          if after_index != -1:
            next_decl = src[end_index + 1:after_index + 1]
            idx = fullsrc.find(next_decl, full_start_index)
            if idx == -1:
              raise Error('Failed to find "%s" in full source' % next_decl)
            else:
              full_end_index = idx - 1
          else:
            # Last entity in the scope.
            full_end_index = len(fullsrc) - 1
          # We know that src[pre_index:end_index] represents preamble for
          # the next entity, so full_end_index is decremented by the same diff.
          full_end_index -= (end_index - pre_index + 1)
          if fullsrc[full_end_index] != '\n':
            raise Error('full_end_index = %d[%d] = %s in %s' % (
              full_end_index, full_max_index, fullsrc[full_end_index], fullsrc[full_end_index-10:full_end_index+10]))
          points['source']['end'] = full_end_index

          if not preamble:
            full_pre_index = full_start_index
          else:
            # We want to search backwards from full_start_index for preamble
            full_pre_index = fullsrc.rfind(
              preamble, 0, full_start_index + 1)
            if full_pre_index == -1:
              raise Error(
                'Failed to find\n-------------\n%s\n-------------\nin full source'
                % preamble)
          if fullsrc[full_pre_index-1] != '\n':
            raise Error(
              "Found fullsrc[%d] == '%s' != '%s'" %
              (full_pre_index-1, fullsrc[full_pre_index-1], '\n'))
          points['source']['pre'] = full_pre_index

          if not postamble:
            # There is no postamble for src, so there isn't for source either.
            full_post_index = full_end_index + 1
          elif single_line_def:
            full_post_index = fullsrc.find('\n', full_start_index) + 1
          else:
            # We want to search backwards from full_end_index
            full_post_index = fullsrc.rfind(
              postamble, points['source']['scope'], full_end_index + 1)
            if full_post_index == -1:
              raise Error(
                'Failed to find\n-------------\n%s\n-------------\nin full source'
                % postamble)
          if fullsrc[full_post_index - 1] != '\n':
            print "Looking for \n-------------\n'%s'\n-------------\nin\n=============\n%s\n=============" % (
              postamble.replace('\n', '\\n'), fullsrc[points['source']['scope']:full_end_index + 1])
          points['source']['post'] = full_post_index

          assert (postamble ==
                  fullsrc[points['source']['post']:points['source']['end']+1])
          assert (preamble ==
                  fullsrc[points['source']['pre']:points['source']['start']])
          argamble = src[points['src']['paren']:points['src']['scope']-1]
          assert (argamble ==
                  fullsrc[points['source']['paren']:points['source']['scope']-1])
          assert (body ==
                  fullsrc[points['source']['scope']:points['source']['post']].replace(
                    '\n' + subindent, '\n'))
          if docstr:
            assert (docstr ==
                    fullsrc[points['source']['docstr']:points['source']['body']])

        # Print out some debugging info.
        if DEBUG:
          if name is None:
            # points should be None here.
            print '  Parsing initial preamble at %d (%s)' % (start, str(points))
            print '    sub_src = %s' % sub_src
          else:
            print '  Parsed %s %s at line %d (%s)' % (keyword, name, start, str(points))
            if name.startswith('data'):
              print '    body: %s' % src[start_index:body_end_index]
        # Create the sub Source instance.
        sub_obj = Source(
          self, name, start, sub_source, points=points, subdent=subindent)
        if name in subs:
          raise Error('Found multiple %s with name %s' % (keyword, name))
        subs[name] = sub_obj

        # If we've reached the end of source, we are done.
        if end_index >= max_index:
          break

        # The line number of the next block is determined by adjusting the
        # current line number up by the number of newlines in sub_source
        # (plus one, because sub_source doesn't end with a newline).
        start += sub_source.count('\n') + postamble.count('\n')

        # The next sub block starts after the newline at src[end_index]
        start_index = end_index + 1
        end_index = src.find(splitter, start_index)
        if end_index == -1:
          end_index = max_index

      # If pre_index < max_index, there is a trailing portion of text that
      # was destined to be the preamble for the next child, but since we've
      # parsed all children, we need to add it into the last child.  This is
      # easily fixed by updating the 'end' of that last child to match the
      # end of its parents entire src/source string.
      # NOTE: subs.keys()[-1] relies on Python 2 dict.keys() returning a
      # list; under Python 3 this would need list(subs)[-1].
      last_child = subs[subs.keys()[-1]]
      last_points = last_child.points()
      last_points['src']['end'] = max_index
      last_points['source']['end'] = full_max_index

    return subs

  def _identifySrcParts(self, src, indent, start_index, end_index, points):
    """Locate the structural parts of one entity within src.

    Identifies the argument list, scope start, optional docstring, body and
    postamble of the construct between start_index and end_index, and records
    the discovered positions in points['src'].

    Args:
      src: str
        The placeholder-substituted string from which to extract the parts.
      indent: str
        The indentation of the declaration being parsed (body is indented
        further).
      start_index: int
        The index within src from which to start parsing.
      end_index: int
        The index within src to stop parsing.
      points: dict of dicts
        Maps 'src' and 'source' to dicts mapping conceptual position names
        to integer indices within src where that position starts.  This
        method fills in points['src'].

    Returns: tuple
      [0] int: index within src of the '):' closing the argument list
      [1] int: index within src of the last character of the body
      [2] str or None: the docstring text, or None if there is no docstring
      [3] str: the indentation of the body ('' for single-line definitions)
      [4] int: length of the whitespace between scope start and first token
      [5] bool: True if the body appears on the declaration line itself
      [6] str: the substring of src holding this entity (without postamble)
      [7] str: the postamble text (with placeholders reinstated)
    """
    # Establish the "arg" portion of this entity.  Note that if any
    # optional arg has a string whose default value contains ':)', this
    # will break.  Also, we are currently not resubbing argamble under
    # the assumption that it won't have triple-quotes in it.
    open_paren_index = src.find('(', start_index)
    close_paren_index = src.find('):', open_paren_index)
    nl_paren_index = src.find('\n', close_paren_index)
    scope_index = close_paren_index + 2
    decl_line_trailing = src[scope_index:nl_paren_index]
    single_line_def = (
      decl_line_trailing and not decl_line_trailing.isspace())

    # Establish the subindent, whether this is a one-line defn,
    # and the position of the start of the docstr (if there is one).
    docstr = None
    pre_ws, token, post_ws = re.match(
      '(\s*)(\S+)(\s*)', src[scope_index:end_index]).groups()
    indent_len = len(pre_ws)
    token_index = scope_index + indent_len
    nl_index = src.rfind('\n', scope_index, token_index)
    if nl_index == -1:
      # This is a single line def.
      if not single_line_def:
        raise Error('invalid non single_line_def!')
      docstr_index = None
      subindent = ''
    else:
      if single_line_def:
        raise Error('invalid single_line_def!')
      # The body of this source code is indented by the amount between
      # nl_index+1 and docstr_index.
      subindent = src[nl_index+1:token_index]
      # Now determine if there is a docstr.
      ph_match = self.HOLDER_RE.match(token)
      if ph_match:
        # We've found a docstr.
        docstr_index = token_index
        docstr = self.strFor(ph_match.group(1))
      else:
        # There is no docstr.
        docstr_index = None

    # Now establish where the body starts.
    if docstr_index:
      if post_ws[0] != '\n':
        # TODO(wmh): This error handling needs to be cleaned up.  If there
        # are trailing spaces on the line that ends the docstr, we get into
        # this code.
        subname = src[start_index:open_paren_index]
        print (
          'WARNING: docstr %s expected to be followed by newline\n'
          '(somewhere after line %d in %s)'
          % (token, self.start(), subname))
      body_index = docstr_index + len(token)
    else:
      body_index = scope_index

    # We now establish the portion of src[start_index:body_end_index]
    # that represents the actual scope for the current construct. It is
    # possible that there is "postamble" text after the indented block.
    body_end_index = end_index
    more_dent = indent + ' '
    more_len = len(more_dent)
    nl_index = src.find('\n', scope_index)
    if single_line_def:
      # This method consists of a single line, so the postamble is
      # everything after nl_index.
      pass
    else:
      while nl_index != -1:
        if nl_index >= end_index - 1:
          break
        prev_index = nl_index
        s_index = nl_index + 1
        nl_index = src.find('\n', s_index)
        # src[s_index:nl_index] represents a line of text.  If it starts
        # with more_dent, it is part of the body of the construct being
        # parsed.  The first line that does not start with more_dent is
        # the start of the postamble.
        if src[s_index] == '\n' or src[s_index:s_index+more_len] == more_dent:
          # The line from s_index to nl_index is either empty, or contains
          # at least more_dent leading indentation ... we continue the loop.
          pass
        else:
          # We've found the first non-within-block line, and we need to
          # move back one line.
          nl_index = prev_index
          break
    if nl_index != -1:
      assert src[nl_index] == '\n'
      # Back up over runs of blank lines so they become postamble, not body.
      while src[nl_index - 1] == '\n':
        nl_index -= 1
    if nl_index != -1 and nl_index < end_index:
      body_end_index = nl_index
      postamble = self.resub(src[body_end_index + 1:end_index + 1])
    else:
      body_end_index = end_index
      postamble = ''
    sub_src = src[start_index:body_end_index + 1]

    points['src']['paren'] = open_paren_index
    points['src']['scope'] = scope_index
    points['src']['docstr'] = docstr_index
    points['src']['body'] = body_index
    points['src']['post'] = body_end_index + 1
    points['src']['end'] = end_index

    return (
      close_paren_index, body_end_index,
      docstr, subindent, indent_len, single_line_def, sub_src, postamble)

  def strFor(self, placeholder):
    """Given a placeholder string, return the triple-quoted string it represents.

    Args:
      placeholder: str
        A str that matches Source.HOLDER_RE$.

    Returns: str or None
      A triple-quoted string, with the triple quotes intact, start and end.
    """
    return self.placeholders().get(placeholder, None)

  def resub(self, text, indent=''):
    """Reinstated triple-quoted strings in text with placeholders.

    Args:
      text: str
        A multi-line string with placeholders instead of triple-quoted strings.
      indent: str
        What to add before each line.
    """
    holder_re = self.HOLDER_RE
    placeholders = self.placeholders()
    replaced = {}
    while True:
      match = holder_re.search(text)
      if not match:
        break
      placeholder = match.group(1)
      if placeholder in replaced:
        moreinfo = ''
        moreinfo = '\n' + text
        raise Error(
          'Encountered %s when it was supposedly already replaced.%s' % (
            placeholder, moreinfo))
      replaced[placeholder] = True
      quote = placeholders[placeholder]
      if indent:
        quote = quote.replace('\n', '\n' + indent)
      
      # TODO(wmh): This code causes an infinite loop.  To see a reduced
      # example of the problem:
      #   >>> tre = re.compile('(<T>)')
      #   >>> s = 'Some text with a <T> embedded within'
      #
      #   # We want to replace '<T>' in s with some verbatim text, for
      #   # example the value in 'r'.
      #   >>> r = r'test \1 it'
      #   >>> tre.sub(r, s, count=1)
      #   'Some text with a test <T> it embedded within'
      #   # Note that we did not replace <T> wth exactly r, because the \1
      #   # in 'r' has been interpreted as a group substitution request.
      #
      #   # Try using a non-raw string:
      #   >>> r2 = 'test \1 it'
      #   >>> tre.sub(r2, s, count=1)
      #   'Some text with a test \x01 it embedded within'
      #   # Which is at least not an infinite loop, but still does not
      #   # correctly replace <T> with the exact text requested.

      if False:
        text = holder_re.sub(quote, text, count=1)
      else:
        text = text.replace(placeholder, quote)
      
    return text

  def show(self, fp=sys.stdout):
    
    num = self.start()
    for line in self.source().split('\n'):
      fp.write('%4d: %s\n' % (num, line))
      num += 1
    postamble = self.postamble()
    if postamble:
      if postamble[-1] == '\n':
        postamble = postamble[:-1]
      for line in postamble.split('\n'):
        fp.write('%4d: %s\n' % (num, line))
        num += 1


class SourceGroup(object):
  """An ordered collection of Source instances, with searching support."""

  def __init__(self, keyword):
    # field keyword: str
    #   The construct keyword shared by these sources (e.g. 'class' or
    #   'def'); used only in duplicate-name error messages.
    self._keyword = keyword
    # field sources: collections.OrderedDict
    #   Maps source name -> Source, preserving insertion order.
    self._sources = collections.OrderedDict()

  def sources(self):
    """Returns the underlying name -> Source ordered mapping."""
    return self._sources

  def add(self, source):
    """Register a Source instance, keyed by its name.

    Args:
      source: Source

    Raises:
      Error: if a Source with the same name has already been added.
    """
    key = source.name()
    if key in self._sources:
      raise Error('Found multiple %s with name %s' % (self._keyword, key))
    self._sources[key] = source

  def find(self, name):
    """Obtain the Source instance with given name.

    Args:
      name: str

    Returns: Source or None
    """
    return self._sources.get(name, None)

  def names(self):
    """Returns the names of all Source instances, in order added.

    Returns: list of str
    """
    return self._sources.keys()


class Output(object):
  """Accumulates generated meta-language source lines.

  Tracks indentation, how many block attributes have been written since
  the last reset(), and how construct terminators should be rendered
  (controlled by a termcode bitmask).
  """

  # Matches the leading whitespace of a python comment line, so the '#'
  # can be rewritten with the target language's comment token.
  COMMENT_RE = re.compile(r'^(\s*)#')

  def __init__(self, output=None, indent='', newline_blocks=False, termcode=15,
               newline_features=True):
    """Initializer.

    Args:
      output: list of str or None
        Where to write output.  A fresh list is created when None.
      indent: str
        How much indentation before each line
      newline_blocks: bool
        Always put new block attributes on a new line.
      termcode: int
        Bitmask controlling how terminators are rendered (see addTerm):
        0x1 appends ';', 0x2 emits 'end', 0x4 emits the construct name,
        0x8 emits the uid.
      newline_features: bool
        If True, newline after features before primary.
    """
    if output is None:
      output = []

    # field output: list of str
    #   Where to write output
    self._output = output

    # field indent: str
    #   What to add before each line
    self._indent = indent

    # field newline_blocks: bool
    #   If True, block-valued secondaries are always started on a new line.
    self._newline_blocks = newline_blocks

    # field newline_features: bool
    #   If True, features are emitted on their own line before the primary.
    self._newline_features = newline_features

    # field block_count: int
    #   Number of block attributes already written
    self._block_count = 0

    # field termcode: int
    #   Default bitmask used by addTerm() when no override is given.
    self._termcode = termcode

    # field auto_idx: int
    #   The index to use for auto_ids.
    self._auto_idx = 0

    # field last_nonblock: bool
    #   True if the last written field was NOT block-valued. Affects
    #   how terminators are written.
    self._last_nonblock = True

    # field last_empty: bool
    #   True if the last attribute written had an empty value.
    self._last_empty = False

  def reset(self):
    """Clear block count."""
    self._block_count = 0

  def unblocked(self):
    """True if no block attributes have been added since last reset()."""
    return self._block_count <= 0

  def clone(self, output=None, delta_indent=''):
    """Create a new Output sharing this one's settings.

    Args:
      output: list of str or None
        Target line list for the clone (a fresh list when None).
      delta_indent: str
        Extra indentation appended to this Output's indent.

    Returns: Output
    """
    return self.__class__(
      output=output, indent=self._indent + delta_indent, termcode=self._termcode,
      newline_blocks=self._newline_blocks,
      newline_features=self._newline_features)

  def lines(self):
    """Returns the accumulated output lines (list of str)."""
    return self._output

  def addSecondary(self, key, value, newline=False):
    """Add a non-block-valued secondary key.

    If the key is block-valued, use addBlock() instead.

    Args:
      key: str
        The secondary attribute key
      value: str
        The value associated with the key.
      newline: bool
        If True, add on separate line, not at end of current line.
    """
    # For now, we always add secondaries to the end of the last output line,
    # but we will want to provide intelligent line wrapping at some point.
    if newline:
      self._output.append(self._indent + key + ' ' + value)
    else:
      self._output[-1] += ' ' + key + ' ' + value
    self._last_nonblock = True

  def addBlock(self, key, lines, comments=False, already_indented=False,
               force=False):
    """Add a block to output.

    Args:
      key: str
        The block attribute key.
      lines: list of str
        The block body, one entry per line.
      comments: bool
        If True, each line is prefixed with the comment token.
      already_indented: bool
        If True, lines are already indented.
      force: bool
        If True, always add even if lines are empty.
    """
    rem = '/#' # TODO(wmh): Generalize this (from context)
    if not lines and not force:
      return
    output = self._output
    if already_indented:
      indent = ''
      subindent = ''
    else:
      indent = self._indent
      subindent = indent + '  '
    if comments:
      subindent += rem + ' '
    if not self._newline_blocks and not self._block_count:
      # We have not yet generated any block attributes, and
      # are not forcing block attriutes to new lines, so we add
      # this block key to the current line.
      output[-1] += ' %s:' % key
    else:
      output.append(self._indent + key + ':')
    self._block_count += 1
    for line in lines:
      # Rewrite a leading python '#' as the meta comment token.
      adjline = self.COMMENT_RE.sub(r'\1' + rem, (subindent + line).rstrip())
      output.append(adjline)
    self._last_nonblock = False
    self._last_empty = len(lines) == 0

  def addPrimary(self, cons_name, uid=None, features=''):
    """Start a construct: emit its (optional) features, name and uid.

    Args:
      cons_name: str
        The construct name (e.g. 'class', 'field', 'native').
      uid: str or None
        The construct identifier; auto-generated ('_N') when None.
      features: str
        Feature string emitted before the construct.
    """
    if uid is None:
      self._auto_idx += 1
      uid = '_%d' % self._auto_idx

    if self._newline_features:
      if features:
        self._output.append(self._indent + features)
      self._output.append(self._indent + cons_name + ' ' + uid)
    else:
      if features:
        features += ' '
      self._output.append(
        '%s%s%s %s' % (self._indent, features, cons_name, uid))

  def addTerm(self, cons_name, uid=None, termcode=None, simple=False):
    """Add a terminator.

    Args:
      cons_name: str
        The construct name being terminated.
      uid: str
        The construct identifier (emitted when termcode bit 0x8 is set).
      termcode: int or None
        Bitmask override for this terminator; when None the instance
        default is used.  BUG FIX: this argument was previously ignored.
      simple: bool
        If True, always just insert ';' at end of last line.
    """
    tc = self._termcode if termcode is None else termcode

    # If the last attribute is block-valued and empty, do we want to insert
    # an explicit terminator, or no?
    if self._last_empty:
      # tc |= 0x3
      pass

    # Some special casing first
    if not self._block_count or simple or self._last_nonblock:
      self._output[-1] += ';'
    else:
      parts = []
      if tc & 0x2:
        parts.append('end')
      if tc & 0x4:
        parts.append(cons_name)
      if tc & 0x8 and uid:
        parts.append(uid)
      res = ' '.join(parts)
      if tc & 0x1:
        res += ';'
      if res:
        self._output.append(self._indent + res)

  def newline(self, count=1):
    """Append 'count' blank lines."""
    for _ in range(count):
      self._output.append('')

  def size(self):
    """Returns the number of accumulated lines."""
    return len(self._output)


class Entity(object):
  """An abstract superclass of Module, Class and Method.

  Holds the Source for a block of code, its associated test Sources, and
  links to the sibling (previous) and parent entities, plus helpers shared
  by all entity kinds for parsing children and emitting meta output.
  """

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None):
    """Initializer.

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source or None
        Any test code associated with this entity.  Some entities have one
        test entity, some have zero, some have many (i.e. modules and
        classes usually have one, methods may have multiple).
      previous_source: Source or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources of the previous entity at the same level.
      parent: Entity or None
        The block that contains this block.
    """
    # field source: Source
    #   The source for this block
    self._source = source

    # field test_sources: list of Source
    #   The test sources for this block
    self._test_sources = test_sources

    # field previous_source: Source
    #   The source for the previous block at the same level
    self._previous_source = previous_source

    # field previous_test_sources: list of Source
    #   The source for the previous test blocks at the same level
    self._previous_test_sources = previous_test_sources

    # field parent: Entity
    #   The entity containing this one (None for modules).
    self._parent = parent

  def parseSource(self, source_kind, child_class, child_keyword, child_indent,
                  child_testname_func, source, test_sources=None):
    """Parse source into a collection of entities.

    Args:
      source_kind: str
        One of 'module' or 'class'.
      child_class: class
        One of python.Class or python.Method.
      child_keyword: str
        One of 'class' or 'def'
      child_indent: str
        The indentation at which the class keyword is expected.
      child_testname_func: function
        Accepts an entity name, and returns back the name of the test entity
        associated with that entity name.
      source: Source
        The source object for this Entity instance.
      test_sources: list of Source or None
        The source objects of all tests for this Entity instance.  We currently
        assume that there is only one test module for each module, and one
        test class for each class, but potentially multiple test methods for
        each method.

    Returns: two-tuple
     [0] collections.OrderedDict
       Maps child name -> child_class instance parsed from source.
     [1] collections.OrderedDict
       Maps entities within the Test entity that do not match child_testname_func

    Raises:
      Error: if more than one test source is supplied.
    """
    entities = collections.OrderedDict()

    # Obtain the basics.  Normalize test_sources to a list.
    if test_sources is None:
      test_sources = []
    elif isinstance(test_sources, Source):
      test_sources = [test_sources]
    if len(test_sources) > 1:
      raise Error(
        'Currently not supporting more than one %s test source' % source_kind)
    test_source = test_sources[0] if test_sources else None

    # Parse the module and test_module into Source instances representing
    # entities.
    #
    # TODO(wmh): Support multiple test sources!
    if DEBUG:
      print 'Parsing source %s' % source.name()
    sub_sources = source.split(child_keyword, child_indent)
    if DEBUG:
      print 'Parsing test sources...'
    sub_test_sources = (
      test_source.split(child_keyword, child_indent) if test_source else {})

    # Start by assuming every named test entity is unclaimed; entries are
    # removed below as core entities claim them.
    unclaimed_test_entities = collections.OrderedDict()
    for k in sub_test_sources.keys():
      if k:
        unclaimed_test_entities[k] = sub_test_sources[k]

    # For each Source (except the first), create a Class instance that combines
    # the appropriate information from module and test_module.
    previous_source = None
    previous_test_sources = None
    for sub_name, sub_source in sub_sources.iteritems():
      if sub_name is None:
        # This is the preamble source ... we do not make a class from it.

        #print 'Here with %s' % sub_name
        #sub_source.show()
        previous_test_sources = None

      else:
        # Claim every test source whose name starts with this entity's
        # test name (e.g. 'test_foo' and 'test_foo_extra' for 'foo').
        sub_test_name = child_testname_func(sub_name)
        entity_specific_test_sources = []
        for name in sub_test_sources.keys():
          if name and name.startswith(sub_test_name):
            entity_specific_test_sources.append(sub_test_sources[name])
            if name in unclaimed_test_entities:
              del unclaimed_test_entities[name]
        #print '%s = %s' % (sub_name, str([s.name() for s in entity_specific_test_sources]))
        sub_entity = child_class(
          sub_source,
          test_sources=entity_specific_test_sources,
          previous_source=previous_source,
          previous_test_sources=previous_test_sources,
          parent=self)
        #print '%s = %s' % (
        #  sub_entity.name(), [s.name() for s in sub_entity.testSources()])
        entities[sub_name] = sub_entity
        previous_test_sources = entity_specific_test_sources
      previous_source = sub_source

    # Wrap each unclaimed test Source in an entity of its own (values are
    # replaced in place; the key set is not modified during iteration).
    for entity_name, entity_source in unclaimed_test_entities.iteritems():
      unclaimed_test_entities[entity_name] = child_class(
        entity_source, parent=self)
    return entities, unclaimed_test_entities

  def name(self):
    """Returns the name of this entity's Source (str)."""
    return self.source().name()

  def parent(self):
    """Returns the containing Entity, or None."""
    return self._parent

  def source(self):
    """Returns this entity's Source."""
    return self._source

  def previousSource(self):
    """Returns the Source of the previous sibling, or None."""
    return self._previous_source

  def testSources(self):
    """Returns the list of test Sources for this entity."""
    return self._test_sources

  def previousTestSources(self):
    """Returns the previous sibling's test Sources, or None."""
    return self._previous_test_sources

  def start(self):
    """Returns the starting line number of this entity's Source."""
    return self.source().start()

  def fullname(self):
    """Returns the dotted name from the root entity down to this one."""
    names = []
    obj = self
    while obj:
      name = obj.name()
      names.append(name)
      obj = obj.parent()
    return '.'.join(reversed(names))

  def splitText(self, text):
    """Split text into lines, ignoring initial and trailing blank lines.

    Args:
      text: str

    Returns: list of str
    """
    lines = text.split('\n')
    n = len(lines)
    i = 0
    while i < n and not lines[i].strip():
      i += 1
    j = len(lines) - 1
    while j > 0 and not lines[j].strip():
      j -= 1
    return lines[i:j+1]

  def nativeToMeta(self, output, lines, newlines=1):
    """Write literal lines in a native block.

    Args:
      output: Output
        Where to write the output.
      lines: list of str
        The literal lines to write.  NOTE: trailing blank lines are
        removed from this list in place (via pop).
      newlines: int
        Number of blank lines emitted before the native block.
    """
    N = len(lines)

    # TODO(wmh): Do we want to remove leading and trailing blanks, or do
    # we leave them in so that we can get idempotent translations?

    # Remove trailing blank lines
    i = N - 1
    while i >= 0 and not lines[i].strip():
      lines.pop()
      N -= 1
      i -= 1

    # Skip past initial blank lines
    i = 0
    while i < N and not lines[i].strip():
      i += 1

    if i < N:
      # Establish indentation of the lines based on the first non-blank line.
      indent_re = re.compile('^(\s*)')
      indent_match = indent_re.match(lines[i])
      if not indent_match:
        # This exception may be too conservative if there are indeed times
        # when preamble contains lines with no indentation, but for  now we
        # keep it in to see if that situation occurs.
        raise Error('Expecting indent in line: "%s"' % lines[i])
      predent = len(indent_match.group(1))

      # Emit the lines with the common leading indentation stripped.
      output.reset()
      output.newline(count=newlines)
      output.addPrimary('native', uid=None)
      output.addBlock('scope', [line[predent:].rstrip() for line in lines[i:N]])
      output.addTerm('native', uid=None)


class Module(Entity):
  """Represents a parsed version of a python module definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  # Cached Module instances, keyed by module path.
  # TODO(wmh): Should this go some place else so that it can be used
  # for more than just Class.New()?
  MODULES = {}

  @classmethod
  def New(cls, path, use_regions=False):
    """Create a new (interned) Module instance from a file.

    Args:
      path: str
        Path of the python module to load.
      use_regions: bool
        If True, load via Region/File rather than raw Source.

    Returns: Module

    Raises:
      Error: if the module's basename does not match its Source name.
    """
    module = cls.MODULES.get(path, None)
    if not module:
      basename, source, test_source = cls._Load(path, use_regions=use_regions)
      test_sources = [test_source]
      if basename != source.name():
        raise Error('Expecting %s == %s' % (basename, source.name()))
      module = cls(
        source, test_sources=test_sources, previous_source=None,
        parent=None, path=path)
      cls.MODULES[path] = module
    return module

  @classmethod
  def _Load(cls, path, use_regions=False):
    """Load a module's source (and its test module's source).

    The test module is expected to live next to the module as
    <basename>_test.py.

    Args:
      path: str
      use_regions: bool
        If True, wrap File objects in Regions instead of reading raw text.

    Returns: three-tuple
     [0] basename
     [1] module source
     [2] test module source

    Raises:
      Error: if path does not end in '.py'.
    """
    basename = os.path.basename(path)
    if basename.endswith('.py'):
      basename = basename[:-3]
    else:
      raise Error('Expecting module paths to end in .py: %s' % path)
    test_path = os.path.join(os.path.dirname(path), '%s_test.py' % basename)
    if use_regions:
      source = Region(File(path), 'module', basename)
      test_source = Region(File(test_path), 'module', basename + '_test')
    else:
      with open(path, 'r') as fp:
        source = Source(None, basename, 1, fp.read())
      with open(test_path, 'r') as tfp:
        test_source = Source(None, basename + '_test', 1, tfp.read())
    return basename, source, test_source

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None, path=None):
    """Initializer.

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source
        The entire test code, as structured data.
      previous_source: Source or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources of the previous entity at the same level.
      parent: Entity or None
        The block that contains this block.
      path: str or None
        The path to the file containing the module.
    """
    super(Module, self).__init__(
      source, test_sources=test_sources,
      previous_source=previous_source,
      previous_test_sources=previous_test_sources,
      parent=parent)

    # Classes in the test module are matched to core classes by appending
    # 'Test' to the class name.
    classes, unclaimed_test_classes = self.parseSource(
      'module', Class, 'class', '', lambda name: name + 'Test', source,
      test_sources=test_sources)

    # field path: str
    #   The path to the module.
    self._path = path

    # field classes: collections.OrderedDict
    #   Maintains a mapping from class name to Class instance.
    self._classes = classes

    # field unclaimed_test_classes: collections.OrderedDict
    #   The classes in the test module not acting as test cases.
    self._unclaimed_tests = unclaimed_test_classes

  def path(self):
    """Returns the path of the module file, or None."""
    return self._path

  def sourceText(self):
    """The source code for the module.

    Returns: str
      The entire contents of the module as a single string.
    """
    with open(self._path, 'r') as fp:
      return fp.read()

  def classNamed(self, name):
    """Find a class.

    Args:
      name: str
        The name of the class.

    Returns: Class or None
    """
    return self._classes.get(name, None)

  def summarize(self, fp=sys.stdout, indent=''):
    """Write a per-class summary of the module to fp."""
    subindent = indent + '  '
    for name, clazz in self._classes.iteritems():
      assert name == clazz.name()
      fp.write('\n%s%4d: Class %s\n' % (indent, clazz.start(), name))
      clazz.summarize(fp=fp, indent=subindent)

  def toMeta(self, metac, baselang, output, indent=''):
    """Convert this Module to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      baselang: object
        Base-language support object, forwarded to Class.toMeta().
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentation before each newly added line.
    """
    subindent = indent + '  '

    output.addPrimary('namespace', self.name())

    classes = self._classes.values()
    classes_output = output.clone(delta_indent='  ')

    # Process the initial text of the module (the docstr and the imports,
    # everything before the first class).
    if classes:
      first_source = classes[0].previousSource()
      src = first_source.src()
      holder_match = Source.HOLDER_INFIX_RE.search(src)
      if holder_match:
        pre_text, module_docstr_key, post_text = holder_match.groups()
        module_docstr = first_source.parent().strFor(module_docstr_key)
        output.addBlock(
          'comment',
          module_docstr.replace('"""', '').rstrip().split('\n'),
          comments=True)
        # BUG FIX: post_text is only bound when holder_match succeeds, so
        # this call must live inside the branch (previously a NameError
        # when the module had no docstring placeholder).
        self.nativeToMeta(classes_output, post_text.split('\n'))

      first_test_sources = classes[0].previousTestSources()
      if first_test_sources:
        print('Here with %s' % self.name())
        # BUG FIX: was 'first_test_source[0]' (undefined name).
        first_test_source = first_test_sources[0]
        test_src = first_test_source.src()
        # BUG FIX: searched 'src' (module source) instead of 'test_src'.
        test_holder_match = Source.HOLDER_INFIX_RE.search(test_src)
        if test_holder_match:
          pre_text, module_docstr_key, post_text = test_holder_match.groups()
          print('#' * 70)
          print(post_text)
          print('#' * 70)

    # Now we generate the text for each class.
    for class_ in classes:
      classes_output.reset()
      classes_output.newline()
      class_.toMeta(metac, baselang, classes_output, indent=subindent)

    output.addBlock('scope', classes_output.lines(), already_indented=True)

    output.newline()
    output.addTerm('namespace', self.name())


class Class(Entity):
  """Represents a parsed version of a python class definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None):
    """Initializer.

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source
        The entire test code, as structured data.
      previous_source: str or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources of the previous entity at the same level.
      parent: Entity or None
        The block that contains this block.
    """
    # BUG FIX: previous_test_sources was accepted but not forwarded to
    # Entity.__init__, so previousTestSources() always returned None for
    # classes even though Entity.parseSource passes it (Method.__init__
    # forwards it, and Module.toMeta reads it).
    super(Class, self).__init__(
      source, test_sources=test_sources, previous_source=previous_source,
      previous_test_sources=previous_test_sources, parent=parent)

    # Test methods are matched to core methods via a 'test_' prefix.
    methods, unclaimed_test_methods = self.parseSource(
      'class', Method, 'def', '  ', lambda name: 'test_' + name,
      source, test_sources=test_sources)

    # Parse the field information for this class from the documented
    # self._* assignments in __init__.
    acc_types = {'get': '', 'set': 'Is', 'ref': 'Ref'}
    field_map = collections.OrderedDict()
    acc_map = {}
    init_meth = methods.get('__init__', None)
    if init_meth:
      field_order = init_meth.parseFieldInfo()
      for field_info in field_order:
        # field_info contains:
        #    comments: list of str
        #      The description of the field
        #    field: str
        #      The actual field name (with underscore)
        #    name: str
        #      The conceptual field name
        #    type: str
        #      The user-provided type of the field
        #    value: str
        #      ?
        name = field_info['name']
        field_map[name] = field_info
        # Map each accessor spelling (name, nameIs, nameRef) back to its
        # accessor kind and field info, for use in toMeta().
        for acctype in acc_types:
          suffix = acc_types[acctype]
          acc_map[name + suffix] = (acctype, field_info)

    # field methods: collections.OrderedDict
    #   Maintains a mapping from method name to Method instance.
    self._methods = methods

    # field unclaimed_tests: collections.OrderedDict
    #   The methods in the test class that are not test methods.
    self._unclaimed_tests = unclaimed_test_methods

    # field field_map: collections.OrderedDict
    #   Maintains field info.
    self._field_map = field_map

    # field acc_map: dict
    #   Accessor map: accessor method name -> (acctype, field_info).
    self._acc_map = acc_map

  def methods(self):
    """Returns the name -> Method ordered mapping."""
    return self._methods

  def fieldMap(self):
    """Returns the name -> field_info ordered mapping."""
    return self._field_map

  def show(self, fp=sys.stdout):
    """Write the raw class source to fp."""
    # TODO(wmh): Fix this.
    fp.write(self.source().source())

  def summarize(self, fp=sys.stdout, indent=''):
    """Write a per-method summary (with test associations) to fp."""
    for name, method in self._methods.iteritems():
      assert name == method.name()
      test_sources = method.testSources()
      if test_sources:
        first_test_summary = '%4d: %s' % (
          test_sources[0].start(), test_sources[0].name())
      else:
        first_test_summary = ''
      fp.write(
        '%s%4d: %-40s | %s\n' %
        (indent, method.start(), name, first_test_summary))
      for test_source in test_sources[1:]:
        fp.write(
          '%s%46s | %4d: %s\n' %
          (indent, '', test_source.start(), test_source.name()))

  def methodNamed(self, name):
    """Find a method.

    Args:
      name: str
        The name of the method.

    Returns: Method or None
    """
    return self._methods.get(name, None)

  def methodsStartingWith(self, prefix):
    """Find all methods starting with given prefix.

    Args:
      prefix: str
        A name prefix.

    Returns: list of Method
    """
    result = []
    for method in self._methods.values():
      if method.name().startswith(prefix):
        result.append(method)
    return result

  def toMeta(self, metac, baselang, output, indent=''):
    """Convert this Class to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      baselang: object
        Base-language support object (provides baseTypeToMeta()).
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentation before each newly added line.
    """
    debug = False
    source = self.source()
    tests = self.testSources()
    # TODO(wmh): Support multiple test classes?
    test_source = tests[0] if tests else None
    docstr = source.docstr()
    cls_name = source.name()
    field_map = self.fieldMap()
    # NOTE(review): 'term' and 'rem' are currently unused below; kept in
    # case metac.token() performs validation — confirm before removing.
    term = metac.token('term')
    rem = metac.token('rem')

    subindent = indent + '  '

    scope_lines = []
    scope_output = output.clone(output=scope_lines, delta_indent='  ')

    test_lines = []
    test_output = output.clone(output=test_lines, delta_indent='  ')

    init_scope_index = -1
    methods = self.methods().values()

    if debug:
      print('Class %s' % cls_name)

    # We first deal with the text within the class before the first method. We
    # add a native block to capture it. We can obtain this text from
    # methods[0].previousSource(), but unfortunately that text represents
    # everything in the class definition up to the first child definition,
    # including the definition of the class itself and everything in between.
    # We want to obtain the part after the class definition and docstr.
    # I'd initial thought I could deal with this in Source.split() by obtaining
    # the offset of the end of docstr using
    #      self._points['src']['body'] - self._points['src']['start']
    # but this produces an offset within the parent's src, which
    # is not necessarily the same offset as the current src (because
    # the current src resubs a smaller set of strings so what looks
    # like <@STRING109@> in the parent becomes <@STRING8@> in current,
    # (producing different offsets))
    #
    # Instead, we apply a regexp to substr.  Something cleaner would
    # be nice.
    if methods:
      sub_src = methods[0].previousSource().src()

      init_re = re.compile(r'(.*\): *\n(?:\s*<@STRING\d+@> *\n)?)')
      init_match = init_re.match(sub_src)
      if init_match:
        index = len(init_match.group(1))
        inamble = sub_src[index:]

        if inamble.strip():
          inamble_lines = inamble.split('\n')

          # The very last index of inamble_lines is always empty. Any lines
          # before that that are not empty appear immediately before the
          # first method, and are thus preamble of that method, NOT the
          # initial class preamble.
          idx = len(inamble_lines) - 2
          while inamble_lines[idx].strip():
            idx -= 1
          self.nativeToMeta(scope_output, inamble_lines[:idx])
      else:
        raise Error('Failed to find init preamble')

    for method in methods:
      # Initial setup for this method
      mname = method.name()
      if debug:
        print('  Method %s' % mname)

      # TODO(wmh): If we always arrange for method preambles to contain ONLY
      # text associated with the method def (decorators, etc.), then we should
      # NOT generate a native block ... that will be handled by postamble.
      preamble = method.source().preamble()
      if preamble:
        if False:
          self.nativeToMeta(scope_output, preamble.split('\n'))

      # Establish the test sources for this method. This is a subset of those
      # in method.testSources(), which consists of all test methods with
      # test_<mname> as a prefix. Suppose <mname> is 'delim' and there is also
      # another method named 'delimIs'. Both 'test_delim' and 'test_delimIs'
      # will be in method.testSources(), but we want to remove test_delimIs.
      test_map = {}
      for test_method_source in method.testSources():
        tmsname = test_method_source.name()
        ok = True
        for ometh in self.methodsStartingWith(mname):
          oname = ometh.name()
          if oname == mname:
            continue
          otest_prefix = 'test_' + oname
          if tmsname.startswith(otest_prefix):
            ok = False
            break
        if ok:
          test_map[tmsname] = test_method_source
          if debug:
            print('    Test %s' % tmsname)

      acc_map = self._acc_map
      if mname in acc_map:
        # This is an accessor for a field .. we note the kind of accessor
        # (get, set or ref) in field_info['accessors']
        acctype, field_info = acc_map[mname]
        field_info.setdefault('accessors', {})[acctype] = {
          'scope': method, 'tests': test_map.values()}
      else:
        if mname == '__init__':
          init_scope_index = len(scope_lines)
        scope_output.reset()
        method.toMeta(metac, baselang, scope_output, indent=subindent)
        postamble = method.source().postamble()
        if postamble:
          self.nativeToMeta(scope_output, postamble.split('\n'))

    # ----------------------------------------------------------------------
    # Compose the field code
    forder = {'get': 1, 'set': 2, 'ref': 3}
    fplates = {
      'get': 'return self._%(field)s',
      'set': 'self._%(field)s = %(field)s',
      'ref': 'return self._%(field)s'}

    if field_map:
      if init_scope_index < 0:
        raise Error('Found fields but no initializer.')

      # Accumulate all lines representing field definitions into 'field_lines'
      field_lines = []

      for field_info in field_map.values():

        # Define a field from field_info
        field_output = scope_output.clone()
        field_output.newline()

        # Establish the field name
        field_name = field_info['name']

        field = field_info.get('field', None)
        if not field:
          print(
            'WARNING: Found documentation for field %s but no self._%s '
            'was found in %s.__init__' % (field_name, field_name, cls_name))
          continue
        if field != '_' + field_name:
          raise Error('Not yet supporting %s vs %s' % (field, field_name))

        # Establish the feature attributes of the field
        accessors = field_info.get('accessors', {})
        accs = sorted(
          accessors.keys(), cmp=lambda a, b: cmp(forder[a], forder[b]))
        field_features = ''.join(accs) or 'raw'
        if field_features == 'getsetref':
          # This is the default.
          field_features = ''

        # Define the field construct
        field_output.addPrimary('field', field_name, features=field_features)

        # Add field typing information if present
        if 'type' in field_info:
          basetype = field_info['type']
          metatype = baselang.baseTypeToMeta(basetype, metac)
          field_output.addSecondary(':', metatype)

        # Add the field comment
        if 'comments' in field_info:
          field_output.addBlock('#', field_info['comments'], comments=True)

        # Define the accessors
        if accs:
          acc_lines = []
          for acc in accs:
            accessor_output = field_output.clone(delta_indent='  ')
            accessor_output.addPrimary('accessor', acc)
            acc_info = accessors[acc]

            # Only emit an explicit scope when the accessor body differs
            # from the canonical one-line template.
            acc_scope = acc_info['scope']
            acc_body = acc_scope.source().body()
            simple_acc_body = fplates[acc] % {'field': field_name}
            if acc_body.strip() != simple_acc_body:
              accessor_output.addBlock('scope', self.splitText(acc_body))

            if 'tests' in acc_info:
              ftests = acc_info['tests']
              if not ftests:
                # No unittests for this field.
                continue
              ftest = ftests[0]
              accessor_output.addBlock('test', self.splitText(ftest.body()))
            accessor_output.addTerm('accessor', acc)
            if accessor_output.size() > 1:
              acc_lines.extend(accessor_output.lines())
          # Add all accessor lines into the field scope.
          field_output.addBlock('scope', acc_lines, already_indented=True)

        # End the field construct
        field_output.addTerm('field', field_name)
        field_lines.extend(field_output.lines())

      # We insert the fields immediately before the initializer for this
      # class.
      scope_lines = (
        scope_lines[:init_scope_index] +
        field_lines +
        scope_lines[init_scope_index:])

    # ----------------------------------------------------------------------
    # Now process the test code.

    # For any method in test source that is not associated with a core method,
    # we insert the method into the test block of the class itself.
    for test_method in self._unclaimed_tests.values():
      test_method.toMeta(metac, baselang, test_output, indent=subindent)
      postamble = test_method.source().postamble()
      if postamble:
        # NOTE(review): this writes the test method's postamble into the
        # class scope (scope_output) rather than the test block
        # (test_output) — looks suspect; confirm before changing.
        self.nativeToMeta(scope_output, postamble.split('\n'))

    # ----------------------------------------------------------------------
    # Put it all together by writing the class definition to 'output'

    parents = source.argamble()[1:-1]
    output.addPrimary('class', cls_name)
    output.addSecondary(':', parents)
    if docstr:
      docstr_match = Source.COMMENT_RE.match(docstr)
      if not docstr_match:
        raise Error('Invalid docstr\n%s' % docstr)
      output.addBlock(
        'comment', docstr_match.group(2).split('\n'), comments=True)
    if test_lines:
      output.addBlock('test', test_lines, already_indented=True)
      output.newline()
    if scope_lines:
      # TODO(wmh): switch to scope<python>
      output.addBlock('scope', scope_lines, already_indented=True)
      output.newline()
    output.addTerm('class', cls_name)

    # ----------------------------------------------------------------------
    # Deal with any postamble of the class.

    postamble = source.postamble()
    if postamble.strip():
      self.nativeToMeta(output, postamble.split('\n'), newlines=0)


class Method(Entity):
  """Represents a parsed version of a python method definition.

  This does not use any python introspection, so that the code can be
  migrated to any base language.
  """

  # Matches a (python2-style) two-argument super call, e.g.
  #   super(Sub, self).meth(args)
  # capturing: the text before the call, the subclass name, the receiver,
  # the parent method name, the argument list, and the text after the call.
  SUPER_RE = re.compile(r"""
    (.*)                # the text before the super call.
    \s*super
    \(
    \s*([a-zA-Z0-9_]+)  # the subclass
    \s*,\s*
    ([a-zA-Z0-9_]+)\s*  # the receiver object
    \)
    \.
    ([a-zA-Z0-9_]+)     # the parent method being invoked
    \s*\(([^\)]*?)\)    # the (possibly multiline) arg list.  Does not work
                        # if any params have ')' in them.
    (.*)                # the text after the super call.
    """,
    re.S | re.X)

  def __init__(self, source, test_sources=None, previous_source=None,
               previous_test_sources=None, parent=None):
    """Initializer.

    Parses the method body for a super() call (classifying the inheritance
    style), determines the method level from decorators in the preamble,
    and parses the docstring into structured comment data.

    Args:
      source: Source
        The entire code, as structured data.
      test_sources: list of Source
        The entire test code, as structured data.
      previous_source: str or None
        The source before the source of this block.
      previous_test_sources: list of Source or None
        The test sources preceding this block's test sources.
      parent: Entity or None
        The block that contains this block.
    """
    super(Method, self).__init__(
      source, test_sources=test_sources, previous_source=previous_source,
      previous_test_sources=previous_test_sources, parent=parent)

    # Check to see if there is a super() call in the body, and if so, whether
    # it is the first statement (post-extend), last statement (pre-extend) or
    # inner statement (super-extend).
    body = self.source().body()
    super_info = None
    super_match = self.SUPER_RE.match(body)
    if super_match:
      # Now parse the super data.
      pre_text, subcls_str, rec_str, parent_meth, super_args, post_text = (
        super_match.groups())
      if not pre_text.strip():
        # no text before the super call, so this is post-extend.
        inheritance = 'post_extend'
      elif not post_text.strip():
        # no text after the super call, so this is pre-extend
        inheritance = 'pre_extend'
      else:
        # super() call is somewhere in the middle of the method body.
        inheritance = 'super_extend'

      # When we generate Meta code, the body should not include the super
      # call, since it is either implicit (pre_extend or post_extend) or
      # specified by special syntax (super_extend).  Extract the text of the
      # super call itself: everything between the pre-text and the post-text.
      # BUGFIX: computed with an explicit end index because the previous
      # slice expression, [len(pre_text):-len(post_text)], returned '' when
      # post_text was empty (-0 is 0, so the slice collapsed).
      whole = super_match.group(0)
      text = whole[len(pre_text):len(whole) - len(post_text)]
      super_info = {
        'args': super_args.replace('\n', ' ').strip(),
        'inheritance': inheritance,
        'text': text,
      }

    # field level: str
    #   The level of this method ('instance', 'static' or 'meta')
    level = 'instance'
    preamble = self.source().preamble()
    if preamble:
      if '@classmethod' in preamble:
        level = 'meta'
      elif '@staticmethod' in preamble:
        level = 'static'
    self._level = level

    # field super_info: dict
    #   Maintains information about super class.
    self._super_info = super_info

    # field comments: dict
    #   Information from a structured comment.
    self._comments = self._parseComment()

  def superInfo(self):
    """Returns the super-call info (dict with 'args', 'inheritance' and
    'text' keys, as built in __init__), or None if the body has no
    super() call.
    """
    return self._super_info

  def level(self):
    """Returns the method level: 'instance', 'static' or 'meta'."""
    return self._level

  def comments(self):
    """Returns the structured docstring data produced by _parseComment()."""
    return self._comments

  def _parseComment(self):
    """Parse my docstr into structured comment data.

    Recognizes a Google-style docstring: a summary line, free-form top-level
    comment lines, and optional 'Args:', 'Raises:' and 'Returns:' sections.

    Returns: dict
      Always contains 'comments' (list of str: the top-level comment lines,
      with trailing blank lines removed).  May also contain:
        args: dict mapping arg name to {'name', 'type', 'comments'}
        raises: {'list': [{'name', 'first' (optional), 'comments'}, ...]}
        returns: {'type', 'comments'}

    Raises:
      Error: if a docstring line fails to match the line regexp.
    """
    debug = False
    # Strip the triple quotes from both ends of the docstring.
    docstr = self.source().docstr()[3:-3]

    data = {'comments': []}
    if docstr:
      # NOTE(review): arg_indent and i appear to be unused below.
      arg_indent = -1
      base_indent = 0   # getdoc() cleans the doc so that it always starts at 0
      # Groups: (indent ws)(optional 'keyword' before ':')(ws)(rest of line).
      line_re = re.compile('^(\s*)(?:(\S+):)?(\s*)(.*)')
      i = 0
      doc_lines = docstr.split('\n')
      # The first line is always the top-level summary.
      data['comments'].append(doc_lines[0])
      lineno = 0
      last_was_top = True
      section = None
      key = None  # stays None until we see Args: or Raises: or Returns:
      for line in doc_lines[1:]:
        lineno += 1
        line_match = line_re.match(line)
        if line_match:
          indent_str, kw, ws, text = line_match.groups()
          indent = len(indent_str)
          empty = not kw and not text
          if debug:
            print '%-80s #=%d indent=%d base=%d kw=%s key=%s text=%s' % (
              line, lineno, indent, base_indent, kw, key, text)

          if indent == base_indent and not empty:
            # A section header or a top-level comment.
            if kw in ['Args', 'Raises', 'Returns']:
              key = kw.lower()
              section = {}
              data[key] = section
              if key == 'returns':
                section['comments'] = []
                section['type'] = text or '?'
            else:
              # top-level comment.
              #if not last_was_top:
              #  data['comments'].append('')
              if kw:
                # Reconstruct the 'word:' prefix that line_re split off.
                data['comments'].append('%s:%s%s' % (kw, ws, text))
              else:
                data['comments'].append(text)
              last_was_top = True
              key = None

          elif key:
            # A secondary (or lower) comment ... add to section
            if key == 'returns':
              section['comments'].append(' ' * (indent - 2) + text)
            elif indent == base_indent + 2:
              # First line of a section entry (arg name, exception name, ...).
              last_was_top = False
              if key == 'args':
                # We expect kw=arg_name, text=arg_type
                entry = {'name': kw, 'type': text, 'comments': []}
                section[kw] = entry
              elif key == 'raises':
                # We expect kw=exception name, text=start of description.
                entry = {'name': kw, 'comments': []}
                if text:
                  entry['first'] = text
                section.setdefault('list', []).append(entry)
              elif key == 'returns':
                # We expect kw=None, text=comment.
                entry = section
                entry['comments'] = []
            elif indent >= base_indent + 4:
              # We have a comment line within a section.
              # NOTE(review): 'entry' is only bound by a preceding
              # indent == base_indent + 2 line; a section whose first body
              # line is indented >= 4 would hit an unbound 'entry' here --
              # confirm docstrings always introduce entries at indent + 2.
              last_was_top = False
              entry['comments'].append(' ' * (indent - base_indent - 4) + text)

          else:
            # We have a top-level comment indented some amount of space.
            data['comments'].append(' ' * indent + text)
            last_was_top = True

        else:
          raise Error('Unknown line %d: %s' % (lineno, line))
    # Drop trailing blank top-level comment lines.
    while data['comments'] and not data['comments'][-1]:
      data['comments'].pop()
    return data

  def _parseArgSpec(self, argspec):
    """Parse the argument specification of this method into a four-tuple.

    A single left-to-right scan that ignores commas and '=' characters that
    occur inside string literals or inside (), [] or {} groupings, so
    defaults like d='hello, world', e=f(1,3) or g=[1,2] parse correctly.

    Args:
      argspec: str
        The string inside the '(' and ')' within a method definition.

    Returns: four-tuple
     [0] list of positional arg names and keyword arg names
     [1] name of * or None
     [2] name of ** or None
     [3] list of the default values of the keyword args from [0]
   """
    names = []
    varargs = None
    varkw = None
    defaults = []

    if argspec:
      argspec = argspec + ','  # avoids special casing the final arg.

    n = len(argspec)
    i = 0         # current index within argspec being considered
    s = 0         # index of start of current arg
    eq = None     # index of '=' for current arg (None if not keyword arg)
    depth = 0     # nesting depth of (), [] and {} groupings
    quote = None  # quote char if currently inside a string literal, else None
    while i < n:
      c = argspec[i]
      if quote:
        # Inside a string literal: only look for the closing quote, and skip
        # backslash-escaped characters (handles \' and \").
        if c == '\\':
          i += 1
        elif c == quote:
          quote = None
      elif c == "'" or c == '"':
        quote = c
      elif c in '([{':
        depth += 1
      elif c in ')]}':
        depth -= 1
      elif depth == 0:
        if c == ',':
          # Found end-of-arg.
          if eq is not None:
            names.append(argspec[s:eq].strip())
            defaults.append(argspec[eq+1:i].strip())
          else:
            nm = argspec[s:i].strip()
            if not nm:
              # Empty segment (e.g. a trailing comma in the original
              # argspec); previously this crashed on nm[0].
              pass
            elif nm.startswith('**'):
              varkw = nm[2:]
            elif nm.startswith('*'):
              varargs = nm[1:]
            else:
              names.append(nm)
          # Reset for next arg
          s = i + 1
          eq = None
        elif c == '=' and eq is None:
          # Start of the value of a keyword arg.  Only the first top-level
          # '=' counts; later ones belong to the default value expression.
          eq = i
      i += 1

    return names, varargs, varkw, defaults

  def _parseArgInfo(self):
    """Obtain information for each arg.

    Merges the parsed argspec (names, defaults, varargs/varkw) with the
    per-arg documentation extracted from the docstring's Args: section.

    Returns: list of dict
      The order of elements matches the order of the args.  Each dict contains:
        name: str
          The name of the arg
        type: str (optional)
          The type of the arg (if known)
        comments: list of str (optional)
          A description of the arg (if known)
        default: str (if a default arg)
          The default value.  If this key does not exist, the arg is a
          positional arg, not a keyword arg.
        prefix: str (optional)
          Not present for normal positional or keyword args.  Has value '*'
          for the variadic list variable, '**' for the variadic keyword
          variable.

    Raises:
      Error: if the implicit first arg (self/cls) is missing.
      InternalError: if level() is not 'instance', 'meta' or 'static'.
    """
    # Process parameters.
    #  - An argspec can look like this
    #       a, b, *args, c=0, d='hello, world', e=f(1,3), **kwds
    #    and thus splitting into components is not as easy as split(',')
    arg_list, varargs, varkw, arg_defs = self._parseArgSpec(
      self.source().argamble()[1:-1])
    arg_docs = self._comments.get('args', {})

    # We skip the first arg in arg_list for 'instance' and 'meta' methods.
    # The 'not arg_list' guards turn an IndexError on an empty argspec into
    # the intended Error.
    level = self.level()
    if level == 'instance':
      if not arg_list or arg_list[0] != 'self':
        raise Error(
          'Expecting instance method %s to start with self' % self.fullname())
      i = 1
    elif level == 'meta':
      if not arg_list or arg_list[0] != 'cls':
        raise Error('Expecting class method to start with cls')
      i = 1
    elif level == 'static':
      i = 0
    else:
      # Defensive: __init__ only produces the three levels above; without
      # this branch, 'i' would be referenced before assignment below.
      raise InternalError('Unknown method level %r' % level)

    # Index boundaries: positions in [def_i, kwd_i) are keyword args; the
    # varargs/varkw entries appended below sit at or beyond kwd_i and thus
    # never receive defaults.
    kwd_i = len(arg_list)
    def_i = len(arg_list) - len(arg_defs)
    if varargs:
      arg_list.append('*' + varargs)
    if varkw:
      arg_list.append('**' + varkw)

    # Form the return value: one dict for each arg.
    result = []
    for arg in arg_list[i:]:
      arg_info = {'name': arg}
      if i >= def_i and i < kwd_i:
        arg_info['default'] = arg_defs[i - def_i]
      if arg in arg_docs:
        arg_doc = arg_docs[arg]
        if 'type' in arg_doc:
          arg_info['type'] = arg_doc['type']
        if 'comments' in arg_doc:
          arg_info['comments'] = arg_doc['comments']
      result.append(arg_info)
      i += 1
    return result
  
  def parseFieldInfo(self):
    """Look for field definitions (possibly with comments) in the method body.

    Scans the raw source line by line for structured '# field <name>: <type>'
    comments (plus their indented '# ' continuation lines) and for
    'self.<name> = <value>' assignments, merging both kinds of information
    per field.

    Returns: list of dict
      One dict per field, in order of first appearance, with keys:
        name: str
          The name of the field (with preceeding '_' removed)
        field: str
          The raw name of the field
        type: str (optional)
          The type of the field, if found in a comment.
        comments: list of str (optional)
          The description of the field, if found in a comment.
        value: str (optional)
          The (first line of) the value the field is assigned.
    """
    doc_start_re = re.compile(r'^\s*# field\s+([a-zA-Z0-9_]+)\s*:\s*(.*)')
    doc_cont_re = re.compile(r'^\s*# \s+(.*)')
    assign_re = re.compile(r'.*self\.([a-zA-Z0-9_]+)\s*=\s*(.*)')
    ordered_names = []
    by_name = {}
    active = None  # the field dict whose comment block is being collected.
    for src_line in self.source().src().split('\n'):
      start_match = doc_start_re.match(src_line)
      if start_match:
        # Start of a '# field name: type' structured comment.
        raw_name, field_type = start_match.groups()
        key = raw_name.lstrip('_')
        active = {'name': key, 'type': field_type, 'comments': []}
        by_name[key] = active
        ordered_names.append(key)
        continue
      cont_match = doc_cont_re.match(src_line)
      if active and cont_match:
        # Continuation line of the comment block started above.
        active['comments'].append(cont_match.group(1).strip())
        continue
      # Any other line terminates the comment block; it may be an actual
      # field assignment (documented or not).
      active = None
      assign_match = assign_re.match(src_line)
      if assign_match:
        raw_field, first_value = assign_match.groups()
        key = raw_field.lstrip('_')
        if key not in by_name:
          by_name[key] = {'name': key}
          ordered_names.append(key)
        by_name[key]['field'] = raw_field
        by_name[key]['value'] = first_value
    return [by_name[key] for key in ordered_names]

  def _convertDefaultValueToMeta(self, value):
    """Convert a python value to a corresponding Meta value.

    Args:
      value: str
        Valid python that can appear as the default value associated with
        a keyword parameter (this means 'any python expression').  Convert
        to a corresponding Meta syntax.  In situations where there is no such
        syntax, the result is wrapped in {# and #} to indicate that it is
        an inlined baselang string.

    Returns: str
    """
    # TODO(wmh): We need access to the OoplPython instance within this code,
    # so that we can obtain meta-to-python mappings like 'true', 'false',
    # etc.
    config = {
      'self': 'self',
      'cls': 'cls',
      'null': 'None',
      'true': 'True',
      'false': 'False',
    }
    # Invert the meta->python map into python->meta.  Built via dict() over
    # items() (rather than an iteritems comprehension) so it runs under both
    # python2 and python3.
    revconfig = dict((v, k) for (k, v) in config.items())
    if not value:
      # Guard: an empty default (degenerate argspec) previously crashed on
      # value[0] below; escape it like any other unconvertible value.
      result = '{#%s#}' % value
    elif value in revconfig:
      # We have an explicit python literal for which a corresponding Meta
      # literal exists.
      result = revconfig[value]
    elif re.match(r'^\d+(\.\d+)?$', value):
      # Literal numeric ... pass thru as is.
      result = value
    elif value[0] == '"' and value[-1] == '"':
      result = value
    elif value[0] == "'" and value[-1] == "'":
      # TODO(wmh): Currently, we only parse double-quote strings, so we convert
      # single-quoted strings to doubles. We will probably want Meta to allow
      # both single and double quoted strings though.
      result = value
      #result = '"' + value[1:-1].replace('"', '\\"') + '"'
    else:
      # Do not know how to convert python 'value' to Meta, so we escape it.
      result = '{#%s#}' % value
    return result

  def toMeta(self, metac, baselang, output, indent=''):
    """Convert this Method to Meta syntax.

    Args:
      metac: Compiler
        The compiler to use to obtain meta-specific info.
      output: Output
        Add lines to this list representing the meta source code.
      indent: str
        The indentatino before each newly added line.
    """
    term = ';'    # TODO(wmh): Get this from Context.tokens('term').  How?
    rem = '/#'  # TODO(wmh): Get this from Context.tokens('rem').  How?

    mname = self.name()
    cls = self.parent()
    field_map = cls.fieldMap()
    docs = self.comments()

    # ----------------------------------------
    # Establish whether this python method corresponds to a Meta method,
    # field or initializer.
    if mname == '__init__':
      mname = cls.name()
      cons_name = 'initializer'
    elif mname in field_map:
      # This method is a getter for field 'mname'
      field_map[mname]['accessors']['get'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    elif mname + 'Is' in field_map:
      # This method is a setter for field 'mname'
      # TODO(wmh): Need to support set<Name> too, if we want this to be
      # a general purpose converter.
      field_map[mname]['accessors']['set'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    elif mname + 'Ref' in field_map:
      # This method is a reffer for field 'mname'
      field_map[mname]['accessors']['ref'] = {
        'scope': pymeth, 'tests': test_pymeths}
      cons_name = 'field'
    else:
      cons_name = 'method'
    # TODO(wmh): Split out the code that determines if a method is an accessor
    # or not, and do not call this method for accessors.

    # We have a normal method (i.e. not a field accessor)
    if cons_name != 'field':
      # ----------------------------------------
      # Establish the feature attributes.
      features = []

      super_info = self.superInfo()
      if super_info:
        inheritance = super_info['inheritance']
        features.append(inheritance)

      level = self.level()
      if level != 'instance':
        features.append(level)

      output.newline()
      output.addPrimary(cons_name, mname, features=' '.join(features))

      # ----------------------------------------
      # Process parameters.
      args_info = self._parseArgInfo()
      if args_info:
        plist = []
        params_output = output.clone(output=plist, delta_indent='  ')
        params_output._newline_blocks = False
        for arg_info in args_info:
          params_output.reset()
          params_output.addPrimary('var', arg_info['name'])
          if 'type' in arg_info:
            basetype = arg_info['type']
            metatype = baselang.baseTypeToMeta(basetype, metac)
            params_output.addSecondary(':', metatype)
          if 'default' in arg_info:
            params_output.addSecondary(
              '=', self._convertDefaultValueToMeta(arg_info['default']))
          comments = arg_info.get('comments', [])
          params_output.addBlock('#', comments, comments=True)
          if params_output.unblocked():
            params_output.addTerm('var', simple=True)
        output.addBlock('params', plist, already_indented=True)
            
      if super_info and super_info['args']:
        output.addSecondary(
          'pargs',
          '"%s"' % super_info['args'].replace('"', '\\\"'),
          newline=True)
        
      # ----------------------------------------
      # Process the 'returns' information
      return_info = docs.get('returns', None)
      if return_info:
        return_type = baselang.baseTypeToMeta(
          return_info['type'], metac)
        output.addSecondary('returns', return_type, newline=True)
        return_comments = return_info['comments']
        if return_comments:
          output.addBlock('returns_', return_comments, comments=True)

      # ----------------------------------------
      # Add the 'comment' block.
      if docs['comments'] or 'raises' in docs:
        clist = []
        c = 0
        for line in docs['comments']:
          c += 1
          if line:
            clist.append('%s %s' % (rem, line))
          else:
            clist.append(rem)
        # TODO(wmh): Introduce syntax into Meta to allow specification of what
        # exceptions a method throws. Probably best if this is a complex-block
        # attribute ('throws', 'raises', etc.) with 'exception' child
        # constructs. This will allow us to specify the exception that is raised
        # AND document the reasons it is raised much more easily than if we use
        # a string-valued attribute that tries to encode name/comment for
        # multiple exceptions.
        if 'raises' in docs:
          if c:
            clist.append(rem)
          clist.append('%s Raises:' % rem)
          exceptions = docs['raises']
          for exception in exceptions.get('list', []):
            err = exception['name']
            clist.append('%s   %s:' % (rem, err))
            if 'first' in exception:
              clist[-1] += ' ' + exception['first']
            try:
              for line in exception['comments']:
                clist.append('%s     %s' % (rem, line))
            except TypeError:
              print '#' * 80
              print exception
              print '#' * 80
        output.addBlock('comment', clist, already_indented=False)

      # ----------------------------------------
      # Add the 'scope' block.
      #  - there is usually an initial newline (but not if this is a one-line def),
      #    that we want to ignore. More generally, we skip initial blank lines.
      #  - there is always a newline at the end of the body, so we ignore it.
      source = self.source()
      # We cannot simply ask for source.body(sub=False) or source.body(sub=True)
      # because neither source._source nor source._src has indented the 
      # triple-quoted strings with '>|'.
      quote_dent = metac.token('quote_dent')
      # TODO(wmh): We are adding '  ' to quote_dent because of a special case.
      # Suppose we have an inner method with a docstr, like this:
      #   def f(self):
      #     def inner():
      #       """This is a docstr
      #       on multiple lines.
      #       """
      #       pass
      # 
      # This will become meta as:
      #   method f scope:
      #     def inner():
      #       """This is a docstr
      #      >|on multiple lines.
      #      >|"""
      #      pass
      #
      # Which results in an error of the form:
      #    metalib.kernel.parser.SyntaxError:
      #    Line XXX of parser.meta has an invalid indentation.
      #
      # By adding '  ' to quote_dent, it becomes
      #
      #   method f scope:
      #     def inner():
      #       """This is a docstr
      #        >|on multiple lines.
      #        >|"""
      #      pass
      #
      # which avoids the error.  We need a better solution.
      bdy = source.parent().resub(source.body(sub=True), indent='  ' + quote_dent)
      
      super_match = self.SUPER_RE.match(bdy)
      if super_match:
        if inheritance == 'post_extend':
          bdy = super_match.groups()[-1]
        elif inheritance == 'pre_extend':
          bdy = super_match.groups()[0]
        elif inheritance == 'super_extend':
          raise Error('Not yet supporting super_extend inheritance')

      if not bdy.strip():
        #bdy = '\n\n'
        #bdy = 'pass\n'
        bdy = '\n'
        
      
      body = bdy.split('\n')
      assert body[-1] == '', "Expecting empty last line, found: '%s'" % body[-1]
      i = 0
      n = len(body)
      while i < n and not body[i].strip():
        i += 1
      output.addBlock('scope', body[i:-1], force=True)

      # ----------------------------------------
      # Add the 'test' block(s) if there are unit test methods.
      tests = self.testSources()
      if tests:
        test_source = tests[0]
        # A single test method, no parameterization needed.
        body_str = test_source.body().replace('pass\n', '').rstrip()
        if body_str:
          tbody = body_str.split('\n')
          i = 0
          n = len(tbody)
          while i < n and not tbody[i].strip():
            i += 1
          output.addBlock('test', tbody[i:])
          if len(tests) > 1:
            # TODO(wmh): we must parameterize the 'test' attribute, but the
            # first parameter is reserved for the baselang, so we need to
            # support multiple-parameters (not yet done).
            print '***** NOT YET supporting multiple test methods (%s)' % (
              self.fullname())
          
      # ----------------------------------------
      # End the method.
      output.addTerm(cons_name, mname)
