#!/usr/bin/python -S
"""
"Preprocessing and tokenization for JSON Pattern.
"""

__author__ = 'Andy Chu'


import re
import _exceptions


# For splitting into JSON Pattern instructions and regexes.  Non-greedy so
# that multiple {...} groups on one line are split separately.
SPLIT_RE = re.compile(r'({.*?})')

# Token type constants.
# Block tokens have an empty prefix.
# Things not in {} are regex's, things in {} are instructions
REGEX_TOKEN, INSTRUCTION_TOKEN, LITERAL_TOKEN, BLOCK_TOKEN = 0, 1, 2, 3

# See
# http://en.wikipedia.org/wiki/Regular_expression#POSIX_Extended_Regular_Expressions
# http://www.regular-expressions.info/posixbrackets.html

# Maps readable literal names (written as {.name} in patterns) to the regex
# snippet they expand to.  A leading '!' is the negated form.
LITERALS = {
    '.lbrace': r'\{',
    '.rbrace': r'\}',
    '.space': ' ',
    '.tab': '\t',
    '.newline': '\n',

    # Syntactic sugar
    '.word': r'\w',
    '!word': r'\W',
    '.ws': r'\s',
    '!ws': r'\S',
    '.digit': r'\d',
    '!digit': r'\D',

    # Zero-width assertions
    '.word-boundary': r'\b',
    '!word-boundary': r'\B',

    # TODO: What about ^ and $ ?
    '.begin': r'\A',
    '.end': r'\Z',
    }

#
# For backslash escaping
#

# Matches one backslash-escaped character, capturing the (possibly empty)
# run of non-backslash text before it.
ESCAPED_RE = re.compile(r'([^\\]*)(\\.)')

# \{ and \} are rewritten into literal-brace instructions so that SPLIT_RE
# does not mistake them for instruction delimiters.
BACKSLASH_ESCAPED = {
      r'\{': '{.lbrace}',
      r'\}': '{.rbrace}',
      }

def BackslashEscape(match):
  """Substitution callback for ESCAPED_RE.

  Rewrites the escaped braces \\{ and \\} into their literal-brace
  instructions; every other escape sequence (\\a, \\\\, ...) passes through
  unchanged.
  """
  prefix, escape_seq = match.group(1), match.group(2)
  return prefix + BACKSLASH_ESCAPED.get(escape_seq, escape_seq)


# Matches the readable character-class syntax, e.g. '.chars a b c' or
# '!chars x y' (negated).  Either .char or .chars is accepted.
READABLE_CHAR_CLASS = re.compile(r"""
    ([.!])chars?
    [ ]
    (.*)
    """, re.VERBOSE)


def MakeCharClass(bit, chars):
  """Build a regex character class from the readable {.chars ...} syntax.

  Args:
    bit: '.' for a normal class, '!' for a negated one
    chars: A space-separated list of characters / literal names

  Returns:
    A regex string such as '[abc]' or '[^abc]'.
  """
  parts = ['^'] if bit == '!' else []
  for item in chars.split():
    if item in ('-', '^', '[', ']', '\\'):
      # Special inside a character class -- escape it.
      parts.append('\\' + item)
    else:
      # Names like .tab expand to their regex snippet; anything else is
      # passed through verbatim.
      parts.append(LITERALS.get(item, item))
  return '[%s]' % ''.join(parts)


# Repetition ranges are regex syntax, not instructions; e.g. {3}, {20,30}
REPEAT_RANGE_RE = re.compile(r'\{\d+(?:,\d+)?\}')

def CombineSpecial(tokens, whitespace_mode='T'):
  """Combine non-instructions surrounded with {} with surrounding regexes.

  Args:
    tokens: Alternating list from SPLIT_RE.split -- even indices are plain
        regex text, odd indices are '{...}' groups.
    whitespace_mode: 'S' keeps whitespace around instructions significant;
        any other value strips it.

  Yields:
    An alternating sequence of regex, instruction, ..., regex

  Raises:
    _exceptions.PatternSyntaxError: on an unknown {.name} literal.
  """

  pending = []  # Tokens that haven't been yielded

  for i, token in enumerate(tokens):

    if i % 2 == 0:  # chars in between {} tokens
      if whitespace_mode != 'S':  # S = significant whitespace
        token = token.strip()
      pending.append(token)

    else:  # {} tokens
      assert token.startswith('{'), repr(token)
      assert token.endswith('}'), repr(token)

      # combine=True means this {...} group is really regex text (a literal
      # or a repetition range), so it is merged into the surrounding regex.
      combine = False
      word = token[1:-1]
      if word.startswith('.') or word.startswith('!'):
        literal = LITERALS.get(word)
        if literal is not None:
          result = literal
          combine = True
        else:
          match = READABLE_CHAR_CLASS.match(word)
          if match:
            result = MakeCharClass(match.group(1), match.group(2))
            combine = True
          else:
            raise _exceptions.PatternSyntaxError('Invalid literal %r' % token)

      elif REPEAT_RANGE_RE.match(token):
        combine = True
        result = token

      else:
        result = word  # A real instruction; braces stripped.

      if combine:
        pending.append(result)
      else:
        # Flush the accumulated regex text (possibly '') before the
        # instruction, preserving strict regex/instruction alternation.
        yield ''.join(pending)
        pending = []
        yield result

  if pending:
    yield ''.join(pending)


# Parses one line of a pattern file:
#   [/modifier1/modifier2 ] [mode chars] | rest-of-line
LINE_RE = re.compile(r"""
    \s*
    (?: / ([a-z\-/]+)* \s+ )?
    ([PSIL'"]*)                # JSON Pattern line modes
    \s*
    \|
    \s*
    (.*)                       # the rest of the line
    """, re.VERBOSE)

# Maps /modifier names to Python re compilation flags.
MODIFIERS = {
    'ignore-case': re.IGNORECASE,
    'locale': re.LOCALE,
    'multiline': re.MULTILINE,
    'dot-all': re.DOTALL,
    'unicode': re.UNICODE,
    'verbose': re.VERBOSE,
    }


# There aren't that many metacharacters, so we search for them explicitly here.
# http://docs.python.org/library/re.html#re-syntax
# TODO:
# - Could I use string.maketrans?
# - It's possible to trade even more compile time for runtime by parsing the
# regex, e.g. so \. is treated as a literal string rather than a regex.
REGEX_RE = re.compile(r'[\[\]\\(){}.*+?\^$|]')


def _MakeFlags(modifiers):
  """Make an argument to re.compile from program flags."""
  if not modifiers:
    return 0

  # We're verbose by default.
  flags = re.VERBOSE
  modifiers = modifiers.split('/')
  for m in modifiers:
    flag = MODIFIERS.get(m)
    if flag:
      flags |= flag
    else:
      raise _exceptions.PatternSyntaxError('Invalid modifier %r' % m)
  return flags


def Tokenize(s):
  """Process lines, each of which may be tokenized in a different way.

  Args:
    s: The entire pattern program as a string.

  Yields:
    (token_type, token_string, flags) tuples; flags is None for block and
    literal lines, and an re.compile flags int for pattern lines.

  Raises:
    _exceptions.PatternSyntaxError: on a malformed line or line mode.
  """

  for line in s.splitlines():
    line = line.strip()

    # TODO: Maybe use blank lines for fancy HTML formatting
    if not line:
      continue

    # '#' lines are comments.
    if line.startswith('#'):
      continue

    match = LINE_RE.match(line)
    if match:
      modifiers, mode, rest = match.groups()
    else:
      raise _exceptions.PatternSyntaxError('Invalid line format: %r' % line)

    # Nothing after the '|' separator: skip the line.
    if not rest:
      continue

    content_mode = ' '  # Block
    whitespace_mode = 'T'  # Strip whitespace between instructions
    for char in mode:
      # S = significant whitespace
      # I = insignificant whitespace
      if char in ('S', 'I'):
        whitespace_mode = char

      # R = regex is between instructions
      # L = literal is between instructions
      elif char in ('P', 'R', 'L', "'", '"'):  # TODO: Rename P -> R everywhere
        content_mode = char

      else:
        raise _exceptions.PatternSyntaxError('Invalid line mode %r' % mode)

    # NOTE(review): content_mode '"' matches none of the branches below, so
    # such a line yields nothing -- confirm this is intended.
    if content_mode == ' ':  # Block
      yield BLOCK_TOKEN, rest, None

    elif content_mode == "'":  # Literal
      yield LITERAL_TOKEN, rest, None

    elif content_mode in ('P', 'R', 'L'):
      flags = _MakeFlags(modifiers)

      for token_type, token in TokenizePatternLine(
          rest, content_mode, whitespace_mode):
        # Optimization: Any regex token which has no flags and no regex
        # metacharacters can be treated as a literal.
        if (token_type == REGEX_TOKEN and
            not flags and
            not REGEX_RE.search(token)):
          token_type = LITERAL_TOKEN

        yield token_type, token, flags


def TokenizePatternLine(line, content_mode='R', whitespace_mode='T'):
  r"""Tokenize one pattern line.

  Split the program into a strictly alternating series of

  regex ( {instruction} regex ) *

  (Literal strings are a special case of regular expressions.)

  There are 2 things that must be taken care of:
  - Escaping metacharacters \{ \}
  - {3,6} is not a JSON Pattern directive -- this should be seen literally by
    the regex engine

  Args:
    line: The text after the '|' separator.
    content_mode: 'L' marks text between instructions as literal; any other
        value marks it as a regex.
    whitespace_mode: Passed through to CombineSpecial.

  Yields:
    (token_type, token) pairs; empty tokens are dropped.
  """
  # \{ -> {.lbrace}, \} -> {.rbrace}
  line = ESCAPED_RE.sub(BackslashEscape, line)
  # Split into ... {...} ... {...} ...
  tokens = SPLIT_RE.split(line)
  # Combine {m}  {m,n}  {.lbrace}  {.rbrace}, since they're not instructions
  tokens = CombineSpecial(tokens, whitespace_mode=whitespace_mode)

  text_token_type = LITERAL_TOKEN if content_mode == 'L' else REGEX_TOKEN

  # CombineSpecial yields regex, instruction, regex, ... -- even positions
  # are plain text, odd positions are instructions.
  for i, token in enumerate(tokens):
    if not token:  # no blanks
      continue

    if i % 2 == 0:
      yield text_token_type, token
    else:
      yield INSTRUCTION_TOKEN, token


# For parsing the '{m,n}' range form in /repeated; either side may be empty.
REPETITION_RE = re.compile(r'(\d*),(\d*)')

def ParseRepetitions(s):
  """Parse a repetition string into a (min, max) pair.

  Accepts '*', '+', '?', a bare count like '3', or a range like '2,5'
  where either side may be omitted.  This matches Python regex syntax.
  TODO: Match greedy repetitions too?

  Returns:
    (min, max), where max is None when there is no upper bound.

  Raises:
    _exceptions.PatternSyntaxError: if s can't be parsed.
  """
  shorthand = {'*': (0, None), '+': (1, None), '?': (0, 1)}
  if s in shorthand:
    return shorthand[s]
  if s.isdigit():
    count = int(s)
    return count, count

  match = REPETITION_RE.match(s)
  if not match:
    raise _exceptions.PatternSyntaxError("Couldn't parse repetitions %r" % s)

  lo, hi = match.group(1), match.group(2)
  # An omitted lower bound defaults to 0; an omitted upper bound means
  # unbounded (None).
  return (int(lo) if lo else 0), (int(hi) if hi else None)

