# encoding: utf-8

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re

from .util.scanner import *

# The complete set of token types that _tokenize_inline (below) can emit:
# the literal symbol tokens ('\n', dashes, ellipsis, non-breaking-space
# tilde) followed by the generic 'delimiter' and coalesced 'text' types.
inline_types = (
    '\n',
    '---',
    '--',
    '...',
    '~',
    'delimiter',
    'text'
)

def _tokenize_inline(input_string):
    """Scan *input_string* for inline markup and yield its tokens.

    Consecutive single-character 'text' tokens produced by the scanner
    are merged into one 'text' token before being yielded.
    """
    def emit_delimiter(match):
        yield Token('delimiter', match.group())

    def emit_text(match):
        yield Token('text', match.group())

    def emit_symbol(match):
        yield Token(match.group())

    def emit_escaped(match):
        # Drop the leading backslash; the escaped character is plain text.
        yield Token('text', match.group()[1:])

    rules = [
        #Escape character:
        (r'\\.', emit_escaped),
        #Line break:
        (r'\n', emit_symbol),
        #Em dash:
        (r'(?<!-)---(?!-)', emit_symbol),
        #En dash:
        (r'(?<!-)--(?!-)', emit_symbol),
        #Ellipsis:
        (r'(?<!\.)\.\.\.(?!\.)', emit_symbol),
        #Non-breaking space:
        (r'(?<=\S)~(?=\S)', emit_symbol),
        #Quotation delimiter:
        (r'"', emit_delimiter),
        #Strong delimiter:
        (r'\*', emit_delimiter),
        #Emphasis delimiter:
        (r'_', emit_delimiter),
        #Code delimiter:
        (r'`', emit_delimiter),
        #Pass-through:
        (r'.', emit_text)
    ]

    scanner = Scanner(rules)

    pending = []  # buffered fragments of the current run of text tokens
    for token in scanner.scan(input_string):
        if token.type == 'text':
            pending.append(token.text)
            continue
        # A non-text token ends the current text run, if any.
        if pending:
            yield Token('text', ''.join(pending))
            pending = []
        yield token
    if pending:
        yield Token('text', ''.join(pending))


def _tokenize_block(input_string):
    """Scan *input_string* for block-level markup and yield its tokens.

    Recognizes headers and ordered/unordered list items; header text and
    plain text lines are re-tokenized as inline markup.
    """
    def emit_header(match):
        marker = match.group('level')
        header = Token('header', marker)
        header.level = len(marker)  # header depth == number of '=' signs
        yield header
        # The header's text portion is inline markup in its own right.
        for inline_token in _tokenize_inline(match.group('text')):
            yield inline_token

    def emit_ordered(match):
        item = Token('ordered item', match.group())
        item.level = len(match.group('indent'))
        yield item

    def emit_unordered(match):
        item = Token('unordered item', match.group())
        item.level = len(match.group('indent'))
        yield item

    def emit_text(match):
        return _tokenize_inline(match.group())

    rules = [
        #Header:
        (r'^\s*(?P<level>=+)\s*(?P<text>[^=\s].*?)[=\s]*?$', emit_header),
        #Ordered item:
        (r'^(?P<indent>\s*)\d*\.\s', emit_ordered),
        #Unordered item:
        (r'^(?P<indent>\s*)\*\s', emit_unordered),
        #Text:
        (r'.*?\n', emit_text)
    ]

    for token in Scanner(rules).scan(input_string):
        yield token


def _tokenize(input_stream):
    """Read *input_stream* line by line and yield tokens for all of it.

    Lines whose newline is escaped by a backslash are spliced onto the
    following line before scanning.  Block quotes are recognized here;
    everything else is handed to the block-level tokenizer.  A final
    'EOF' token is always emitted.
    """
    def emit_block_quote(match):
        yield Token('block quote', match.group())

    def emit_other(match):
        return _tokenize_block(match.group())

    rules = [
        #Block quote:
        (r'\s*>\s*?(?=\n|\S)', emit_block_quote),
        #Everything else:
        (r'.*?\n', emit_other)
    ]

    scanner = Scanner(rules)

    # A line is continued when its newline is preceded by an odd number
    # of backslashes (an even count means they are all escaped literals).
    continued = re.compile(r'^(.*[^\\])?(\\\\)*\\\n$')
    while True:
        line = input_stream.readline()
        while continued.match(line):
            # Drop the backslash + newline, then splice on the next
            # physical line with its leading whitespace stripped.
            line = line[:-2] + input_stream.readline().lstrip()
        if not line:
            break
        for token in scanner.scan(line):
            yield token
    yield Token('EOF', '')


class Tokenizer(object):
    """Iterator over the tokens of *input_stream* with one-token lookahead.

    ``peek()`` returns the next token without consuming it; ``next()``
    consumes and returns it.  ``StopIteration`` from the underlying
    token generator propagates to the caller when the input is exhausted.
    """

    def __init__(self, input_stream):
        self._tokens = _tokenize(input_stream)
        self._peeked = None  # cached lookahead token, or None if empty

    def __iter__(self):
        return self

    def peek(self):
        # Pull one token ahead and cache it until next() is called.
        if self._peeked is None:
            self._peeked = self.next()
        return self._peeked

    def next(self):
        if self._peeked is not None:
            token = self._peeked
            self._peeked = None
        else:
            # Use the next() builtin rather than generator.next():
            # generators have no .next() method on Python 3, so the old
            # call broke there despite the module's __future__ imports.
            token = next(self._tokens)
        return token

    # Python 3 iterator protocol (backward-compatible addition).
    __next__ = next


class TokenListIter(object):
    """Iterator over an in-memory list of tokens with one-token lookahead.

    Unlike Tokenizer, exhaustion is signalled by returning ``None`` from
    ``peek()``/``next()`` rather than raising StopIteration; callers
    must check for ``None`` themselves.
    """

    def __init__(self, tokens):
        # The list is consumed destructively from the front by next().
        self._tokens = tokens

    def __iter__(self):
        return self

    def peek(self):
        # Return the next token without consuming it, or None when empty.
        if not self._tokens:
            return None
        return self._tokens[0]

    def next(self):
        # Consume and return the next token, or None when empty.
        if not self._tokens:
            return None
        return self._tokens.pop(0)

    # Python 3 iterator protocol (backward-compatible addition; note the
    # None-on-exhaustion convention means a bare for-loop never ends).
    __next__ = next

