# encoding: utf-8

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import sys

from . import tokens

#TODO: Just use logging.NullHandler once support for Python 2.6 is removed.
class NullHandler(logging.Handler):
    """A handler that silently discards every record.

    Backport shim for ``logging.NullHandler`` (see TODO above); keeps the
    library quiet when the embedding application configures no logging.
    """

    def emit(self, record):
        # Deliberately do nothing.
        pass

    def handle(self, record):
        # Skip the usual filter/lock machinery entirely.
        pass

    def createLock(self):
        # No lock is needed because nothing is ever emitted.
        return None


# Module-level logger.  Attaching a NullHandler means parse warnings are
# dropped unless the embedding application configures logging itself.
logger = logging.getLogger(__name__)
logger.addHandler(NullHandler())


class DomNode(object):
    """One node of the parsed document tree.

    Carries a ``type`` tag (e.g. 'text', 'paragraph', 'header'), a
    ``parent`` back-link and an ordered ``children`` list.  Extra
    attributes (``text``, ``level``) are attached ad hoc by the parser.
    """

    def __init__(self, type):
        self.type = type       # node kind tag
        self.parent = None     # set when attached via add_child()
        self.children = []     # ordered child nodes

    def add_child(self, node):
        """Append *node* as the last child, detaching it from any previous parent."""
        previous_parent = node.parent
        if previous_parent is not None:
            previous_parent.children.remove(node)
        node.parent = self
        self.children.append(node)


def _append_text(parent_node, text):
    if parent_node.children and parent_node.children[-1].type == 'text':
        parent_node.children[-1].text += text
    else:
        node = DomNode('text')
        node.text = text
        parent_node.add_child(node)


def _in_hierarchy(node, type):
    if node.type == type:
        return True
    elif node.parent:
        return _in_hierarchy(node.parent, type)
    return False


def _parse_inline(token_stream, parent_node):
    """Parse a run of inline tokens into children of *parent_node*.

    Handles paired delimiters ('"', '*', '_', '`'), the punctuation
    tokens '---'/'--'/'...'/'~', and plain text.  Recurses to build
    nested delimiter spans; stops at the first non-inline token or at
    the end of the stream.  Unterminated spans are reported via
    logger.warning, not raised.
    """
    unterminated_fmt = 'unterminated "{0}"'
    # Delimiter character -> the DOM node type it opens/closes.
    delim_map = {
        '"': 'quote',
        '*': 'strong',
        '_': 'emphasized',
        '`': 'code'
    }
    while True:
        token = token_stream.peek()
        if token is None or token.type not in tokens.inline_types:
            # End of the inline run: if we are still inside a delimiter
            # span, it was never closed -- warn before unwinding.
            if parent_node.type in delim_map.values():
                logger.warning(unterminated_fmt.format(parent_node.type))
            break

        if token.type == 'delimiter':
            # NOTE(review): assumes the tokenizer only ever produces
            # delimiter tokens whose text is a delim_map key -- any other
            # character would raise KeyError here.
            node_type = delim_map[token.text]
            if token.text == '`':
                # Code spans are literal: consume tokens verbatim (no
                # nested markup) until the closing backtick.
                token_stream.next()
                node = DomNode(node_type)
                parent_node.add_child(node)
                while True:
                    token = token_stream.peek()
                    if token is None or \
                            token.type not in tokens.inline_types:
                        logger.warning(unterminated_fmt.format(node.type))
                        break
                    token_stream.next()
                    if token.type == 'delimiter' and token.text == '`':
                        break
                    _append_text(node, token.text)
            elif _in_hierarchy(parent_node, node_type):
                # A span of this type is already open at or above us.
                if parent_node.type == node_type:
                    # It closes the current span: consume it and return.
                    token_stream.next()
                else:
                    # It closes an outer span, so the current span was
                    # never terminated; leave the token for the ancestor
                    # recursion level to consume.
                    logger.warning(
                            unterminated_fmt.format(parent_node.type))
                break
            else:
                # Opening delimiter: recurse to fill the new child span.
                token_stream.next()
                node = DomNode(node_type)
                parent_node.add_child(node)
                _parse_inline(token_stream, node)
        else:
            token_stream.next()
            if token.type == '---':
                parent_node.add_child(DomNode('em dash'))
            elif token.type == '--':
                parent_node.add_child(DomNode('en dash'))
            elif token.type == '...':
                parent_node.add_child(DomNode('ellipsis'))
            elif token.type == '~':
                parent_node.add_child(DomNode('nbsp'))
            elif token.type == 'text' or token.type == '\n':
                _append_text(parent_node, token.text)


def _parse_header(token_stream):
    """Consume one header token plus its inline content.

    Returns a 'header' DomNode carrying the token's ``level``; the
    header's text runs to the terminating newline (consumed) or the
    first non-inline token / end of stream (left in place).
    """
    node = DomNode('header')

    first = token_stream.next()
    assert first.type == 'header'
    node.level = first.level

    # Collect the header's inline tokens for a second parsing pass.
    collected = []
    while True:
        ahead = token_stream.peek()
        if ahead is None:
            break
        if ahead.type == '\n':
            token_stream.next()
            break
        if ahead.type not in tokens.inline_types:
            break
        collected.append(token_stream.next())

    _parse_inline(tokens.TokenListIter(collected), node)
    return node


def _parse_text(token_stream, parent):
    """Collect one paragraph's worth of inline tokens into *parent*.

    Gathers tokens until a blank line (two consecutive newlines), a
    non-inline token, or the end of the stream; the terminating token is
    left unconsumed.  The collected run is then parsed as inline markup.
    """
    collected = []
    just_saw_newline = False
    while True:
        ahead = token_stream.peek()
        if ahead is None:
            break
        if ahead.type == '\n':
            if just_saw_newline:
                break  # blank line: the paragraph ends here
            just_saw_newline = True
            collected.append(token_stream.next())
        elif ahead.type in tokens.inline_types:
            just_saw_newline = False
            collected.append(token_stream.next())
        else:
            break
    _parse_inline(tokens.TokenListIter(collected), parent)


def _parse_list_item(token_stream, parent, indent):
    """Parse one list item's text, then any more-deeply-indented sub-list."""
    _parse_text(token_stream, parent)
    ahead = token_stream.peek()
    if ahead is None:
        return
    # A following item token with greater indentation starts a nested list.
    if ahead.type == 'ordered item' and ahead.level > indent:
        parent.add_child(_parse_ordered_list(token_stream))
    elif ahead.type == 'unordered item' and ahead.level > indent:
        parent.add_child(_parse_unordered_list(token_stream))


def _parse_ordered_list(token_stream):
    """Parse consecutive 'ordered item' tokens into an 'ordered list' node.

    The first item's level fixes the list's indentation; items continue
    while their level is at least that deep.
    """
    result = DomNode('ordered list')
    base_level = token_stream.peek().level
    while True:
        item_token = token_stream.peek()
        done = (item_token is None
                or item_token.type != 'ordered item'
                or item_token.level < base_level)
        if done:
            return result
        token_stream.next()
        item_node = DomNode('list item')
        _parse_list_item(token_stream, item_node, item_token.level)
        result.add_child(item_node)


def _parse_unordered_list(token_stream):
    """Parse consecutive 'unordered item' tokens into an 'unordered list' node.

    The first item's level fixes the list's indentation; items continue
    while their level is at least that deep.
    """
    result = DomNode('unordered list')
    base_level = token_stream.peek().level
    while True:
        item_token = token_stream.peek()
        done = (item_token is None
                or item_token.type != 'unordered item'
                or item_token.level < base_level)
        if done:
            return result
        token_stream.next()
        item_node = DomNode('list item')
        _parse_list_item(token_stream, item_node, item_token.level)
        result.add_child(item_node)


def _parse_block_quote(token_stream):
    """Parse a run of block-quote lines into a 'block quote' node.

    Every source line must start with a 'block quote' marker token; the
    marker is stripped and the remaining tokens of each line are
    collected, then re-parsed as block-level content of the quote.
    """
    block_quote = DomNode('block quote')
    block_tokens = []
    newline = True  # True while positioned at the start of a source line
    while True:
        token = token_stream.peek()
        if token is None:
            break
        if newline:
            # At line start: a missing quote marker ends the quote.
            if token.type != 'block quote':
                break
            token_stream.next()  # strip the marker itself
            newline = False
        else:
            newline = (token.type == '\n')
            block_tokens.append(token_stream.next())
    # The stripped line contents form a miniature document of their own.
    _parse_block(tokens.TokenListIter(block_tokens), block_quote)
    return block_quote


def _parse_block(token_stream, parent):
    """Parse block-level constructs from the stream into *parent*'s children.

    Dispatches on each peeked token: headers, ordered/unordered lists,
    block quotes, or -- for inline tokens -- a new paragraph.  Bare
    newlines between blocks are skipped; parsing stops at EOF or end of
    stream.
    """
    while True:
        token = token_stream.peek()
        if token is None:
            break

        if token.type == '\n':
            # Blank lines between blocks carry no content.
            token_stream.next()
        elif token.type == 'header':
            parent.add_child(_parse_header(token_stream))
        elif token.type == 'ordered item':
            parent.add_child(_parse_ordered_list(token_stream))
        elif token.type == 'unordered item':
            parent.add_child(_parse_unordered_list(token_stream))
        elif token.type == 'block quote':
            parent.add_child(_parse_block_quote(token_stream))
        elif token.type in tokens.inline_types:
            # Any inline token at block level starts a paragraph.
            paragraph = DomNode('paragraph')
            _parse_text(token_stream, paragraph)
            parent.add_child(paragraph)
        elif token.type == 'EOF':
            break
        else:
            # BUG FIX: the original fell through with a bare `pass` and
            # never consumed the peeked token, so one unrecognized token
            # type made this loop spin forever.  Consume it and warn.
            token_stream.next()
            logger.warning('skipping unexpected token "{0}"'.format(token.type))


def parse(input_stream):
    """Tokenize *input_stream* and build the document tree.

    Returns the 'root' DomNode with all parsed blocks as its children.
    """
    document = DomNode('root')
    _parse_block(tokens.Tokenizer(input_stream), document)
    return document

