# -*- coding: utf-8 -*-
import re
import xml.dom.minidom
from xml.dom.minidom import Node
import xml.parsers.expat
from general.generaltools import Debugger
from general.stringoperations import xml_escape, xml_unescape

class Window(Debugger):
    """Context-window configuration.

    left/right  -- character margins taken before/after a query span
    words_num   -- token budget for sentence-level context collection
    """
    def __init__(self, left=5, right=5, words_num=100, isDebug=False, mode='std'):
        Debugger.__init__(self, isDebug, mode)
        self.left, self.right, self.words_num = left, right, words_num

class Document(Debugger):
    """A source document: raw (XML-escaped) plain text plus a POS-tagged
    version split into sentences, with helpers that locate a query mention
    at three granularities — character offset in the plain text, sentence
    (line) index, and token index inside the segmented sentence.

    Errors during location are appended to self.bugInfo (provided by the
    Debugger base class) rather than raised.
    """
    def __init__(self, docid="", offset=1, plain_text_path="", postag_text_path="",
            isDebug=False, mode='std'):
        """Load the plain and POS-tagged text files.

        docid            -- document identifier
        offset           -- base character offset applied to query positions
        plain_text_path  -- path of the raw (XML-escaped) document text
        postag_text_path -- path of the word#TAG annotated document text

        An IOError while reading is recorded (as a string) in
        self.errorRecord instead of propagating; both texts are then empty.
        """
        Debugger.__init__(self, isDebug, mode)
        self.docid = docid
        self.offset = offset
        self.errorRecord = set()
        try:
            # context managers guarantee the handles are closed (the
            # previous open(...).read() leaked them until GC)
            with open(plain_text_path, 'r') as text_file:
                self.plain_text = text_file.read()
            with open(postag_text_path, 'r') as text_file:
                self.postag_text = text_file.read()
        except IOError as e:
            self.errorRecord.add(str(e))
            self.plain_text = ""
            self.postag_text = ""
        self.plain_text_len = len(self.plain_text)
        self.sentences = []                # plain sentence strings
        self.sentences_len = 0             # kept in sync by get_lines()
        self.segmentalized_sentences = []  # per-sentence token lists
        self.tags_of_sentences = []        # per-sentence POS-tag lists

    def add_line(self, line):
        """Parse one 'word#TAG word#TAG ...' line and append the plain
        sentence, its token list and its tag list to the document."""
        if line != "":
            plain_line = ""
            segmentalized_line = []
            tags_of_line = []
            for ntagged_word in line.split(' '):
                tagged_word = ntagged_word.strip()
                if tagged_word != "":
                    parts = tagged_word.split('#')
                    word = parts[0]
                    # tolerate tokens without a '#' separator instead of
                    # raising IndexError; such tokens get an empty tag
                    tag = parts[1] if len(parts) > 1 else ""
                    plain_line += word
                    segmentalized_line.append(word)
                    tags_of_line.append(tag)
            if plain_line != "":
                self.sentences.append(plain_line)
                self.segmentalized_sentences.append(segmentalized_line)
                self.tags_of_sentences.append(tags_of_line)

    def get_lines(self):
        """Abstract hook: subclasses must populate self.sentences (and the
        parallel token/tag lists) from self.postag_text."""
        # NotImplementedError instead of `assert False`: asserts are
        # stripped under `python -O`, silently turning this into a no-op
        raise NotImplementedError('parse must be defined!')

    def get_query_context(self, query, window=None):
        """Return the sentence indices surrounding query.line_num whose
        total token count roughly reaches window.words_num (half of the
        budget before and half after the query sentence)."""
        # build the default lazily: a `window=Window()` default would be a
        # single shared instance created once at definition time
        if window is None:
            window = Window()
        context_lines = []
        half = (window.words_num - len(self.segmentalized_sentences[query.line_num])) // 2
        words_left = half
        current_line = query.line_num - 1
        while words_left > 0 and current_line > -1:
            context_lines.append(current_line)
            words_left -= len(self.segmentalized_sentences[current_line])
            current_line -= 1
        context_lines.append(query.line_num)
        words_left = half
        current_line = query.line_num + 1
        while words_left > 0 and current_line < self.sentences_len:
            context_lines.append(current_line)
            words_left -= len(self.segmentalized_sentences[current_line])
            current_line += 1
        return context_lines

    def __get_query_context(self, query, window):
        """Return the unescaped plain-text context around the query span,
        with whitespace stripped and partial XML entities trimmed off."""
        left = window.left
        right = window.right
        #limit the range of window to deal with no newlines in the QUOTE value
        left = left if left < 25 else 25
        right = right if right < 25 else 25
        begin = query.beg - left
        end = query.end + right

        unescape_context = xml_unescape(self.plain_text[begin:end]).replace('\n', '').replace(' ', '').replace(' ', '')

        #remove the partial XML entity that can't be unescaped
        name_pos_in_context = unescape_context.find(query.name_nospace)
        amp_pos_in_context = unescape_context.find('&')
        quot_pos_in_context = unescape_context.find(';')

        left_edge = 0
        right_edge = len(unescape_context)

        # a ';' near the start, before the name, is the tail of a cut entity
        if quot_pos_in_context != -1 and quot_pos_in_context < 5 \
                and quot_pos_in_context < name_pos_in_context:
            left_edge = quot_pos_in_context
        # a '&' near the end, after the name, is the head of a cut entity
        if amp_pos_in_context != -1 and right_edge - amp_pos_in_context < 6 \
                and amp_pos_in_context > name_pos_in_context:
            right_edge = amp_pos_in_context

        # NOTE(review): left_edge + 1 drops one leading character even when
        # no entity was trimmed — presumably deliberate slack so the context
        # never extends past the local line window; kept as-is
        return unescape_context[left_edge + 1:right_edge]

    def __update_query_plain_text_location(self, query):
        """Re-anchor query.beg/query.end to exact character offsets in
        plain_text by searching for the escaped query name near the
        reported offset; also sets query.offset_from_end."""
        temp = self.plain_text[self.offset + query.beg:]
        bias = temp.find(xml_escape(query.name))
        if bias == -1:
            #Include the situation where there is a newline between the query name
            bias = temp.find(query.name_with_newline)
            if bias == -1:
                self.bugInfo += "\tError:\tCan't find corresponding query:\n\t%s" % (str(query))
                self.bugInfo += "\tresult:{:<}\n".format(xml_escape(query.name))
                return
        query.beg = self.offset + query.beg + bias
        query.end = query.beg + len(query.name_nospace)
        query.offset_from_end = self.plain_text_len - query.end

    def __update_query_sentences_location(self, query):
        """Find the sentence containing the query and set query.line_num /
        query.line_beg.  First pass: scan each sentence.  Second pass: scan
        adjacent sentence pairs (the mention may have been split by a
        newline) and merge the pair in place when it matches."""
        count = -1
        for line in self.sentences:
            count += 1
            list_of_beg = [m.start() for m in re.finditer(re.escape(query.name_nospace), line)]
            for beg_in_line in list_of_beg:
                try:
                    offset_from_end_of_line = len(line) - (beg_in_line + len(query.name_nospace))
                    left = beg_in_line if beg_in_line < query.beg else query.beg
                    right = offset_from_end_of_line\
                            if offset_from_end_of_line < query.offset_from_end\
                            else query.offset_from_end
                except AttributeError as e:
                    self.bugInfo += "%s\n" % (str(e))
                    return

                context = self.__get_query_context(query, Window(left, right))

                #limit the line in local range to exactly match the context
                local_line = line[beg_in_line - left : beg_in_line + len(query.name_nospace) + right]

                if len(context) != 0 and local_line.find(context) != -1:
                    query.line_num = count
                    query.line_beg = beg_in_line
                    return

        # second pass: the mention may straddle two consecutive sentences
        for index in range(self.sentences_len):
            if index == self.sentences_len - 1:
                break
            joint_line = self.sentences[index] + self.sentences[index + 1]
            # re.escape added for consistency with the first pass: query
            # names containing regex metacharacters must match literally
            list_of_beg = [m.start() for m in re.finditer(re.escape(query.name_nospace), joint_line)]
            for beg_in_line in list_of_beg:
                try:
                    # bugfix: measure against joint_line, not the stale
                    # `line` variable left over from the loop above
                    offset_from_end_of_line = len(joint_line) - (beg_in_line + len(query.name_nospace))
                    left = beg_in_line if beg_in_line < query.beg else query.beg
                    right = offset_from_end_of_line\
                            if offset_from_end_of_line < query.offset_from_end\
                            else query.offset_from_end
                except AttributeError as e:
                    self.bugInfo += "%s\n" % (str(e))
                    return

                context = self.__get_query_context(query, Window(left, right))

                #limit the line in local range to exactly match the context
                local_line = joint_line[beg_in_line - left:beg_in_line + len(query.name_nospace) + right]

                # non-empty guard added for consistency with the first pass:
                # an empty context would "match" at position 0 and wrongly
                # merge unrelated sentences
                if len(context) != 0 and local_line.find(context) != -1:
                    query.line_num = index
                    query.line_beg = beg_in_line
                    # merge the two sentences (and their parallel token and
                    # tag lists) into one
                    self.sentences_len -= 1
                    self.sentences = self.sentences[:index] + [joint_line] + self.sentences[index + 2:]

                    joint_segmentalized_line = self.segmentalized_sentences[index] + \
                            self.segmentalized_sentences[index + 1]
                    joint_tags_of_line = self.tags_of_sentences[index] + \
                            self.tags_of_sentences[index + 1]
                    self.segmentalized_sentences = self.segmentalized_sentences[:index] + [joint_segmentalized_line] + \
                            self.segmentalized_sentences[index + 2:]
                    self.tags_of_sentences = self.tags_of_sentences[:index] + [joint_tags_of_line] + \
                            self.tags_of_sentences[index + 2:]
                    return
        self.bugInfo += "\t%s\n" %(query) + \
                "\t%s\n" %(xml_unescape(self.plain_text[query.beg - 20: query.end + 20])) + \
                "\tError:\tCan't find corresponding line\n"

    def __update_query_segmentation_location(self, query):
        """Set query.segmentalized_line_beg to the token index of the query
        name in its sentence, re-splitting tokens in place when the
        segmenter cut across the mention boundary."""
        if query.line_num == -1 or query.line_beg == -1:
            self.bugInfo += "\tError: Can't updata_segmentation_location!\n"
            return

        segmentalized_line = self.segmentalized_sentences[query.line_num]
        tags_of_line = self.tags_of_sentences[query.line_num]
        current_beg = 0
        count = -1
        words_len = len(segmentalized_line)
        for word in segmentalized_line:
            count += 1
            # the token covering character position query.line_beg
            if current_beg + len(word) > query.line_beg and current_beg <= query.line_beg:
                temp_word = word
                if temp_word == query.name_nospace:
                    #The simplest situation
                    query.segmentalized_line_beg = count
                    return
                #Normal situation: Query.name_nospace has not been correctly segmentalized.
                # grow a span of tokens until it contains the full name,
                # then re-split the span into [left extra, name, right extra]
                temp_word = ""
                for i in range(count, words_len):
                    temp_word += segmentalized_line[i]
                    if temp_word.find(query.name_nospace) != -1:
                        split_pos_beg = temp_word.find(query.name_nospace)
                        split_pos_end = len(temp_word) if split_pos_beg + len(query.name_nospace) == len(temp_word) \
                                else split_pos_beg + len(query.name_nospace)
                        left_extra_word = temp_word[:split_pos_beg]
                        correct_word = temp_word[split_pos_beg:split_pos_end]
                        right_extra_word = temp_word[split_pos_end:]
                        #remove the empty left or right extra word
                        extra_word_list = []
                        extra_tag_list = []
                        for j in [left_extra_word, correct_word, right_extra_word]:
                            if j != "":
                                extra_word_list.append(j)
                                extra_tag_list.append('ZS')  # placeholder tag for re-split tokens
                        self.segmentalized_sentences[query.line_num] = segmentalized_line[:count] + \
                                extra_word_list + segmentalized_line[i + 1:]
                        self.tags_of_sentences[query.line_num] = tags_of_line[:count] + \
                                extra_tag_list + tags_of_line[i + 1:]
                        query.segmentalized_line_beg = count
                        if left_extra_word != "":
                            query.segmentalized_line_beg = count + 1
                        return

            current_beg += len(word)
        if words_len == count + 1:
            self.bugInfo += "\tError: Can't match segmentalized line!\n"

    def update_query_location(self, query):
        """Locate the query in the document at all three granularities:
        plain-text offset, sentence index, then token index."""
        self.__update_query_plain_text_location(query)
        self.__update_query_sentences_location(query)
        self.__update_query_segmentation_location(query)

    def print_query_context(self, query):
        """Print the query's sentence with the query token parenthesized,
        followed by its surrounding context sentences; record consistency
        errors in self.bugInfo."""
        segmentalized_line = self.segmentalized_sentences[query.line_num]
        temp_info = ""
        for i in range(len(segmentalized_line)):
            if i == query.segmentalized_line_beg:
                temp_info += "(%s) " %(segmentalized_line[i])
            else:
                temp_info += "%s " %(segmentalized_line[i])
        print('+' + temp_info)
        print()
        for i in self.get_query_context(query):
            print(self.sentences[i])
        if len(self.tags_of_sentences[query.line_num]) != len(segmentalized_line):
            self.bugInfo += "\tError: Unequal between the numbers of tags.\n"
        if query.name_nospace != segmentalized_line[query.segmentalized_line_beg]:
            self.bugInfo += "\tError: Unequal between the numbers of words.\n"

class News(Document):
    """Newswire document: sentences come from HEADLINE, an optional
    DATETIME, and the text content of every <P> paragraph."""
    def __init__(self, docid="", offset=1, plain_text_path="", postag_text_path="",
            isDebug=False, mode='std'):
        Document.__init__(self, docid, offset, plain_text_path, postag_text_path,
                isDebug, mode)

    def get_lines(self):
        """Populate self.sentences (and parallel lists) from the POS-tagged
        XML in self.postag_text."""
        doc = xml.dom.minidom.parseString(self.postag_text)

        # guard the HEADLINE lookup the same way DATETIME is guarded below;
        # the unconditional [0] indexing crashed on documents without one
        headlineElements = doc.getElementsByTagName('HEADLINE')
        if len(headlineElements) != 0 and len(headlineElements[0].childNodes) != 0:
            self.add_line(headlineElements[0].childNodes[0].nodeValue.strip())

        datetimeElements = doc.getElementsByTagName('DATETIME')
        if len(datetimeElements) == 1 and len(datetimeElements[0].childNodes) != 0:
            self.add_line(datetimeElements[0].childNodes[0].nodeValue.strip())

        for paragraphElement in doc.getElementsByTagName('P'):
            for linesNode in paragraphElement.childNodes:
                if linesNode.nodeType == Node.TEXT_NODE:
                    for line in linesNode.data.strip().split('\n'):
                        self.add_line(line)

        self.sentences_len = len(self.sentences)

class Web(Document):
    """Web/forum document: sentences come from HEADLINE, an optional
    DATETIME, and each POST's POSTER, POSTDATE, quoted PREVIOUSPOST
    attribute and body text."""
    def __init__(self, docid="", offset=1, plain_text_path="", postag_text_path="",
            isDebug=False, mode='std'):
        Document.__init__(self, docid, offset, plain_text_path, postag_text_path,
                isDebug, mode)

    def get_lines(self):
        """Populate self.sentences (and parallel lists) from the POS-tagged
        XML in self.postag_text."""
        doc = xml.dom.minidom.parseString(self.postag_text)

        # guard HEADLINE/DATETIME the same way POSTER is guarded below;
        # the unconditional [0] indexing crashed on documents missing them
        headlineElements = doc.getElementsByTagName('HEADLINE')
        if len(headlineElements) != 0 and len(headlineElements[0].childNodes) != 0:
            self.add_line(headlineElements[0].childNodes[0].nodeValue.strip())

        datetimeElements = doc.getElementsByTagName('DATETIME')
        if len(datetimeElements) == 1 and len(datetimeElements[0].childNodes) != 0:
            self.add_line(datetimeElements[0].childNodes[0].nodeValue.strip())

        for postElement in doc.getElementsByTagName('POST'):
            posterElements = postElement.getElementsByTagName('POSTER')
            if len(posterElements) != 0:
                posterChilds = posterElements[0].childNodes
                if len(posterChilds) != 0:
                    self.add_line(posterChilds[0].nodeValue.strip())

            postdateElements = postElement.getElementsByTagName('POSTDATE')
            if len(postdateElements) != 0 and len(postdateElements[0].childNodes) != 0:
                self.add_line(postdateElements[0].childNodes[0].nodeValue.strip())

            quoteElements = postElement.getElementsByTagName('QUOTE')
            if len(quoteElements) != 0:
                # quoted text of the previous post lives in an attribute
                previouspost = quoteElements[0].getAttribute('PREVIOUSPOST')
                for line in previouspost.split('\n'):
                    self.add_line(line.strip())

            # the post body is the direct text content of <POST>
            for linesNode in postElement.childNodes:
                if linesNode.nodeType == Node.TEXT_NODE:
                    for line in linesNode.data.strip().split('\n'):
                        self.add_line(line.strip())

        self.sentences_len = len(self.sentences)
