import os
import pickle
import xml.dom.minidom
import re
import numpy as np
from sklearn.model_selection import train_test_split
import jieba

'''
Process the CEC (Chinese Emergency Corpus) dataset into
character/word-level BIO-annotated training data.
'''

# Half-width and full-width punctuation characters stripped before word
# segmentation. NOTE: the triple-quoted literal spans several physical
# lines, so the newline character is also (intentionally or not) in the set.
punct = set(u''':!),.:;?]}¢'"、。〉》」』】〕〗〞︰︱︳﹐､﹒
﹔﹕﹖﹗﹚﹜﹞！），．：；？｜｝︴︶︸︺︼︾﹀﹂﹄﹏､～￠
々‖•·ˇˉ―--′’”([{£¥'"‵〈《「『【〔〖（［｛￡￥〝︵︷︹︻
︽︿﹁﹃﹙﹛﹝（｛“‘-—_…''')


# PEP 8 discourages binding lambdas to names; plain functions also give
# useful tracebacks and docstrings.
def filterpunt(s):
    """Return string *s* with every punctuation character removed."""
    return ''.join(ch for ch in s if ch not in punct)


def filterpuntl(l):
    """Return list *l* without the elements that are punctuation tokens."""
    return [tok for tok in l if tok not in punct]


def get_word_BIO(token, label):
    '''
    Segment *token* with jieba and append the words, plus one *label* per
    word, to the module-level per-sentence buffers.

    Parameters
    ----------
    token : str
        Raw span text; punctuation is stripped before segmentation.
    label : str
        Label applied to every segmented word: "O" for outside text, or
        "T"/"L"/"P"/"J"/a Denoter type string for annotated spans.
        (A "B-"/"I-" BIO prefix scheme was abandoned in the original code;
        every word of a span gets the same bare label.)
    '''
    global origin_sent_list
    global bio_sent_list
    token = filterpunt(token)
    tokens = list(jieba.cut(token, cut_all=False))
    # One label per segmented word keeps both streams the same length.
    # When segmentation yields nothing (token was all punctuation) both
    # appended lists are empty, so the flattened word and label sequences
    # stay aligned. (The original emitted one stray label in that case.)
    bio = [label] * len(tokens)

    origin_sent_list.append(tokens)
    bio_sent_list.append(bio)


def search_subnote(node, end, bio_list=None):
    '''
    Recursively walk an annotation subtree, emitting word/label pairs for
    every labeled span via get_word_BIO.

    Parameters
    ----------
    node : xml.dom.minidom.Node
        Current node of the traversal.
    end : unused
        Threaded through the recursion for interface compatibility only.
    bio_list : unused
        Kept for interface compatibility only.
    '''
    if node.nodeName == "Event":
        # Debug trace retained from the original implementation.
        print(node.firstChild)
        for child in node.childNodes:
            search_subnote(child, end)
        return
    if not node.firstChild:
        return
    # Text between the tags; raw string pattern avoids the invalid '\s'
    # escape the original used, with identical matching behavior.
    words = node.firstChild.data.strip()
    words = re.sub(r'\s', '', words)
    # Map annotation tag -> label; Denoter carries its own "type" attribute.
    if node.nodeName == "Denoter":
        get_word_BIO(words, node.getAttribute('type'))
    elif node.nodeName == "Time":
        get_word_BIO(words, "T")
    elif node.nodeName == "Location":
        get_word_BIO(words, "L")
    elif node.nodeName == "Participant":
        get_word_BIO(words, "P")
    elif node.nodeName == "Object":
        get_word_BIO(words, "J")
    # Recurse into nested annotations (iterating an empty childNodes list
    # is harmless, so no guard is needed).
    for child in node.childNodes:
        search_subnote(child, end)


def deal_text(node):
    '''
    Label whitespace-stripped free text between annotation tags as
    outside ("O").

    Known limitation carried over from the original: empty element nodes
    such as <Denoter did="d13" type="stateChange"></Denoter> have no text
    and are silently skipped, so those annotations are lost.
    '''
    # Only text nodes expose .data; element nodes raise AttributeError.
    # Catch exactly that instead of the original bare `except`, which
    # also hid real failures in re.sub / get_word_BIO.
    try:
        st = node.data.strip()
    except AttributeError:
        return
    st = re.sub(r'\s', '', st)
    if st:
        get_word_BIO(st, "O")


# Module-level accumulators mutated via `global` by the helpers above:
# *_sent_list hold the current sentence's word lists / label lists and are
# reset per sentence in read_xml(); *_data_list collect the flattened
# per-sentence sequences across all parsed files.
origin_sent_list = []
bio_sent_list = []
origin_data_list = []
bio_data_list = []


def read_xml(file_name):
    '''
    Parse one annotated CEC XML file and append each kept sentence's
    flattened word list / label list to origin_data_list / bio_data_list.

    Only sentences containing exactly one <Denoter> (one event trigger)
    are kept; all others are skipped.
    '''
    dom = xml.dom.minidom.parse(file_name)
    root = dom.documentElement
    paragraph_list = root.getElementsByTagName("Paragraph")

    global origin_sent_list
    global bio_sent_list
    global origin_data_list
    global bio_data_list

    for p in paragraph_list:
        for s in p.getElementsByTagName("Sentence"):
            # Fresh per-sentence buffers; search_subnote/deal_text append
            # into them via `global`.
            origin_sent_list = []
            bio_sent_list = []
            events = s.getElementsByTagName("Denoter")
            # Keep only single-event sentences.
            if len(events) != 1: continue
            for e in s.childNodes:
                if e.firstChild:
                    # Element node: walk its children, handling nested
                    # annotations and bare text separately.
                    for i in e.childNodes:
                        if i.firstChild:
                            search_subnote(i, end=None)
                        else:
                            deal_text(i)
                else:
                    # Bare text node between tags -> outside ("O") label.
                    deal_text(e)
            # Flatten list-of-word-lists / list-of-label-lists into flat
            # per-sentence sequences.
            origin_sent_list = [i for token in origin_sent_list for i in token]
            bio_sent_list = [i for token in bio_sent_list for i in token]
            origin_data_list.append(origin_sent_list)
            bio_data_list.append(bio_sent_list)


# Every file path found under the corpus root by search_file().
file_list = []


def search_file(filepath):
    """Recursively collect every file path under *filepath* into the
    module-level ``file_list`` (directories are descended into, files
    are appended)."""
    global file_list
    for entry in os.listdir(filepath):
        full_path = os.path.join(filepath, entry)
        if os.path.isdir(full_path):
            search_file(full_path)
        else:
            file_list.append(full_path)


def save_labeled_data(file_list, data_path, file_name):
    '''
    Read every XML file in *file_list*, then write the accumulated
    sentences to data_path/file_name, one sentence per line in
    "word/LABEL word/LABEL ... " format.

    Resets the module-level accumulators afterwards so consecutive calls
    (e.g. train then test) do not mix sentences.
    '''
    global origin_data_list
    global bio_data_list

    for file_path in file_list:
        read_xml(file_path)
    print("find {} sentence in this corpus".format(len(bio_data_list)))
    # Explicit UTF-8: the corpus is Chinese and the platform default
    # encoding (e.g. on Windows) could reject or corrupt it.
    with open(os.path.join(data_path, file_name), "w", encoding="utf-8") as f:
        for sentent, bio_sent in zip(origin_data_list, bio_data_list):
            for line, bio_line in zip(sentent, bio_sent):
                f.write(line + "/" + bio_line + " ")
            f.write("\n")
    origin_data_list, bio_data_list = [], []


def main(mode):
    '''
    Entry point for corpus labeling.

    mode == "build_vocab": label every corpus file into one dataset.txt.
    mode == "partition":   split the files 80/20 and write train.txt and
                           test.txt separately.
    '''
    data_path = os.path.abspath("../data")
    corpus_dir = os.path.abspath('../data/ori/CEC-Corpus/CEC')
    search_file(corpus_dir)

    if not os.path.exists(data_path):
        os.makedirs(data_path)

    if mode == "build_vocab":
        save_labeled_data(file_list, data_path, "dataset.txt")
        return
    if mode == "partition":
        train_files, test_files = train_test_split(file_list, train_size=0.8)
        print("train file counts {}".format(len(train_files)))
        save_labeled_data(train_files, data_path, "train.txt")
        print("test file counts {}".format(len(test_files)))
        save_labeled_data(test_files, data_path, "test.txt")


if __name__ == '__main__':
    # Default run: label the whole corpus into dataset.txt.
    main("build_vocab")
