# -*- coding: utf-8 -*-

import os
import sys
import time
import numpy
import re
from collections import defaultdict, OrderedDict
from lxml import etree
import threading
from concurrent.futures.thread import ThreadPoolExecutor
import random
from typing import List, Dict, TextIO, Any

# ## define the dataset paths (competition data layout on disk)
DATA_DIR = "ccks_7_1_competition_data"
TRAINING_DIR = "训练集"  # "training set" directory
TESTING_DIR = "验证集"  # "validation set" directory
training_file = "entity_type.txt"  # tab-separated "entity<TAB>type" lines
testing_file = "test.txt"  # one test entity per line
pages_file = ["entity_pages_1.xml", "entity_pages_2.xml", "entity_pages_3.xml", "entity_pages_4.xml"]
# ## NER schema: Begin / Middle / End / Outside / Single
ner_schema = "BMEOS"
# ## mapping from Chinese entity-type names to short tag suffixes
UNK = "UNK"
type_dict = {"医学专科": "SPE", "检查科目": "CHK", "药物": "MED", "症状": "SYM", "疾病": "DIS", "细菌": "BAC", "病毒": "VIR", "NoneType": "Non", UNK: UNK}
# Example produced by tagging with the training file:
# 脉血康胶囊，破血，逐瘀，通脉止痛。用于癥瘕痞块，血瘀经闭，跌打损伤。
# 脉/B-MED 血/M-MED 康/M-MED 胶/M-MED 囊/E-MED, ...
# ## global lock serialising writes to the shared output file from worker threads
lock = threading.Lock()


def tagging_sentence(line: str, entity_label: Dict[str, str]):
    """Tag every character of *line* with a BMEOS label.

    Characters covered by an entity occurrence get B/M/E tags (or S for a
    single-character span); everything else gets ``O-UNK``.

    :param line: raw sentence text.
    :param entity_label: maps entity surface form -> short type tag
        (e.g. "MED"). NOTE(review): the entity string is used directly as
        a regex pattern; names containing regex metacharacters would need
        ``re.escape`` — confirm the inputs are plain text.
    :return: the space-joined "char/TAG" sentence, or ``None`` when no
        entity matches at all.
    """
    # matches[i] = (entity, tag) owning character i, or None when untagged.
    matches: List = [None] * len(line)
    for entity, label in entity_label.items():
        for item in re.finditer(entity, line):
            start, end = item.span()
            for index in range(start, end):
                matches[index] = (entity, label)
    if not any(item is not None for item in matches):
        return None

    sentence = []
    flag = False  # True while inside a multi-character entity span
    for index in range(len(line) - 1):
        cur = matches[index]
        nxt = matches[index + 1]
        if cur is None:
            sentence.append(line[index] + '/' + 'O' + '-' + UNK)
            continue
        if not flag:
            if nxt is not None and nxt[0] == cur[0]:
                # next char belongs to the same entity -> open a span
                sentence.append(line[index] + '/' + 'B' + '-' + cur[1])
                flag = True
            else:
                sentence.append(line[index] + '/' + 'S' + '-' + cur[1])
                flag = False
        else:
            # BUG FIX: the original compared matches[index + 1] with itself
            # (always True), so two adjacent *different* entities were never
            # closed with an E tag; compare against the current entity.
            if nxt is not None and nxt[0] == cur[0]:
                sentence.append(line[index] + '/' + 'M' + '-' + cur[1])
            else:
                sentence.append(line[index] + '/' + 'E' + '-' + cur[1])
                flag = False
    # last position: close an open span with E, otherwise S or O.
    if matches[-1] is not None:
        if flag:
            sentence.append(line[-1] + '/' + 'E' + '-' + matches[-1][1])
        else:
            sentence.append(line[-1] + '/' + 'S' + '-' + matches[-1][1])
    else:
        sentence.append(line[-1] + '/' + 'O' + '-' + UNK)
    return " ".join(sentence)


# ## parse page content
def parse(content: str,
          entitys_label: Dict[str, str],
          out_file: TextIO) -> None:
    """Extract entity-bearing lines from one ``<page>`` blob and append
    the BMEOS-tagged sentences to *out_file*.

    Runs on worker threads; writes to the shared file are serialised
    with the module-level ``lock``.

    :param content: full ``<page>...</page>`` XML/HTML text.
    :param entitys_label: entity surface form -> type tag mapping.
    :param out_file: open, shared output file handle.
    """
    # remove the <page><title>...</title> wrapper and the closing </page>
    content = re.sub(r"^\s*<page>\s*<title>[\s\S]*?</title>", "", content)
    content = re.sub(r"</page>\s*$", "", content)

    # extract visible text from the html, skipping <script> bodies
    html = etree.HTML(content)
    text = "".join(html.xpath(r"//*[not(self::script)]/text()"))

    # keep only sentences that contain at least one named entity.
    for line in text.split('\n'):
        # skip short fragments and copyright boilerplate lines
        if len(line) < 20 or "©" in line:
            continue
        sentence = tagging_sentence(line, entitys_label)
        if sentence is not None:
            # BUG FIX: the bare acquire()/release() pair leaked the lock
            # (deadlocking other workers) if write() raised; the context
            # manager always releases it.
            with lock:
                out_file.write(sentence + "\n")


def gen_train() -> None:
    """Build the BMEOS-tagged training file ``train.txt``.

    Loads the entity->type mapping from the training file, then scans
    every entity-pages XML file, parsing each ``<page>`` block on a
    thread pool and appending tagged sentences to ``train.txt``.
    """
    # ## load the entity -> tag mapping from the training file.
    entity_type: Dict[str, str] = {}
    with open(os.path.join(DATA_DIR, TRAINING_DIR, training_file), 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            entity, label = line.split("\t")
            entity_type[entity] = type_dict[label]

    # ## convert data to the target format: keep only sentences that
    # contain at least one named entity. Start fresh; pages append below.
    out_train = "train.txt"
    if os.path.exists(out_train):
        os.remove(out_train)

    # parse pages concurrently; `with` guarantees the pool is shut down
    # (the original executor was never shut down).
    with ThreadPoolExecutor(32) as executor:
        for page_file in pages_file:
            page_file = os.path.join(DATA_DIR, TRAINING_DIR, page_file)
            count = 0
            print("[Train]: ", page_file)
            with open(page_file, 'r', encoding='utf-8') as f, \
                    open(out_train, 'a+', encoding='utf-8') as out_f:
                content = ""
                inside = False  # currently between <page> and </page>
                futures = []
                for line in f:
                    stripped = line.strip()
                    if stripped == "<page>":
                        content = line
                        inside = True
                    elif stripped == "</page>":
                        content += line
                        inside = False
                        futures.append(executor.submit(parse, content, entity_type, out_f))
                        count += 1
                        print("\rprocessing %d pages." % count, end="")
                    elif inside:
                        content += line
                # out_f is closed when this with-block exits; wait here so
                # no worker writes to a closed file.
                for future in futures:
                    future.result()
            print()


def parse_for_test(content: str,
                   entitys_label: Dict[str, Any],
                   out_file: TextIO) -> None:
    """Extract raw lines containing any test entity from one ``<page>``
    blob and append them (untagged) to *out_file*.

    Runs on worker threads; writes to the shared file are serialised
    with the module-level ``lock``.

    :param content: full ``<page>...</page>`` XML/HTML text.
    :param entitys_label: test entities (values unused, keys matched).
    :param out_file: open, shared output file handle.
    """
    # remove the <page><title>...</title> wrapper and the closing </page>
    content = re.sub(r"^\s*<page>\s*<title>[\s\S]*?</title>", "", content)
    content = re.sub(r"</page>\s*$", "", content)

    # extract visible text from the html, skipping <script> bodies
    html = etree.HTML(content)
    text = "".join(html.xpath(r"//*[not(self::script)]/text()"))

    for line in text.split('\n'):
        # skip short fragments and copyright boilerplate lines
        if len(line) < 20 or "©" in line:
            continue
        # keep the line as soon as any test entity occurs in it
        if any(re.search(entity, line) is not None for entity in entitys_label):
            # BUG FIX: bare acquire()/release() leaked the lock if
            # write() raised; the context manager always releases it.
            with lock:
                out_file.write(line + "\n")


def gen_test() -> None:
    """Extract candidate test sentences into ``test.txt``.

    Reads the test entities from the validation set, then scans the
    entity-pages files on a thread pool, keeping every line that
    mentions at least one test entity.
    """
    # ## read testing entities from file (no labels known -> None values).
    entity_type: Dict[str, Any] = {}
    with open(os.path.join(DATA_DIR, TESTING_DIR, testing_file), 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                entity_type[line] = None

    # start fresh: lines are appended page by page below.
    out_test = "test.txt"
    if os.path.exists(out_test):
        os.remove(out_test)

    # `with` guarantees the pool is shut down even on error
    # (the original executor was never shut down).
    with ThreadPoolExecutor(32) as executor:
        for page_file in pages_file:
            # NOTE(review): pages are read from TRAINING_DIR even for the
            # test set — confirm this is intended.
            page_file = os.path.join(DATA_DIR, TRAINING_DIR, page_file)
            print("[Test]: ", page_file)
            count = 0
            with open(page_file, 'r', encoding='utf-8') as f, \
                    open(out_test, 'a+', encoding='utf-8') as out_f:
                content = ""
                inside = False  # currently between <page> and </page>
                futures = []
                for line in f:
                    stripped = line.strip()
                    if stripped == "<page>":
                        content = line
                        inside = True
                    elif stripped == "</page>":
                        content += line
                        inside = False
                        futures.append(executor.submit(parse_for_test, content, entity_type, out_f))
                        count += 1
                        print("\rprocessing %d pages." % count, end="")
                    elif inside:
                        content += line
                # out_f is closed when this with-block exits; wait here so
                # no worker writes to a closed file.
                for future in futures:
                    future.result()
            print()


def split_train_eval() -> None:
    """Split ``train.txt`` 80/20 into ``train.txt`` and ``eval.txt``.

    The original file is kept as ``train_.txt``; a random 20% of its
    lines go to ``eval.txt`` and the rest back to ``train.txt``.
    """
    out_train = "train.txt"
    # count the lines so 20% of the indices can be sampled for eval.
    with open(out_train, 'r', encoding='utf-8') as f:
        lines = sum(1 for _ in f)

    os.rename(out_train, "train_.txt")

    # a set gives O(1) membership; the original list made the loop O(n^2).
    eval_indices = set(random.sample(range(lines), int(lines * 0.2)))

    with open("train.txt", 'w', encoding='utf-8') as f_train, \
            open("eval.txt", 'w', encoding='utf-8') as f_eval, \
            open("train_.txt", 'r', encoding='utf-8') as f:
        for count, line in enumerate(f):
            # BUG FIX: `line` already ends with '\n'; the original wrote
            # line + "\n" and inserted a blank line after every record.
            if count in eval_indices:
                f_eval.write(line)
            else:
                f_train.write(line)


def analyze_testset():
    """Count how often each validation entity occurs in the generated
    test set and write the per-entity totals to ``analyze_test.txt``."""
    validate_file = "./ccks_7_1_competition_data/验证集/test.txt"
    test_file = './test.txt'
    analyze_file = "./analyze_test.txt"

    # seed every known entity with a zero count.
    entities = defaultdict(lambda : 0)
    with open(validate_file, 'r') as src:
        for raw in src:
            name = raw.strip()
            if name:
                entities[name] = 0

    # scan the generated test set, tallying regex hits per entity.
    processed = 0
    with open(test_file, 'r') as src:
        for raw in src:
            print("\rprocessing line: %d" % processed, end='')
            for name in entities.keys():
                entities[name] += len(re.findall(name, raw))
            processed += 1

    # report alphabetically, with the number of never-seen entities first.
    ordered = OrderedDict(sorted(entities.items()))
    missing = sum(total == 0 for total in ordered.values())
    with open(analyze_file, 'w') as report:
        report.write("[zeros]: %d \n" % missing)
        for name, total in ordered.items():
            report.write("{}\t{}\n".format(name, total))


if __name__ == "__main__":
    # usage: python3 gen_dataset_for_span.py <action>
    if len(sys.argv) < 2:
        print("python3 gen_dataset_for_span.py action")
        sys.exit(0)

    actions = ["all", "train", "test", "split", "analyze-test"]
    action = sys.argv[1]
    if action not in actions:
        raise Exception("Action %s not supported!" % action)

    # BUG FIX: the original dispatched with `sys.argv[1] in "train"` etc.
    # — a substring test that only routed correctly by accident of check
    # order; compare for equality instead.
    if action == "all":
        gen_train()
        gen_test()
        split_train_eval()
        analyze_testset()
    elif action == "train":
        gen_train()
    elif action == "test":
        gen_test()
    elif action == "split":
        split_train_eval()
    elif action == "analyze-test":
        analyze_testset()