import pandas as pd
import re
import json
import random
from tqdm import tqdm


# Root of the CCKS-NER competition data directory; all inputs live under it.
prefix = "/data2/workspace/2021_comp/ccks-ner/data/"

# Unlabeled final test set. Each line carries a "\x01"-separated record whose
# second field is the address text (see insert_test in build_pretrain_file).
test_file = prefix+"base/final_test.txt"

# Labeled CoNLL-format files: one "<char> <label>" pair per line, with blank
# lines separating sequences (parsed by get_seq_entity).
conll_files = [
    prefix+"base/dev.conll",
    prefix+"base/train.conll"
]

# JSON-lines files: each line is an object with a "query" string and a
# "candidate" list of {"text": ...} entries (parsed by insert_extend).
similar_files = [
    prefix+"Xeon3NLP_round1_test_20210524.txt",
    prefix+"Xeon3NLP_round1_train_20210524.txt"
]

# Example output paths, kept for reference; actual output locations are
# passed to write_pretrain_lines() by the caller.
# output_train_path = "/data2/workspace/2021_comp/ccks-ner/data/pretrain_train.txt"
# output_val_path = "/data2/workspace/2021_comp/ccks-ner/data/pretrain_val.txt"


def get_seq_entity(lines):
    """Parse CoNLL-style lines into character sequences and label lists.

    Each non-blank line is "<char> <label>"; a blank line terminates the
    current sequence. A trailing sequence that is not followed by a blank
    line is also flushed (previously it was silently dropped).

    Args:
        lines (list[str]): raw lines as read from a .conll file.

    Returns:
        tuple[list[str], list[list[str]]]: joined character sequences and
        the index-aligned per-character label lists.
    """
    seq_lists = []
    entity_lists = []

    input_list = []
    label_list = []

    for line in lines:
        line = line.strip()
        if line == "":
            # Blank line closes the current sequence. Consecutive blank
            # lines still append an empty sequence, matching the original
            # behavior.
            seq_lists.append("".join(input_list))
            assert len(input_list) == len(label_list)
            entity_lists.append(label_list)
            input_list = []
            label_list = []
            continue

        # Split once per line instead of twice.
        parts = line.split(" ")
        input_list.append(parts[0])
        label_list.append(parts[1])

    # Flush a final sequence that has no terminating blank line.
    if input_list:
        assert len(input_list) == len(label_list)
        seq_lists.append("".join(input_list))
        entity_lists.append(label_list)

    return seq_lists, entity_lists


def build_pretrain_file(pretrain_type=1):
    """Collect unique location strings and split them into train/val sets.

    Sources included depend on pretrain_type:
        1: the final test file only;
        2: test file + the labeled .conll files;
        3: all of the above + the "similar" JSON-lines files.
    Any other value yields two empty lists (unchanged behavior).

    Args:
        pretrain_type (int, optional): which sources to include (1-3).
            Defaults to 1.

    Returns:
        tuple[list[str], list[str]]: (train_lines, val_lines) after a
        random shuffle; roughly 5% of the lines go to validation.
    """
    locationset = set()

    def insert_test():
        # Each line is "\x01"-separated; the second field is the address.
        with open(test_file) as f:
            for item in f:
                locationset.add(item.split("\x01")[1].strip())

    def insert_train():
        # Reconstruct full address strings from the CoNLL token files.
        for path in conll_files:
            with open(path) as f:
                lines = f.readlines()
            seq_lists, _ = get_seq_entity(lines)
            locationset.update(seq_lists)

    def insert_extend():
        # JSON lines with a "query" and a list of candidate texts.
        for path in similar_files:
            with open(path) as f:
                for raw in f:
                    item = json.loads(raw.strip())
                    locationset.add(item["query"])
                    for sub_item in item["candidate"]:
                        locationset.add(sub_item["text"])

    if pretrain_type == 1:
        insert_test()

    if pretrain_type == 2:
        insert_test()
        insert_train()

    if pretrain_type == 3:
        insert_test()
        insert_train()
        insert_extend()

    lines = list(locationset)
    random.shuffle(lines)
    # Reserve ~5% for validation, but at least one line: with split_num == 0
    # the old "lines[:-0]" slice emptied the ENTIRE training split.
    split_num = max(1, len(lines) // 20)

    return lines[:-split_num], lines[-split_num:]


def write_pretrain_lines(pretrain_type,
                         output_train_path,
                         output_val_path):
    """Build the pretraining corpus and write train/val files, one line each.

    Args:
        pretrain_type (int): source level forwarded to build_pretrain_file.
        output_train_path (str): destination path for the training lines.
        output_val_path (str): destination path for the validation lines.
    """
    train_lines, val_lines = build_pretrain_file(pretrain_type)

    # Use context managers so the handles are flushed and closed even if a
    # write raises; the original left closing to garbage collection.
    with open(output_train_path, mode="w") as train_file:
        train_file.writelines(item.strip() + "\n" for item in train_lines)
    with open(output_val_path, mode="w") as val_file:
        val_file.writelines(item.strip() + "\n" for item in val_lines)
