
import pandas as pd
import numpy as np
import random
import os
import json
import re
from tqdm import tqdm
from sklearn.utils import shuffle
from augmentation import Augment, Entity
from config import Config




def convert_pairs_to_seq_labels(pairs):
    """Flatten (content, type) entity pairs into a char sequence and BIOES labels.

    Args:
        pairs: iterable of objects exposing ``content`` (str) and ``e_type``
            (str) attributes; ``e_type == "O"`` marks non-entity text.

    Returns:
        tuple: (concatenated sequence string, per-character BIOES label list).
    """
    parts = []
    labels = []
    for pair in pairs:
        text = pair.content
        parts.append(text)

        if pair.e_type == "O":
            # Non-entity span: every character is tagged "O".
            labels.extend("O" for _ in text)
        elif len(text) == 1:
            # Single-character entity gets the S- (single) tag.
            labels.append("S-" + pair.e_type)
        else:
            # Multi-character entity: B- head, I- middle, E- tail.
            labels.append("B-" + pair.e_type)
            labels.extend("I-" + pair.e_type for _ in text[1:-1])
            labels.append("E-" + pair.e_type)

    return "".join(parts), labels


def augment(seq_lists, entity_lists, augment_times, regex_rate):
    """Augment NER training data by regenerating entity spans.

    Splits each labelled sequence into (text, entity_type) spans, collects
    the surface strings of each label in ``Config.augment_labels``, registers
    them (together with the regexes from ``Config.label_regex_dic``) on an
    ``Augment`` instance, then generates up to ``augment_times`` variants per
    sequence and appends the de-duplicated results.

    Args:
        seq_lists: sequence of input strings (list or tuple; not mutated).
        entity_lists: sequence of per-character BIOES label lists, parallel
            to ``seq_lists``.
        augment_times: number of augmented variants requested per sequence.
        regex_rate: regex-replacement probability forwarded to
            ``Augment.entity_augment`` as ``rp``.

    Returns:
        tuple: (shuffled list of sequences, shuffled list of label lists)
        containing the originals plus the augmented samples.
    """
    # Copy to plain lists: callers may pass tuples (preprocess_train produces
    # tuples via zip(*...) after shuffling), and the original .extend() on a
    # tuple would raise AttributeError.
    seq_lists = list(seq_lists)
    entity_lists = list(entity_lists)

    seq_label_pairs_list = []
    label_str_dic = {}

    for idx in tqdm(range(len(seq_lists))):
        seq = seq_lists[idx]
        label_list = entity_lists[idx]

        tmp_str = ""
        seq_label_pairs = []

        for i, label in enumerate(label_list):
            label_simple = label.replace(
                "E-", "").replace("S-", "").replace("B-", "").replace("I-", "")

            tmp_str += seq[i]

            # A span closes on an E-/S- tag, or when a run of "O" ends.
            if label.startswith("E-") or label.startswith("S-") or (
                    label == "O" and (i == len(label_list)-1 or label_list[i+1] != "O")):

                if label_simple in Config.augment_labels:
                    label_str_dic.setdefault(label_simple, [])
                    label_str_dic[label_simple].append(tmp_str)

                seq_label_pairs.append(Entity(tmp_str, label_simple))
                tmp_str = ""

        seq_label_pairs_list.append(seq_label_pairs)

    augmentor = Augment()

    for entity_name, text_list in label_str_dic.items():
        re_str_list = Config.label_regex_dic.get(entity_name, [])
        for re_str in re_str_list:
            augmentor.add_regex_str(re_str, text_list, [entity_name])

    aug_seq_list = []
    aug_entity_lists = []
    # Set-based dedup: the original tested `seq_str in aug_seq_list`, which
    # is O(n) per lookup and O(n^2) overall.
    seen = set()

    for seq_label_pairs in seq_label_pairs_list:
        augment_list = augmentor.entity_augment(
            seq_label_pairs, augment_times, rp=regex_rate, ep=0, sp=0)

        for new_pairs in augment_list:
            seq_str, labels = convert_pairs_to_seq_labels(new_pairs)
            if seq_str in seen:
                continue
            seen.add(seq_str)
            aug_seq_list.append(seq_str)
            aug_entity_lists.append(labels)

    seq_lists.extend(aug_seq_list)
    entity_lists.extend(aug_entity_lists)

    pair_list = list(zip(seq_lists, entity_lists))
    random.shuffle(pair_list)
    seq_lists, entity_lists = zip(*pair_list)

    return list(seq_lists), list(entity_lists)


def label_combain(label_dic):
    """Strip BIOES prefixes from label names and index the distinct results.

    Args:
        label_dic: mapping (or iterable) whose keys are labels, possibly
            carrying a B-/I-/E-/S- prefix.

    Returns:
        dict: plain label name -> dense integer id, with "O" fixed at 0.
    """
    combined = {"O": 0}

    for name in label_dic:
        for prefix in ("B-", "I-", "E-", "S-"):
            name = name.replace(prefix, "")
        # setdefault keeps the first id assigned to each plain name.
        combined.setdefault(name, len(combined))

    return combined


def post_process_label(labels):
    """Convert bare entity labels to BIOES tags in place.

    Model output sometimes contains plain entity names (e.g. "poi") rather
    than prefixed tags.  Consecutive runs of the same plain label become
    B-/I-/E- spans; an isolated one becomes S-.  Labels equal to "O" or
    already carrying a B-/I-/E-/S- prefix are left untouched.

    Args:
        labels: list of label strings; mutated in place.

    Returns:
        list: the same ``labels`` list, with plain labels prefixed.
    """
    prev_plain = ""  # plain label of the previous position, "" if none

    for i, label in enumerate(labels):
        if label == "O" or label[:2] in ("B-", "I-", "E-", "S-"):
            # "O" or an already-tagged label breaks any run of plain labels.
            # Reset so a later plain label starts a fresh span — the original
            # kept the stale value and e.g. ["A", "O", "A"] produced a
            # dangling "E-A" with no matching "B-A".
            prev_plain = ""
            continue

        # Look ahead at the still-unmodified neighbour to find span ends.
        is_last = i == len(labels) - 1 or label != labels[i + 1]

        if label != prev_plain:
            labels[i] = ("S-" if is_last else "B-") + label
        else:
            labels[i] = ("E-" if is_last else "I-") + label

        prev_plain = label

    return labels


def post_process(line):
    """Apply BIOES post-processing to the label field of one output line.

    Expected line format: "<id>\x01<sequence>\x01<space-separated labels>".

    Args:
        line: one raw, \x01-separated output line.

    Returns:
        str: the line with its third field BIOES-normalized by
        ``post_process_label``, newline-terminated.
    """
    fields = line.strip().split("\x01")
    labels = post_process_label(fields[2].split(" "))
    # Rebuild field-by-field instead of the original str.replace(), which
    # would also rewrite an identical substring occurring in the id or
    # sequence fields.
    fields[2] = " ".join(labels)
    return "\x01".join(fields) + "\n"


def get_seq_entity(lines):
    """Parse CoNLL-style lines ("<char> <label>") into sequences and labels.

    Blank lines delimit sequences.  Unlike the original version, a trailing
    sequence that is not followed by a final blank line is also flushed, and
    consecutive blank lines no longer produce empty samples.

    Args:
        lines: iterable of raw file lines.

    Returns:
        tuple: (list of sequence strings, list of parallel label lists).
    """
    seq_lists = []
    entity_lists = []
    chars = []
    labels = []

    def _flush():
        # One sequence ends: join its characters and store its labels.
        assert len(chars) == len(labels)
        seq_lists.append("".join(chars))
        entity_lists.append(list(labels))
        chars.clear()
        labels.clear()

    for line in lines:
        line = line.strip()
        if line == "":
            if chars:  # tolerate consecutive blank lines
                _flush()
            continue

        parts = line.split(" ")
        chars.append(parts[0])
        labels.append(parts[1])

    if chars:  # flush a final sequence lacking a trailing blank line
        _flush()

    return seq_lists, entity_lists


def build_label_dic(entity_lists, is_combain):
    """Build a label -> dense-id mapping from the observed entity labels.

    Args:
        entity_lists: list of per-character label lists (BIOES tags or "O").
        is_combain (bool): when True, strip the BIOES prefix so that e.g.
            "B-poi" and "E-poi" share the id of "poi".

    Returns:
        dict: label string -> integer id, with "O" fixed at 0.
    """
    label_dic = {'O': 0}
    for line in tqdm(entity_lists, desc="[building label_dic]"):
        for entity in line:
            # Guard "O": it contains no "-", so split("-")[1] would raise
            # IndexError (preprocess_label applies the same guard; the
            # original code here did not).
            if is_combain and entity != "O":
                entity = entity.split("-")[1]
            label_dic.setdefault(entity, len(label_dic))

    return label_dic


def preprocess_label(entity_lists, label_dic, max_len, is_combain):
    """Encode entity label lists as one-hot arrays aligned with model input.

    Labels are written starting at position 1 so they line up with inputs
    carrying a leading [CLS] token; positions without a label (position 0,
    truncated tail, padding) keep an all-zero vector.

    Args:
        entity_lists: list of per-character label lists.
        label_dic: label string -> index on the one-hot axis.
        max_len: padded sequence length.
        is_combain (bool): when True, strip the BIOES prefix before lookup.

    Returns:
        np.ndarray: array of shape (num_samples, max_len, num_labels).
    """
    labels = np.zeros((len(entity_lists), max_len, len(label_dic)))

    for row, line in tqdm(enumerate(entity_lists), desc="[building labels]"):
        for col, entity in enumerate(line):
            if col == max_len - 1:
                # Stop one position short: the +1 shift below would
                # otherwise index past max_len.
                break
            if entity != "O" and is_combain:
                entity = entity.split("-")[1]
            labels[row, col + 1, label_dic[entity]] = 1

    return labels


def preprocess_inputs(seq_lists, tokenizer, max_len):
    """Tokenize character sequences into fixed-length model input arrays.

    Each string is split into characters, mapped to ids, wrapped with
    [CLS]/[SEP], then truncated and zero-padded to ``max_len``.  The
    ``token_type_ids`` array stays all-zero (single-segment inputs).

    Args:
        seq_lists: list of input strings.
        tokenizer: tokenizer exposing ``convert_tokens_to_ids``,
            ``cls_token_id`` and ``sep_token_id``.
        max_len: padded sequence length.

    Returns:
        tuple: (input_ids, attention_mask, token_type_ids) arrays, each of
        shape (num_samples, max_len).
    """
    shape = (len(seq_lists), max_len)
    input_ids = np.zeros(shape)
    token_type_ids = np.zeros(shape)
    attention_mask = np.zeros(shape)

    for row, text in tqdm(enumerate(seq_lists), desc="[tokenizing]"):
        ids = tokenizer.convert_tokens_to_ids(list(text))
        ids = [tokenizer.cls_token_id, *ids, tokenizer.sep_token_id]
        # Slicing to max_len replaces the original early-break guard.
        for col, token_id in enumerate(ids[:max_len]):
            input_ids[row, col] = token_id
            attention_mask[row, col] = 1

    return input_ids, attention_mask, token_type_ids


def preprocess_train(
        file_path_list,
        tokenizer,
        max_len,
        is_combain,
        include_fake_label=False,
        is_augment=False,
        augment_times=1,
        regex_rate=1,
        sample_weight_rate=None
):
    """Build training inputs/labels from CoNLL files, with optional extras.

    Pipeline: read the CoNLL files, optionally merge pseudo-labelled data,
    optionally run data augmentation, optionally compute per-sample weights,
    build (or load) the label dictionary, then tokenize and one-hot encode.

    Args:
        file_path_list: CoNLL training files to concatenate.
        tokenizer: tokenizer forwarded to ``preprocess_inputs``.
        max_len: padded sequence length.
        is_combain (bool): collapse BIOES prefixes into plain labels.
        include_fake_label (bool, optional): merge the pseudo-label files
            configured in ``Config``. Defaults to False.
        is_augment (bool, optional): run ``augment`` over the data.
            Defaults to False.
        augment_times (int, optional): variants per sequence. Defaults to 1.
        regex_rate (int, optional): regex-replacement rate for ``augment``.
            Defaults to 1.
        sample_weight_rate (optional): if truthy, weight assigned to samples
            containing a "B-subpoi" label (others get 1). Defaults to None.

    Returns:
        tuple: (input_ids, attention_mask, token_type_ids, labels,
        sample_weight); ``sample_weight`` is None unless
        ``sample_weight_rate`` is truthy.
    """
    lines = []
    sample_weight = None
    for file_path in file_path_list:
        # Context manager: the original left file handles unclosed.
        with open(file_path) as f:
            lines.extend(f.readlines())

    seq_lists, entity_lists = get_seq_entity(lines)

    if include_fake_label:  # merge pseudo (fake) labels
        with open(Config.fake_label_2) as f:
            similar_fake = f.readlines()[1:]  # skip header line

        random.shuffle(similar_fake)
        similar_fake = similar_fake[:30000]  # cap pseudo-label volume
        for line in similar_fake:
            seq, entity_labels = line.split("\x01")
            seq_lists.append(seq)
            entity_lists.append(entity_labels.strip().split(" "))

        with open(Config.fake_label_test_path) as f:
            test_lines = f.readlines()

        for line in test_lines:
            _id, seq, entity_labels = line.split("\x01")
            seq_lists.append(seq)
            entity_lists.append(entity_labels.strip().split(" "))

        pair_list = list(zip(seq_lists, entity_lists))
        random.shuffle(pair_list)
        # map(list, ...): zip(*...) yields tuples, but augment() mutates its
        # inputs with .extend — the original left tuples here and crashed
        # when include_fake_label and is_augment were both set.
        seq_lists, entity_lists = map(list, zip(*pair_list))

    if is_augment:  # augmentation
        seq_lists, entity_lists = augment(
            seq_lists, entity_lists, augment_times, regex_rate)

    if sample_weight_rate:  # assign sample weights
        # Up-weight samples that contain a subpoi entity.
        sample_weight = np.array([
            sample_weight_rate if "B-subpoi" in labels else 1
            for labels in entity_lists
        ])

    path = Config.combain_label_dic_path if is_combain else Config.label_dic_path

    if not os.path.exists(path):
        obj = build_label_dic(entity_lists, is_combain)
        with open(path, mode="w") as f:
            json.dump(obj, f)

    with open(path) as f:
        label_dic = json.load(f)

    input_ids, attention_mask, token_type_ids = preprocess_inputs(
        seq_lists, tokenizer, max_len)

    labels = preprocess_label(entity_lists, label_dic, max_len, is_combain)

    return input_ids, attention_mask, token_type_ids, labels, sample_weight


def preprocess_test(file_path, tokenizer, max_len):
    """Load the test file and build model input arrays.

    Each line is \x01-separated; only the second field (the sequence text)
    is tokenized.

    Args:
        file_path: path to the test file.
        tokenizer: tokenizer forwarded to ``preprocess_inputs``.
        max_len: padded sequence length.

    Returns:
        tuple: (input_ids, attention_mask, token_type_ids, raw lines).
    """
    # Context manager: the original left the file handle unclosed.
    with open(file_path) as f:
        lines = f.readlines()

    seq_lists = [line.split("\x01")[1].strip() for line in lines]

    input_ids, attention_mask, token_type_ids = preprocess_inputs(
        seq_lists, tokenizer, max_len)

    return input_ids, attention_mask, token_type_ids, lines


# seq, labels = get_seq_entity(open("data/train.conll").readlines())

# seq_lists,entity_lists = augment(seq, labels)

# print()
