
import pandas as pd
import numpy as np

import os
import json
import re
from tqdm import tqdm

from transformers import *
import tensorflow as tf
import pandas as pd
import numpy as np
from tqdm import tqdm
import tensorflow_addons as tfa
import os
import json
import re
from sklearn.model_selection import KFold

from model import build_model
from data_process import *
from config_colab import Config
from data_loader import writer_tfrecord, read_tf_record


def build_model_data_fn(
    architecture,
    max_len,
    is_combain,
    lr,
    inputs_fold,
    is_generate=False,
    batch_size=None,
    sample_weight_rate=1,
    include_fake_label=False,
    is_augment=False,
    augment_times=1,
    regex_rate=1,
    cnn=False,
    cnn_max_feature=512,
    similar_fake_num=0,
    cnn_depth=2,
    cnn_kernel_size=3,
    cnn_connect_layer=12,
    transformer_out_layer=12,
    min_feature=64,
    drop_rate=0.1,
    regularizer=0,
    freeze_layers=-1,
    **kwargs
):
    """Build the (model_fn, data_fn, submission_fn) triple for one training run.

    Loads the label dictionary, prepares a tokenizer whose vocabulary covers
    every character in the train/dev data, and returns three closures used by
    the training driver.

    Args:
        architecture (str): HuggingFace model name. Names starting with
            "clue/" use BertTokenizer; everything else uses AutoTokenizer.
        max_len (int): maximum tokenized sequence length.
        is_combain (bool): use the combined label dictionary and run
            post_process on each submission line.
        lr (float): learning rate forwarded to build_model.
        inputs_fold (str): folder containing train.conll / dev.conll.
        is_generate (bool, optional): when True, data_fn("train") returns the
            generator produced by preprocess_train. Defaults to False.
        batch_size (int, optional): batch size for the generator path.
        sample_weight_rate (float, optional): forwarded to preprocess_train.
        include_fake_label (bool, optional): forwarded to preprocess_train.
        is_augment (bool, optional): forwarded to preprocess_train.
        augment_times (int, optional): accepted for config compatibility;
            not used in this builder.
        regex_rate (float, optional): accepted for config compatibility;
            not used in this builder.
        cnn (bool, optional): forwarded to build_model. Defaults to False.
        cnn_max_feature (int, optional): forwarded to build_model.
        similar_fake_num (int, optional): forwarded to preprocess_train.
        cnn_depth (int, optional): forwarded to build_model.
        cnn_kernel_size (int, optional): forwarded to build_model.
        cnn_connect_layer (int, optional): forwarded to build_model.
        transformer_out_layer (int, optional): forwarded to build_model.
        min_feature (int, optional): forwarded to build_model.
        drop_rate (float, optional): forwarded to build_model.
        regularizer (float, optional): forwarded to build_model.
        freeze_layers (int, optional): forwarded to build_model.
        **kwargs: extra config entries ignored by this builder.

    Returns:
        tuple: (model_fn, data_fn, submission_fn) closures.
    """
    label_path = Config.combain_label_dic_path if is_combain else Config.label_dic_path
    # Context managers close the handles deterministically; the original
    # relied on GC to reclaim the bare open(...) calls.
    with open(label_path) as f:
        label_dic = json.load(f)

    label_num = len(label_dic)

    # id -> label-name lookup used when decoding predictions.
    reverse_label_dic = {idx: label for label, idx in label_dic.items()}

    train_path = os.path.join(inputs_fold, "train.conll")
    val_path = os.path.join(inputs_fold, "dev.conll")

    if architecture.startswith("clue/"):
        tokenizer = BertTokenizer.from_pretrained(architecture)
    else:
        tokenizer = AutoTokenizer.from_pretrained(architecture)

    with open(train_path) as f_train, open(val_path) as f_val:
        seq_lists, _ = get_seq_entity(f_train.readlines() + f_val.readlines())

    # Every character seen in train/dev must exist in the tokenizer vocab.
    char_set = {char for item in seq_lists for char in item}

    # add_tokens accepts a list, so a single call replaces the per-token
    # loop; sorting makes the added-token ids deterministic across runs
    # (set iteration order is not).
    tokenizer.add_tokens(sorted(char_set - set(tokenizer.vocab)))

    with open(Config.test_path) as f_test:
        test_lines = [line.split("\x01")[1].strip() for line in f_test]
    # NOTE(review): test characters are collected AFTER the vocab update
    # above, so unseen test-set characters are never added to the tokenizer.
    # Preserved as-is because fixing it would change the embedding size.
    for item in test_lines:
        char_set.update(item)

    def data_fn(stage):
        """Load and preprocess data for the given stage.

        Args:
            stage (str): one of "train", "val", "test".

        Returns:
            Stage-dependent: for "train" either a generator (is_generate)
            or ([input_ids, attention_mask, token_type_ids], labels,
            sample_weight, num_samples); for "val"
            ([inputs...], labels); for "test" ([inputs...], text_list).

        Raises:
            ValueError: if stage is not one of the three known values.
        """
        if stage == "train":
            if is_generate:
                # Generator path: preprocess_train yields batches lazily.
                return preprocess_train(
                    [train_path],
                    tokenizer,
                    max_len,
                    is_combain,
                    is_generate=is_generate,
                    similar_fake_num=similar_fake_num,
                    batch_size=batch_size,
                    include_fake_label=include_fake_label,
                    is_augment=is_augment,
                    sample_weight_rate=sample_weight_rate)

            input_ids, attention_mask, token_type_ids, labels, sample_weight = preprocess_train(
                [train_path],
                tokenizer,
                max_len,
                is_combain,
                is_generate=is_generate,
                similar_fake_num=similar_fake_num,
                include_fake_label=include_fake_label,
                is_augment=is_augment,
                sample_weight_rate=sample_weight_rate)
            return [input_ids, attention_mask, token_type_ids], labels, sample_weight, len(input_ids)

        if stage == "val":
            input_ids, attention_mask, token_type_ids, labels, _ = preprocess_train(
                [val_path], tokenizer, max_len, is_combain)
            return [input_ids, attention_mask, token_type_ids], labels

        if stage == "test":
            input_ids, attention_mask, token_type_ids, text_list = preprocess_test(
                Config.test_path, tokenizer, max_len)
            return [input_ids, attention_mask, token_type_ids], text_list

        # Fail loudly instead of silently returning None for a typo'd stage.
        raise ValueError(f"Unknown stage: {stage!r}")

    def submission_fn(test_text, test_probs):
        """Decode per-token label probabilities into submission lines.

        Args:
            test_text (list[str]): raw test lines ("id\\x01text" format).
            test_probs: array-like of per-token label probabilities,
                argmax-ed over the last axis.

        Returns:
            list[str]: one "original-line\\u0001label label ...\\n" entry per
            test sample; post-processed when is_combain is set.
        """
        probs_max = np.argmax(test_probs, axis=-1)

        submission_list = []
        for i, pred in tqdm(enumerate(probs_max)):
            text = test_text[i].split("\x01")[1].strip()
            # Skip position 0 (the [CLS] slot) and take one label per char.
            labels = [reverse_label_dic[idx] for idx in pred[1:len(text) + 1]]
            submission_list.append(
                test_text[i].strip() + "\u0001" + " ".join(labels) + "\n")

        if not is_combain:
            return submission_list

        # Combined-label runs need an extra post-processing pass per line.
        return [post_process(line) for line in submission_list]

    def model_fn():
        """Construct the model with the captured hyper-parameters.

        Returns:
            A compiled model from build_model.
        """
        return build_model(
            architecture=architecture,
            max_len=max_len,
            label_num=label_num,
            lr=lr,
            cnn=cnn,
            cnn_max_feature=cnn_max_feature,
            cnn_depth=cnn_depth,
            cnn_kernel_size=cnn_kernel_size,
            cnn_connect_layer=cnn_connect_layer,
            transformer_out_layer=transformer_out_layer,
            min_feature=min_feature,
            drop_rate=drop_rate,
            regularizer=regularizer,
            freeze_layers=freeze_layers)

    return model_fn, data_fn, submission_fn


# NOTE(review): everything below is a dead, commented-out tf.data variant of
# build_model_data_fn (differs only in the "train" branch of data_fn, which
# reads TFRecords). Kept for reference; consider deleting from version control.
# def build_model_tfdataset(
#     group_name,
#     train_name,
#     architecture,
#     max_len,
#     is_combain,
#     lr,
#     inputs_fold,
#     batch_size,
#     train_tfrecord_fold,
#     sample_weight_rate=1,
#     include_fake_label=False,
#     is_augment=False,
#     augment_times=1,
#     regex_rate=1,
#     cnn=False,
#     cnn_max_feature=512,
#     cnn_depth=2,
#     cnn_kernel_size=3,
#     cnn_connect_layer=12,
#     transformer_out_layer=12,
#     min_feature=64,
#     drop_rate=0.1,
#     regularizer=0,
#     freeze_layers=-1,
#     **kwargs
# ):


#     label_dic = json.load(open(Config.label_dic_path))
#     if is_combain:
#         label_dic = json.load(open(Config.combain_label_dic_path))

#     label_num = len(label_dic)

#     reverse_label_dic = dict([(item[1], item[0])
#                              for item in label_dic.items()])
#     train_path = os.path.join(inputs_fold, "train.conll")
#     val_path = os.path.join(inputs_fold, "dev.conll")

#     tokenizer = AutoTokenizer.from_pretrained(architecture)

#     char_set = set()

#     seq_lists, _ = get_seq_entity(
#         open(train_path).readlines()+open(val_path).readlines())

#     for item in seq_lists:
#         for char in item:
#             char_set.add(char)

#     for item in list(char_set - set(tokenizer.vocab)):
#         tokenizer.add_tokens(item)

#     test_lines = [item.split("\x01")[1].strip()
#                   for item in open(Config.test_path).readlines()]
#     for item in test_lines:
#         for char in item:
#             char_set.add(char)

#     def data_fn(stage):
#         """Preprocess data .

#         Args:
#             data_path ([type]): [description]
#             stage ([type]): [description]

#         Returns:
#             [type]: [description]
#         """


#         if stage == "train":

#             dataset = read_tf_record(os.path.join(train_tfrecord_fold, "train**"),batch_size)

#             return dataset

#         if stage == "val":
#             input_ids, attention_mask, token_type_ids, labels,_ = preprocess_train(
#                 [os.path.join(inputs_fold, "dev.conll")], tokenizer, max_len, is_combain)
#             return [input_ids, attention_mask, token_type_ids], labels
#         if stage == "test":

#             input_ids, attention_mask, token_type_ids, text_list = preprocess_test(
#                 Config.test_path, tokenizer, max_len)
#             return [input_ids, attention_mask, token_type_ids], text_list

#     def submission_fn(test_text, test_probs):
#         """Function that takes a test_text and runs the submission function on each test_probs .

#         Args:
#             test_text ([type]): [description]
#             test_probs ([type]): [description]

#         Returns:
#             [type]: [description]
#         """

#         probs_max = np.argmax(test_probs, axis=-1)

#         submission_list = []

#         for i, item in tqdm(enumerate(probs_max)):

#             text = test_text[i].split("\x01")[1].strip()

#             labels = [reverse_label_dic[idx] for idx in item[1:len(text)+1]]

#             submission_list.append(
#                 test_text[i].strip() + "\u0001"+" ".join(labels)+"\n")
#         if not is_combain:

#             return submission_list

#         submission_list_new = []

#         for line in submission_list:

#             line = post_process(line)
#             submission_list_new.append(line)

#         return submission_list_new

#     def model_fn():
#         """Construct a model function that can be used to build a model .

#         Returns:
#             [type]: [description]
#         """

#         return build_model(
#             architecture=architecture,
#             max_len=max_len,
#             label_num=label_num,
#             lr=lr,
#             cnn=cnn,
#             cnn_max_feature=cnn_max_feature,
#             cnn_depth=cnn_depth,
#             cnn_kernel_size=cnn_kernel_size,
#             cnn_connect_layer=cnn_connect_layer,
#             transformer_out_layer=transformer_out_layer,
#             min_feature=min_feature,
#             drop_rate=drop_rate,
#             regularizer=regularizer,
#             freeze_layers=freeze_layers)

#     return model_fn, data_fn, submission_fn
