import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo
from pathlib import Path
from torch import nn
import numpy as np
from neural_modules.bert_servant import BertServant,RobertaServant

import os

import config
from data_util.data_readers.fever_sselection_reader import SSelectorReader
from data_util.data_readers.fever_bert_reader import RobertaReader
from sentence_retrieval.sampler_for_nmodel import get_full_list, post_filter, get_additional_list, \
    get_full_list_from_list_d
from data_util.exvocab import load_vocab_embeddings, ExVocabulary

from log_util import save_tool
import utils

from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm

from neural_modules import biDafAttn
from sample_for_nli.tf_idf_sample_v1_0 import sample_v1_0, select_sent_for_eval, convert_evidence2scoring_format
from utils import c_scorer, common
from allennlp.modules import ScalarMix
from transformers import AutoTokenizer,AutoModel

class Model(nn.Module):
    """Sentence-selection classifier head on top of a (Ro)BERT(a) encoder.

    Pair encoding is delegated to ``bert_servant``; this module only maps the
    768-d pooled output through a one-hidden-layer MLP to class logits.

    :param bert_servant: wrapper exposing ``run_paired_seq(tokens, mask)``;
        may be ``None`` if only the classifier head is exercised.
    :param bert_batch_size: kept for interface compatibility (unused here).
    :param max_l: maximum paired-sequence length (stored for callers).
    :param mlp_d: hidden dimension of the classifier MLP.
    :param num_of_class: number of output classes.
    :param drop_r: kept for interface compatibility (unused here).
    :param activation_type: ``'relu'`` or ``'tanh'``.
    :raises ValueError: on an unknown ``activation_type``.
    """

    def __init__(self,
                 bert_servant=None,
                 bert_batch_size=32,
                 max_l=80, mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        super(Model, self).__init__()
        self.max_l = max_l
        # bert
        print("*" * 20, "initialize the pretrained model")
        self.bert_servant = bert_servant
        self.mlp = nn.Linear(768, mlp_d)  # 768 = hidden size of bert/roberta-base
        self.sm = nn.Linear(mlp_d, num_of_class)

        if activation_type == 'relu':
            activation = nn.ReLU()
        elif activation_type == 'tanh':
            activation = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")

        self.classifier = nn.Sequential(*[self.mlp, activation, self.sm])

    def display(self, exclude=None):
        """Print each trainable parameter and the total parameter count.

        :param exclude: iterable of substrings; parameters whose name contains
            any of them are printed but not counted. Defaults to ``{'glove'}``.
        """
        total_p_size = 0
        if exclude is None:
            exclude = {'glove'}

        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.data.size())

                # Excluded parameters are shown but omitted from the total.
                if any(exclude_name in str(name) for exclude_name in exclude):
                    continue

                # BUGFIX(style): the original accumulated the size product in a
                # local named `nn`, shadowing the `torch.nn` module import;
                # `numel()` is the built-in equivalent.
                total_p_size += param.numel()

        print('Total Size:', total_p_size)

    def forward(self, batch):
        """Score a batch of claim/sentence pairs.

        :param batch: dict with ``'paired_sequence'`` (token ids) and
            ``'paired_input_mask'`` tensors.
        :return: logits of shape ``(batch, num_of_class)``.
        """
        # Hoist the device lookup instead of walking parameters twice.
        device = next(self.parameters()).device
        paired_tokens = batch['paired_sequence'].to(device)
        paired_input_mask = batch['paired_input_mask'].to(device)
        paired_out = self.bert_servant.run_paired_seq(paired_tokens, paired_input_mask)
        return self.classifier(paired_out)



def hidden_eval(model, data_iter, dev_data_list):
    """Run the model over ``data_iter`` and write per-sentence scores back
    onto ``dev_data_list`` in place (fields ``'score'`` and ``'prob'``).

    The order of batches must match the order of ``dev_data_list``; this is
    asserted by comparing ``pid`` against ``selection_id`` item by item.
    Returns ``dev_data_list`` for convenience.
    """
    # select < (-.-) > 0
    # non-select < (-.-) > 1
    # hidden < (-.-) > -2
    print("*" * 20, "hidden eval:")
    with torch.no_grad():
        id2label = {0: "true", 1: "false", -2: "hidden"}

        print("Evaluating ...")
        model.eval()

        seen = 0
        pred_ids = []
        pred_logits = []
        pred_probs = []

        for batch in tqdm(data_iter):
            logits = model(batch)
            probs = F.softmax(logits, dim=1)

            labels = batch['selection_label']
            pred_ids.extend(list(batch['pid']))
            # Column 0 is the "selected" class; keep its raw logit and prob.
            pred_logits.extend(logits[:, 0].tolist())
            pred_probs.extend(probs[:, 0].tolist())
            seen += labels.size(0)

        assert len(pred_ids) == len(dev_data_list)
        assert len(pred_logits) == len(dev_data_list)

        for item, pid, logit, prob in zip(dev_data_list, pred_ids, pred_logits, pred_probs):
            # Matching id
            assert str(pid) == str(item['selection_id'])
            item['score'] = logit
            item['prob'] = prob

        print('total_size:', seen)

    return dev_data_list

def hidden_sent_retri_eval():
    """Evaluate a saved sentence-retrieval checkpoint on the FEVER shared-task
    dev set and print fever score / precision / recall / F1 plus a
    sentence-hit tracking score.
    """
    batch_size = 64
    lazy = True
    max_l=80
    # Hard-coded checkpoint produced by a previous train_fever() run.
    SAVE_PATH = config.PRO_ROOT / "new_saved_models/03-03-16:37:54_sentence_retrieval/i(800)_epoch(0)_(tra_score:0.3333333333333333|raw_acc:1.0|pr:1.0|rec:0.0|f1:0.0)"
    print("Model From:", SAVE_PATH)
    #train and dev upstream file (output of upstream document retrieval)
    dev_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_02_10:54:53_r/nn_doc_retr_1_shared_task_dev.jsonl"
    #bert pretrained
    # NOTE(review): path is "roberta" here but "roberta-base" in train_fever
    # -- confirm both point at the same pretrained weights.
    bert_src=config.DEP_PATH / "roberta"
    bert_servant = RobertaServant(bert_type_name=bert_src)
    #data reader
    dev_fever_data_reader = RobertaReader(bert_servant, lazy=lazy, max_l=max_l)
    # Candidate sentences for each dev claim, flattened for scoring.
    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL, dev_upstream_file, pred=True)
    dev_actual_list = common.load_jsonl(config.DATA_ROOT / "fever/shared_task_dev.jsonl")
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    #iterator
    dev_biterator = BasicIterator(batch_size=batch_size)
    #vocabulary
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    # NOTE(review): this function uses the "labels" namespace while
    # train_fever uses "selection_labels" -- confirm which one the reader
    # actually indexes against.
    vocab.add_token_to_namespace("true", namespace="labels")
    vocab.add_token_to_namespace("false", namespace="labels")
    vocab.add_token_to_namespace("hidden", namespace="labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)
    #index with vocabulary
    dev_biterator.index_with(vocab)

    # Build Model
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # currently unused below
    #bert to device
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                  max_l=max_l, num_of_class=2)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    # NOTE(review): unlike train_fever, bert_servant.bert_model is NOT moved
    # to `device` here -- verify the servant handles its own placement.
    model.to(device)

    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1)
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

    dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data,sent_retri_top_k=5)
    eval_mode = {'check_sent_id_correct': True, 'standard': True}
    # Copy gold labels into predictions so fever_score evaluates evidence
    # retrieval only (label accuracy becomes trivially perfect).
    for a, b in zip(dev_actual_list, dev_results_list):
        b['predicted_label'] = a['label']
    strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, dev_actual_list,
                                                                mode=eval_mode, verbose=False)
    total = len(dev_results_list)
    # 'check_sent_id_correct_hits' is read back from eval_mode after scoring
    # (presumably populated by fever_score -- confirm).
    hit = eval_mode['check_sent_id_correct_hits']
    tracking_score = hit / total

    print(f"Dev(fever_score/pr/rec/f1):{strict_score}/{pr}/{rec}/{f1}")
    print(f"Tracking score:", f"{tracking_score}")


def train_fever():
    """Train the RoBERTa sentence-retrieval model on FEVER.

    Each epoch resamples negative candidate sentences (decaying keep
    probability), trains with cross-entropy on two classes, periodically
    evaluates on the shared-task dev set, and checkpoints whenever the dev
    sentence hit-rate improves.

    Side effects: writes a copy of this script and model checkpoints into a
    freshly generated log directory under ``file_path_prefix``.
    """
    num_epoch = 2
    seed = 12
    batch_size = 8
    experiment_name = "roberta_sent_retrieval"
    lazy = True
    torch.manual_seed(seed)
    # Probability of keeping a negative example; decays each epoch so later
    # epochs see a harder (more balanced) sample.
    keep_neg_sample_prob = 0.4
    sample_prob_decay = 0.05
    max_l=128
    top_k_doc = 5
    #train and dev upstream file (output of upstream document retrieval)
    dev_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_17:40:14_r/nn_doc_retr_1_shared_task_dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_14:35:16_r/nn_doc_retr_1_train.jsonl"
    #bert pretrained
    bert_src=config.DEP_PATH / "roberta-base"
    bert_servant = RobertaServant(bert_type_name=bert_src)
    #data reader
    dev_fever_data_reader = RobertaReader(bert_servant, lazy=lazy, max_l=max_l)
    train_fever_data_reader = RobertaReader(bert_servant, lazy=lazy, max_l=max_l)

    complete_upstream_dev_data = get_full_list(config.T_FEVER_DEV_JSONL, dev_upstream_file, pred=True,top_k=top_k_doc)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    dev_actual_list = common.load_jsonl(config.DATA_ROOT / "fever/shared_task_dev.jsonl")
    #iterator
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=batch_size)
    #vocabulary
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab)
    #index with vocabulary
    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)

    # Build Model (GPU index 1 when CUDA is available, otherwise CPU).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=1)
    #bert to device
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                  max_l=max_l, num_of_class=2)
    model.display()
    model.to(device)
    bert_servant.bert_model.to(device)
    # create Log File and save a copy of the running script for reproducibility
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()

    best_dev = -1
    iteration = 0

    start_lr = 1e-5
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling: rebuild the candidate list and down-sample negatives
        # with a per-epoch seed to avoid duplicate samples across epochs.
        complete_upstream_train_data = get_full_list(config.T_FEVER_TRAIN_JSONL, train_upstream_file, pred=False)
        print("all train sentence size:{}".format(len(complete_upstream_train_data)))
        print("negative sample prob.:", keep_neg_sample_prob)
        filtered_train_data = post_filter(complete_upstream_train_data, keep_prob=keep_neg_sample_prob,
                                          seed=12 + i_epoch)
        keep_neg_sample_prob -= sample_prob_decay
        if keep_neg_sample_prob <= 0:
            keep_neg_sample_prob = 0.005  # never drop all negatives
        print("sampled train data size:",len(filtered_train_data))
        sampled_train_instances = train_fever_data_reader.read(filtered_train_data)
        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1)
        # BUGFIX: the original peeked `next(iter(train_iter))` here, which
        # consumed -- and therefore silently skipped -- the first batch of
        # every epoch; the peeked value was never used.
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            # BUGFIX: move labels with the device object; the original used
            # the raw ordinal, and `.to(-1)` is invalid on the CPU path.
            y = batch['selection_label'].to(device)
            loss = criterion(out, y)
            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            if (i+1)%10==0:
                print('epoch: {}, batch: {}, loss: {}'.format(i_epoch + 1, i + 1, loss.data))

            # Evaluate less often during the early epochs.
            if i_epoch <= 1:
                mod = 20000
            else:
                mod = 8000

            if iteration % mod == 0:
                eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1)
                complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

                dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data,sent_retri_top_k=5)
                eval_mode = {'check_sent_id_correct': True, 'standard': True}
                # Copy gold labels into predictions so fever_score measures
                # evidence retrieval only.
                for a, b in zip(dev_actual_list, dev_results_list):
                    b['predicted_label'] = a['label']
                strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, dev_actual_list,
                                                                            mode=eval_mode, verbose=False)
                total = len(dev_results_list)
                hit = eval_mode['check_sent_id_correct_hits']
                hit_rate = hit / total

                print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}/")
                print("Strict score:", strict_score)
                print(f"Eval hit rate:", f"{hit_rate}")

                # Checkpoint on dev hit-rate improvement.
                need_save = False
                if hit_rate > best_dev:
                    best_dev = hit_rate
                    need_save = True

                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_'
                        f'(hit_rate:{hit_rate}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})'
                    )
                    print("save epoch {} step {}'s model into {}".format(i_epoch,i+1,save_path))
                    torch.save(model.state_dict(), save_path)

        # End-of-epoch evaluation; always saves a checkpoint.
        print("Epoch Evaluation...")
        eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1)
        complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data)

        dev_results_list = score_converter_v0(config.T_FEVER_DEV_JSONL, complete_upstream_dev_data,sent_retri_top_k=5)
        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        for a, b in zip(dev_actual_list, dev_results_list):
            b['predicted_label'] = a['label']
        strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(dev_results_list, dev_actual_list,
                                                                    mode=eval_mode, verbose=False)
        total = len(dev_results_list)
        hit = eval_mode['check_sent_id_correct_hits']
        hit_rate = hit / total

        print(f"Dev(raw_acc/pr/rec/f1):{acc_score}/{pr}/{rec}/{f1}/")
        print("Strict score:", strict_score)
        print(f"Eval hit rate:", f"{hit_rate}")

        if hit_rate > best_dev:
            best_dev = hit_rate

        save_path = os.path.join(
            file_path_prefix,
            f'i({iteration})_epoch({i_epoch})_'
            f'(hit_rate:{hit_rate}|raw_acc:{acc_score}|pr:{pr}|rec:{rec}|f1:{f1})_epoch'
        )

        torch.save(model.state_dict(), save_path)


def score_converter_v0(org_data_file, full_sent_list, sent_retri_top_k):
    """Merge per-sentence selection scores back into the original claim file.

    :param org_data_file: path to the original FEVER jsonl claims file.
    :param full_sent_list: sentence items carrying 'selection_id', 'sid' and
        'prob' (and usually 'score'), as produced by ``hidden_eval``.
    :param sent_retri_top_k: number of top-probability sentences kept per claim.
    :return: the claim list, each item extended with 'scored_sentids',
        'predicted_sentids', 'predicted_evidence' (and 'predicted_label' set
        to the gold label when one is present).
    """
    d_list = common.load_jsonl(org_data_file)
    # Group sentence items by the claim id encoded in selection_id
    # ("<claim_id><##>...").
    augmented_dict = dict()
    print("Build selected sentences file:", len(full_sent_list))
    for sent_item in tqdm(full_sent_list):
        org_id = int(sent_item['selection_id'].split('<##>')[0])
        augmented_dict.setdefault(org_id, []).append(sent_item)

    for item in d_list:
        sents = augmented_dict.get(int(item['id']), [])
        # (sid, prob) pairs, highest probability first; sid format is
        # doc_id + SENT_LINE separator + line_number.
        cur_predicted_sentids = sorted(
            ((sent_i['sid'], sent_i['prob']) for sent_i in sents),
            key=lambda x: -x[1])

        item['scored_sentids'] = cur_predicted_sentids
        item['predicted_sentids'] = [sid for sid, _ in item['scored_sentids']][:sent_retri_top_k]
        item['predicted_evidence'] = convert_evidence2scoring_format(item['predicted_sentids'])
        if 'label' in item.keys():
            item['predicted_label'] = item['label']  # give ground truth label

    # Strip transient score fields so the sentence list can be reused.
    # BUGFIX: the original `del sent_item['score']; del sent_item['prob']`
    # raised KeyError when 'score' was present without 'prob', and left
    # 'prob' behind when 'score' was absent.
    for sent_item in full_sent_list:
        sent_item.pop('score', None)
        sent_item.pop('prob', None)

    return d_list


def pipeline_first_sent_selection(org_t_file, upstream_in_file, model_save_path, top_k):
    """Run a saved sentence-retrieval checkpoint over upstream document
    retrieval output and return the scored sentence list.

    :param org_t_file: original FEVER jsonl claims file.
    :param upstream_in_file: jsonl output of upstream document retrieval.
    :param model_save_path: path to a saved model state_dict.
    :param top_k: number of top documents whose sentences are considered.
    :return: sentence items with 'score' and 'prob' fields attached.
    """
    batch_size = 64
    lazy = True
    max_l=128
    SAVE_PATH = model_save_path
    print("Model From:", SAVE_PATH)

    #bert pretrained
    # NOTE(review): path is "roberta" here but "roberta-base" in train_fever
    # -- confirm both resolve to the same pretrained weights.
    bert_src=config.DEP_PATH / "roberta"
    bert_servant = RobertaServant(bert_type_name=bert_src)
    #data reader
    dev_upstream_file = upstream_in_file
    dev_fever_data_reader = RobertaReader(bert_servant, lazy=lazy, max_l=max_l)

    # Candidate sentences for each claim, flattened for scoring.
    complete_upstream_dev_data = get_full_list(org_t_file,dev_upstream_file, pred=True,top_k=top_k)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)

    #iterator
    dev_biterator = BasicIterator(batch_size=batch_size)
    #vocabulary
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.add_token_to_namespace("true", namespace="selection_labels")
    vocab.add_token_to_namespace("false", namespace="selection_labels")
    vocab.add_token_to_namespace("hidden", namespace="selection_labels")
    vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
    print(vocab.get_token_to_index_vocabulary('selection_labels'))
    print(vocab)
    #index with vocabulary
    dev_biterator.index_with(vocab)
    # Build Model (GPU index 0 when CUDA is available, otherwise CPU).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # currently unused below

    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                  max_l=max_l, num_of_class=2)
    model.load_state_dict(torch.load(SAVE_PATH))
    model.display()
    model.to(device)
    bert_servant.bert_model.to(device)

    eval_iter = dev_biterator(dev_instances, shuffle=False, num_epochs=1)
    dev_sent_full_list = hidden_eval(model, eval_iter, complete_upstream_dev_data)

    return dev_sent_full_list


class SModelForDemo():
    """Demo wrapper: loads an ELMo/GloVe-based sentence selector once and
    exposes ``evaluate`` for repeated in-memory scoring.
    """

    def __init__(self, model_path):
        """Load vocabulary, build the model, and restore weights from
        ``model_path``.
        """
        lazy = False
        SAVE_PATH = model_path
        # print("Model From:", SAVE_PATH)

        # Prepare Data
        token_indexers = {
            'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
            'elmo_chars': ELMoTokenCharactersIndexer(namespace='elmo_characters')  # This is the elmo_characters
        }

        self.dev_fever_data_reader = SSelectorReader(token_indexers=token_indexers, lazy=lazy)

        # Load Vocabulary
        self.dev_biterator = BasicIterator(batch_size=64)

        vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
        # THis is important
        vocab.add_token_to_namespace("true", namespace="selection_labels")
        vocab.add_token_to_namespace("false", namespace="selection_labels")
        vocab.add_token_to_namespace("hidden", namespace="selection_labels")
        vocab.change_token_with_index_to_namespace("hidden", -2, namespace='selection_labels')
        # Label value
        vocab.get_index_to_token_vocabulary('selection_labels')

        # print(vocab.get_token_to_index_vocabulary('selection_labels'))
        # print(vocab.get_vocab_size('tokens'))

        self.dev_biterator.index_with(vocab)

        # exit(0)
        # Build Model
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
        device_num = -1 if device.type == 'cpu' else 0

        # NOTE(review): the Model class defined in this file does not accept
        # `weight`, `vocab_size`, or `embedding_dim` kwargs, so this call
        # would raise TypeError as written -- it appears to target an older
        # (ELMo/GloVe) Model definition; confirm the intended class.
        model = Model(weight=weight_dict['glove.840B.300d'],
                      vocab_size=vocab.get_vocab_size('tokens'),
                      embedding_dim=300, max_l=300, num_of_class=2)

        model.load_state_dict(torch.load(SAVE_PATH))
        model.display()
        model.to(device)

        self.model = model
        self.device_num = device_num

    def evaluate(self, org_t_file, upstream_in_file, top_k):
        """Score upstream candidate sentences with the loaded model.

        :param org_t_file: original claims (list of dicts).
        :param upstream_in_file: upstream document-retrieval output (list).
        :param top_k: number of top documents whose sentences are considered.
        :return: sentence items with 'score' and 'prob' fields attached.
        """
        print("Prepare Data")
        complete_upstream_dev_data = get_full_list_from_list_d(org_t_file, upstream_in_file, pred=True, top_k=top_k)
        print("Read Data")
        dev_instances = self.dev_fever_data_reader.read(complete_upstream_dev_data)

        # Build Model
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
        device_num = -1 if device.type == 'cpu' else 0

        print("Indexing")
        eval_iter = self.dev_biterator(dev_instances, shuffle=False, num_epochs=1, cuda_device=device_num)
        dev_sent_full_list = hidden_eval(self.model, eval_iter, complete_upstream_dev_data)

        return dev_sent_full_list




if __name__ == "__main__":
    # Entry point: train the sentence-retrieval model; switch to
    # hidden_sent_retri_eval() to evaluate a saved checkpoint instead.
    train_fever()
    #hidden_sent_retri_eval()
