# XLNet+GCN NLI for FEVER — the variant whose classifier concatenates the
# context logits with a projected graph summary (the "shortcut" head).
import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo

from data_util.data_readers.fever_reader_with_wn_simi import WNSIMIReader
from neural_modules.ema import EMA
from sample_for_nli.adv_sampler_v01 import get_adv_sampled_data
from sentence_retrieval.nn_postprocess_ablation import score_converter_scaled
from torch import nn
import copy
from neural_modules.ema import load_ema_to_model, save_ema_to_file
import os

import config

from data_util.exvocab import load_vocab_embeddings,ExVocabulary
from simi_sampler_nli_v0.simi_sampler import paired_selection_score_dict, threshold_sampler, \
    select_sent_with_prob_for_eval, adv_simi_sample_with_prob_v1_0, adv_simi_sample_with_prob_v1_1, \
    select_sent_with_prob_for_eval_list
from nli.nli_sample import sample_v1_0,select_sent_for_eval,get_openie_for_claim, select_sent_for_test,convert_evidence2scoring_format,load_data
from utils import common

from log_util import save_tool

from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from data_util.data_readers.fever_bert_reader import RobertaNLIReader,XlnetGCNReader
from neural_modules.bert_servant import BertServant,RobertaServant,DebertaServant,XlnetServant
from neural_modules import biDafAttn
from utils import c_scorer
from wn_featurizer import wn_persistent_api
from utils.print_log import get_info_log
from nli.graph_model import GCNGraphAgg
logger=get_info_log(__name__)

class Model(nn.Module):
    """3-way FEVER NLI classifier: pretrained transformer encoder + GCN over an evidence graph.

    The final logits come from concatenating (a) an MLP projection of the
    encoder's pooled context vector with (b) a 20-dim projection of an
    attention-weighted summary of GCN-updated evidence-node embeddings,
    then feeding the concatenation through ``self.classifier``.
    """

    def __init__(self,
                 bert_servant=None,
                 bert_batch_size=32,
                 max_l=80,mlp_d=300, num_of_class=3, drop_r=0.5,kernal_size=10 ,node_size=100,max_evi_node_l=40,activation_type='relu'):
        """Build the projection, classifier and GCN layers.

        Args:
            bert_servant: wrapper exposing ``run_bert`` over the pretrained encoder.
            bert_batch_size, drop_r: accepted but never used in this variant
                (no dropout is applied anywhere in this class).
            max_l, max_evi_node_l: length limits; stored but not read here.
            mlp_d: width of the pooled-context projection.
            num_of_class: number of NLI labels (3).
            kernal_size, node_size: GCN aggregation hyper-parameters.
            activation_type: 'relu' or 'tanh' for the classifier head.

        Raises:
            ValueError: for any other ``activation_type``.
        """
        super(Model, self).__init__()
        self.bert_dim=1024  # hidden size of the (large) pretrained encoder
        self.max_l = max_l
        # self.max_claim_node_l=max_claim_node_l
        self.max_evi_node_l=max_evi_node_l
        self.node_size=node_size
        #bert
        print("*"*20,"initialize the pretrained model")
        self.bert_servant = bert_servant
        self.kernal_size=kernal_size
        self.mlp = nn.Linear(self.bert_dim, mlp_d)
        # The +20 matches the output width of ``self.graph_mlp`` below: the
        # classifier consumes [context_logits ; graph_logits].
        self.sm = nn.Linear(mlp_d+20, num_of_class)
        self.logits_proj = nn.Linear(mlp_d, num_of_class)  # unused in forward()
        self.graph_aggre = GCNGraphAgg(self.bert_dim,self.kernal_size,self.node_size)  # unused in forward()
        self.graph_logits_proj = nn.Linear(self.node_size,num_of_class)  # unused in forward()
        self.graph_mlp=nn.Linear(self.node_size,20)
        # self.final_mlp=nn.Linear(mlp_d+self.node_size,mlp_d)
        # self.final_sm=nn.Linear(mlp_d,num_of_class)
        # self.final_mlp=nn.Linear(self.node_size,mlp_d/6)
        # self.final_sm=nn.Linear(mlp_d/6,num_of_class)
        if activation_type == 'relu':
            activation = nn.ReLU()
        elif activation_type == 'tanh':
            activation = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")

        self.graph_final_logits_proj=nn.Linear(2*num_of_class,num_of_class)  # unused in forward()
        # self.final_classifier = nn.Sequential(*[self.final_mlp, activation, self.final_sm])
        self.classifier = nn.Sequential(*[activation, self.sm])
        #GCNs
        self.graph_node_proj = nn.Linear(self.bert_dim,self.node_size)
        # Two GCN layers, each with a neighbour transform (GCN_W) and a
        # self-loop transform (GCN_W_self); ModuleList registers their
        # parameters with the module so they are trained and saved.
        self.GCN_W=[]
        self.GCN_W_self=[]
        for i in range(2):
            self.GCN_W.append(nn.Linear(self.node_size, self.node_size) )
            self.GCN_W_self.append( nn.Linear(self.node_size, self.node_size) )
        self.GCN_W=nn.ModuleList(self.GCN_W)
        self.GCN_W_self=nn.ModuleList(self.GCN_W_self)


    def display(self, exclude=None):
        """Print each trainable parameter's name/shape and the total count,
        excluding parameters whose name contains any string in ``exclude``
        (defaults to {'glove'})."""
        total_p_size = 0
        if exclude is None:
            exclude = {'glove'}

        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.data.size())

                exclude_this = False
                for exclude_name in exclude:
                    if exclude_name in str(name):
                        exclude_this = True

                if exclude_this:
                    continue

                # NOTE: this local ``nn`` shadows the ``torch.nn`` import
                # within the loop body (harmless here, but easy to trip over).
                nn = 1
                for s in list(param.size()):
                    nn = nn * s
                total_p_size += nn

        print('Total Size:', total_p_size)

    def forward(self, batch):
        """Compute class logits for a batch.

        Reads from ``batch``: 'token_ids', 'input_mask', 'token_type_ids'
        (encoder inputs), 'evidence_node_ids' (per-node token indicator
        matrix — assumed (batch, node, seq); TODO confirm against the
        reader) and 'evidence_edge_matrix' (node adjacency).
        """
        token_ids= batch['token_ids'].to(next(self.parameters()).device)
        input_mask=batch['input_mask'].to(next(self.parameters()).device)
        token_type_ids=batch['token_type_ids'].to(next(self.parameters()).device)
        #claim
        # claim_node_ids=batch['claim_node_ids'].to(next(self.parameters()).device)
        # claim_node_mask=batch['claim_node_mask'].to(next(self.parameters()).device)
        # claim_edge_matrix=batch['claim_edge_matrix'].to(next(self.parameters()).device)
        #evidence
        evidence_node_ids=batch['evidence_node_ids'].to(next(self.parameters()).device)
        # evidence_node_mask=batch['evidence_node_mask'].to(next(self.parameters()).device)
        evidence_edge_matrix=batch['evidence_edge_matrix'].to(next(self.parameters()).device)
        # hidden: per-token states; context: pooled sequence representation.
        hidden,context= self.bert_servant.run_bert(token_ids,input_mask,token_type_ids)
        #(2,300)
        context_logits=self.mlp(context)
        #node embed,(batch,ndoe,node_size)
        evidence_node_ids=evidence_node_ids.float()
        evidence_edge_matrix=evidence_edge_matrix.float()
        # Sum the token vectors selected by each node's indicator row, then
        # divide by the token count: mean-pooled node representation.
        # The 1e-30 guards empty nodes against division by zero.
        evi_nodes_rep=torch.einsum('abc,acd->abd',evidence_node_ids,hidden)
        evi_nodes_rep=evi_nodes_rep/(evidence_node_ids.sum(-1)+1e-30)[:,:,None]
        evi_nodes_rep = torch.relu(self.graph_node_proj(evi_nodes_rep))
        #(batch,node,node_size)
        for i in range(2):
            # Neighbour messages averaged according to the adjacency matrix.
            evi_nodes_emb_avg=torch.einsum('abc,acd->abd', evidence_edge_matrix,self.GCN_W[i](evi_nodes_rep))
            evi_nodes_emb_avg=evi_nodes_emb_avg/(evidence_edge_matrix.sum(-1)+1e-30)[:,:,None]
            # NOTE(review): ``evi_nodes_rep`` is never updated inside this
            # loop, so the second iteration re-reads the layer-0 inputs and
            # overwrites ``evi_nodes_emb`` — confirm whether
            # ``evi_nodes_rep = evi_nodes_emb`` was intended between layers.
            # Also, nn.functional.tanh is deprecated in newer torch
            # (torch.tanh is the drop-in replacement).
            evi_nodes_emb=nn.functional.tanh(self.GCN_W_self[i](evi_nodes_rep)+evi_nodes_emb_avg)
        #attention
        # Attend over the node embeddings with the projected context as query.
        graph_context=self.graph_node_proj(context)
        atte_score=torch.einsum("ab,acb->ac",graph_context,evi_nodes_emb)
        atten_weight=torch.softmax(atte_score,dim=-1)
        claim_evi_rep=torch.einsum("ab,abc->ac",atten_weight,evi_nodes_emb)
        # claim_evi_rep = self.graph_aggre(hidden,claim_node_ids,claim_edge_matrix,evidence_node_ids,evidence_edge_matrix)
        # graph_logits=self.graph_logits_proj(claim_evi_rep)
        graph_logits=self.graph_mlp(claim_evi_rep)
        # Concatenate context and graph features for the classifier head.
        concat_logits=torch.cat([context_logits,graph_logits],dim=-1)
        # finla_logits= torch.cat([context_logits,claim_evi_rep],dim=-1)
        # xlnet_logits = torch.nn.functional.relu(self.logits_proj(context_logits))
        # graph_logits = torch.nn.functional.relu(self.graph_logits_proj(claim_evi_rep))
        # logits = self.graph_final_logits_proj(torch.cat([xlnet_logits,graph_logits],dim=-1))
        # logits = torch.nn.functional.softmax(logits,dim=-1)
        # return logits
        # return self.final_classifier(finla_logits)
        return self.classifier(concat_logits)
        # return logits



def get_sampled_data(tokenized_data_file, additional_data_file):
    """Sample NLI training items from the tokenized FEVER file plus upstream evidence.

    NOTE(review): the original called ``sample_v1_1``, which is not imported
    anywhere in this file and would raise NameError at call time; switched to
    the imported ``sample_v1_0`` (from nli.nli_sample) — confirm this is the
    intended sampler variant.
    """
    sampled_d_list = sample_v1_0(tokenized_data_file, additional_data_file, tokenized=True)
    return sampled_d_list


def get_actual_data(tokenized_data_file, additional_data_file):
    """Select evaluation items from the tokenized FEVER file plus upstream evidence."""
    return select_sent_with_prob_for_eval if False else select_sent_for_eval(
        tokenized_data_file, additional_data_file, tokenized=True)

def get_test_data(tokenized_data_file, additional_data_file):
    """Select test-split items from the tokenized FEVER file plus upstream evidence."""
    return select_sent_for_test(tokenized_data_file, additional_data_file, tokenized=True)


def full_eval_model(model, data_iter, criterion, dev_data_list, device_num):
    """Evaluate ``model`` on the dev set and score with the FEVER scorer.

    Writes ``predicted_label`` into each item of ``dev_data_list`` (forcing
    "NOT ENOUGH INFO" when an item has no retrieved sentences), then scores
    the annotated list with ``c_scorer.fever_score``.

    Fixes vs. original: renamed the ``totoal_size`` typo, dropped the unused
    ``avg_acc`` computation, replaced the index loop with ``zip``.

    Args:
        model: classifier returning per-class logits for a batch dict.
        data_iter: iterator of batches with 'label' and 'pid' entries.
        criterion: loss returning a batch mean (e.g. CrossEntropyLoss).
        dev_data_list: dev items aligned 1:1 with the iterator's examples.
        device_num: device index (or -1 for CPU) for the label tensor.

    Returns:
        (strict_score, avg_loss): FEVER strict score and mean dev loss.
    """
    # Label-index mapping used by the readers:
    # SUPPORTS -> 0, REFUTES -> 1, NOT ENOUGH INFO -> 2
    id2label = {
        0: "SUPPORTS",
        1: "REFUTES",
        2: "NOT ENOUGH INFO"
    }

    print("Evaluating ...")
    model.eval()
    n_correct = 0
    loss = 0.0
    total_size = 0

    y_pred_list = []
    y_true_list = []
    y_id_list = []

    with torch.no_grad():  # no gradients needed during evaluation
        for batch_idx, batch in enumerate(data_iter):
            out = model(batch)
            y = batch['label'].to(device_num)
            y_id_list.extend(list(batch['pid']))

            preds = torch.max(out, 1)[1].view(y.size())
            n_correct += (preds == y).sum().item()

            y_pred_list.extend(preds.tolist())
            y_true_list.extend(y.tolist())

            # criterion returns a batch mean; scale back to a sum so the
            # final average is weighted correctly across uneven batches.
            loss += criterion(out, y).item() * y.size(0)
            total_size += y.size(0)

        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_list) == len(dev_data_list)
        assert len(y_true_list) == len(dev_data_list)

        for pred_id, pred_idx, item in zip(y_id_list, y_pred_list, dev_data_list):
            # Predictions must stay aligned with the dev items.
            assert str(pred_id) == str(item['id'])

            item['predicted_label'] = id2label[pred_idx]
            # No retrieved evidence -> force NOT ENOUGH INFO.
            if len(item['predicted_sentids']) == 0:
                item['predicted_label'] = "NOT ENOUGH INFO"

        print('n_correct:', n_correct)
        print('total_size:', total_size)

        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
            dev_data_list, dev_data_list, mode=eval_mode, verbose=False)
        print("Fever Score(Strict/Acc./Precision/Recall/F1):", strict_score, acc_score, pr, rec, f1)

        avg_loss = loss / total_size

    return strict_score, avg_loss

def hidden_eval(model, data_iter, dev_data_list, with_logits=False, with_probs=False):
    """Run inference and annotate ``dev_data_list`` in place with predictions.

    Each item receives ``predicted_label`` (plus ``logits`` / ``probs`` when
    requested); items with no retrieved sentences are forced to
    "NOT ENOUGH INFO".

    Fixes vs. original: renamed the ``totoal_size`` typo and replaced the
    ``range(len(...))`` loop with ``enumerate``.

    Args:
        model: classifier returning per-class logits for a batch dict.
        data_iter: iterator of batches carrying a 'pid' entry.
        dev_data_list: items aligned 1:1 with the iterator's examples.
        with_logits: attach raw logits to each item.
        with_probs: attach softmax probabilities to each item.

    Returns:
        The same (mutated) ``dev_data_list``.
    """
    # Label-index mapping: SUPPORTS -> 0, REFUTES -> 1, NOT ENOUGH INFO -> 2
    id2label = {
        0: "SUPPORTS",
        1: "REFUTES",
        2: "NOT ENOUGH INFO"
    }
    logger.info("id2label:{}".format(id2label))
    print("*"*20,"Evaluating ...")
    with torch.no_grad():
        model.eval()
        total_size = 0

        y_pred_list = []
        y_id_list = []
        y_logits_list = []
        y_probs_list = []

        print("*"*20,"running the model for batch sample:")
        for batch_idx, batch in enumerate(tqdm(data_iter)):
            out = model(batch)
            y_id_list.extend(list(batch['pid']))

            # argmax over the class dimension -> flat list of class indices
            y_pred_list.extend(torch.max(out, 1)[1].view(out.size(0)).tolist())

            if with_logits:
                y_logits_list.extend(out.tolist())

            if with_probs:
                y_probs_list.extend(F.softmax(out, dim=1).tolist())

            total_size += out.size(0)

        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_list) == len(dev_data_list)

        for i, item in enumerate(dev_data_list):
            # Predictions must stay aligned with the items.
            assert str(y_id_list[i]) == str(item['id'])

            item['predicted_label'] = id2label[y_pred_list[i]]
            if with_logits:
                item['logits'] = y_logits_list[i]

            if with_probs:
                item['probs'] = y_probs_list[i]

            # No retrieved evidence -> force NOT ENOUGH INFO.
            if len(item['predicted_sentids']) == 0:
                item['predicted_label'] = "NOT ENOUGH INFO"

        print('total_size:', total_size)

    return dev_data_list



def open_ie(upstream_file, mode="train"):
    """Load the sampled/selected FEVER item list for the requested split.

    Args:
        upstream_file: upstream retrieval/OpenIE output for the split.
        mode: one of "train", "shared_task_dev", "shared_task_test".

    Returns:
        The item list produced by the corresponding sampler/selector.

    Raises:
        ValueError: for any other ``mode`` (the original fell through and
        crashed with NameError on the return instead).
    """
    if mode == "train":
        data_list = get_sampled_data(config.T_FEVER_TRAIN_JSONL, upstream_file)
    elif mode == "shared_task_dev":
        data_list = get_actual_data(config.T_FEVER_DEV_JSONL, upstream_file)
    elif mode == "shared_task_test":
        data_list = get_test_data(config.DATA_ROOT / "fever/shared_task_test.jsonl", upstream_file)
    else:
        raise ValueError("Unknown mode: {}".format(mode))

    return data_list

def open_ie_for_claim(upstream_file):
    """Run OpenIE extraction for claims and write the result back to ``upstream_file``."""
    extracted = get_openie_for_claim(upstream_file)
    print("save openie result into{}".format(upstream_file))
    common.save_jsonl(extracted, upstream_file)

def train_fever():
    """Fine-tune the XLNet+GCN FEVER NLI model with periodic dev evaluation and checkpointing.

    Restores only the encoder ("bert_servant") weights from a previous
    checkpoint, freezes the first 410 named parameters, and trains the
    remaining head parameters with Adam.
    """
    # --- Hyper-parameters ---
    num_epoch = 2
    seed = 12  # NOTE(review): never fed to torch/random seeding — only appears in the checkpoint filename
    batch_size = 6
    experiment_name = "xlnet_gnn_nli"
    lazy = True
    max_l=256
    max_evi_node_l=20
    # max_claim_node_l=10
    kernal_size=10
    node_size=100

    # Upstream OpenIE/sentence-retrieval outputs for the dev and train splits.
    dev_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_17:40:14_r/nli_openie_shared_task_dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_14:35:16_r/nli_openie_train.jsonl"
    #train_upstream_file=dev_upstream_file
    #bert pretrained
    bert_src=config.DEP_PATH / "xlnet-base"
    bert_servant = XlnetServant(bert_type_name=bert_src)

    dev_fever_data_reader = XlnetGCNReader(bert_servant, lazy=lazy, max_l=max_l,max_evi_node_l=max_evi_node_l)
    train_fever_data_reader = XlnetGCNReader(bert_servant, lazy=lazy, max_l=max_l,max_evi_node_l=max_evi_node_l)
    #dict_keys(['id', 'verifiable', 'label', 'claim', 'evidence', 'predicted_evidence', 'predicted_sentids', 'evidence_text_list', 'evidence_tuple_list'])
    complete_upstream_dev_data=load_data(dev_upstream_file)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    # NOTE(review): loaded but never used below.
    dev_actual_list = common.load_jsonl(config.DATA_ROOT / "fever/shared_task_dev.jsonl")
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    # NOTE(review): dev evaluation below uses ``biterator``; this one is only indexed.
    dev_biterator = BasicIterator(batch_size=batch_size)

    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.add_token_to_namespace('SUPPORTS', namespace='labels')
    vocab.add_token_to_namespace('REFUTES', namespace='labels')
    vocab.add_token_to_namespace('NOT ENOUGH INFO', namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)

    # Build Model
    # Hard-pinned to GPU index 1 when CUDA is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu",index=1)
    device_num = -1 if device.type == 'cpu' else 1
    #bert to device
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,kernal_size=kernal_size,
                  max_l=max_l,max_evi_node_l=max_evi_node_l,node_size=node_size,num_of_class=3)
    # Restore ONLY the encoder ("bert_servant") weights from this checkpoint;
    # every other checkpoint entry is dropped before merging.
    model_path=config.PRO_ROOT / 'saved_models/concat_nli/03-24-08:56:25_xlnet_gnn_nli/i(66000)_epoch(1)_dev(0.7287728772877288)_loss(0.7018586273988032)_seed(12)'
    model_dict=model.state_dict()
    state_dict = torch.load(model_path)
    dict_name=list(state_dict)
    for key in dict_name:
        if "bert_servant" not in key:
            state_dict.pop(key)
    # for i,p in enumerate(list(model_dict.keys())):
    #     print(i,p)
    model_dict.update(state_dict)
    model.load_state_dict(model_dict)
    # Freeze the first 410 named parameters — presumably the pretrained
    # encoder; confirm the index boundary against model.display().
    for i,p in enumerate(model.named_parameters()):
        # print(i,p[0])
        if i <= 409:
            p[1].requires_grad = False
    model.display()
    model.to(device)
    bert_servant.bert_model.to(device)

    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save the source code.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.

    best_dev = -1
    iteration = 0

    start_lr = 2e-6
    # Optimize only the unfrozen (head) parameters.
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr)
    criterion = nn.CrossEntropyLoss()
    # for i,p in enumerate(model.named_parameters()):
    #     if p[1].requires_grad==False:
    #         print(i,p[0])
    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling
        complete_upstream_train_data=load_data(train_upstream_file)
        sampled_train_instances = train_fever_data_reader.read(complete_upstream_train_data)
        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['label'].to(device_num)
            loss = criterion(out, y)
            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            if iteration%10==0:
                print('epoch: {}, batch: {},loss: {}'.format(i_epoch + 1, i + 1,loss.data))

            # Evaluate every 3000 iterations for the first two epochs and
            # every 1000 after that (with num_epoch=2 this is always 3000).
            if i_epoch <= 1:
                mod = 3000
            else:
                mod = 1000

            if iteration % mod == 0:
                eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
                dev_score, dev_loss = full_eval_model(model, eval_iter, criterion, complete_upstream_dev_data,device_num)

                print(f"Dev:{dev_score}/{dev_loss}")

                # Checkpoint only on a new best strict score.
                need_save = False
                if dev_score > best_dev:
                    best_dev = dev_score
                    need_save = True

                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_dev({dev_score})_loss({dev_loss})_seed({seed})'
                    )

                    torch.save(model.state_dict(), save_path)



def pipeline_gnn_nli_run(t_org_file, complete_upstream_dev_data, model_path,
                     with_logits=False, with_probs=False, load_from_dict=False):
    """Load the XLNet+GCN NLI model from ``model_path`` and label the upstream items.

    Args:
        t_org_file: NOTE(review): accepted but never used in this function.
        complete_upstream_dev_data: upstream items; annotated in place via ``hidden_eval``.
        model_path: checkpoint (state_dict file, or EMA file when ``load_from_dict``).
        with_logits: also attach raw logits to each item.
        with_probs: also attach softmax probabilities to each item.
        load_from_dict: load weights through the EMA helper instead of
            ``load_state_dict``.

    Returns:
        The annotated item list returned by ``hidden_eval``.
    """
    batch_size = 16
    lazy = True
    max_l=256
    max_evi_node_l=40
    max_claim_node_l=40
    kernal_size=10  # NOTE(review): unused — Model below is built with its default kernal_size
    print("*"*20,"pipeline nli run start:")
    print("Size:", len(complete_upstream_dev_data))

    #bert pretrained
    bert_src=config.DEP_PATH / "xlnet-large-cased"
    bert_servant = XlnetServant(bert_type_name=bert_src)

    dev_fever_data_reader = XlnetGCNReader(bert_servant, lazy=lazy, max_l=max_l,max_claim_node_l=max_claim_node_l,max_evi_node_l=max_evi_node_l)

    # All items get the placeholder 'hidden' label required at inference time.
    complete_upstream_dev_data= append_hidden_label(complete_upstream_dev_data)
    print("*"*20,"read the dev data:")
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    print("*"*20,"initialize the BasicIterator:")
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    print("*"*20,"load the vocab embedding:")
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    # Build Model
    print("*"*20,"build the model")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # NOTE(review): computed but unused below

    # NOTE(review): built with default kernal_size/node_size/max_evi_node_l —
    # confirm the defaults match the checkpoint's architecture.
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                    max_l=max_l, num_of_class=3)

    if not load_from_dict:
        model.load_state_dict(torch.load(model_path))
    else:
        load_ema_to_model(model, model_path)

    print("*"*20,"model display:")
    model.display()
    model.to(device)
    print("*"*20,"build the BasicIterator:")
    eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
    print("*"*20,"start evaluate the hidden")
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data,
                                             with_logits=with_logits,
                                             with_probs=with_probs)

    return complete_upstream_dev_data



def pipeline_concat_nli_run(t_org_file, complete_upstream_dev_data, model_path,
                    with_logits=False, with_probs=False, load_from_dict=False):
    """Load a RoBERTa-based NLI model from ``model_path`` and label the upstream items.

    Args:
        t_org_file: NOTE(review): accepted but never used in this function.
        complete_upstream_dev_data: upstream items; annotated in place via ``hidden_eval``.
        model_path: checkpoint (state_dict file, or EMA file when ``load_from_dict``).
        with_logits: also attach raw logits to each item.
        with_probs: also attach softmax probabilities to each item.
        load_from_dict: load weights through the EMA helper instead of
            ``load_state_dict``.

    Returns:
        The annotated item list returned by ``hidden_eval``.
    """
    batch_size = 16
    lazy = True
    max_l=256
    print("*"*20,"pipeline nli run start:")
    print("Size:", len(complete_upstream_dev_data))

    #bert pretrained
    bert_src=config.DEP_PATH / "roberta"
    bert_servant = RobertaServant(bert_type_name=bert_src)
    # NOTE(review): this reader is paired with the GCN ``Model`` below, whose
    # forward() reads 'evidence_node_ids'/'evidence_edge_matrix' — confirm
    # RobertaNLIReader actually produces those batch keys.
    dev_fever_data_reader = RobertaNLIReader(bert_servant, lazy=lazy, max_l=max_l)

    # All items get the placeholder 'hidden' label required at inference time.
    complete_upstream_dev_data= append_hidden_label(complete_upstream_dev_data)
    print("*"*20,"read the dev data:")
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    print("*"*20,"initialize the BasicIterator:")
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    print("*"*20,"load the vocab embedding:")
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    # Build Model
    print("*"*20,"build the model")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # NOTE(review): computed but unused below

    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                    max_l=max_l, num_of_class=3)

    if not load_from_dict:
        model.load_state_dict(torch.load(model_path))
    else:
        load_ema_to_model(model, model_path)

    print("*"*20,"model display:")
    model.display()
    model.to(device)
    print("*"*20,"build the BasicIterator:")
    eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
    print("*"*20,"start evaluate the hidden")
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data,
                                             with_logits=with_logits,
                                             with_probs=with_probs)

    return complete_upstream_dev_data


def append_hidden_label(d_list):
    """Set every item's 'label' to the placeholder 'hidden' (mutates and returns the list)."""
    for entry in d_list:
        entry['label'] = 'hidden'
    return d_list


if __name__ == "__main__":
    # Entry point: fine-tune the XLNet+GCN NLI model on FEVER.
    #upstream_file=config.RESULT_PATH / "pipeline_r" / "2021_03_08_14:35:16_r" / "nli_openie_train.jsonl"
    #open_ie_for_claim(upstream_file)
    train_fever()

