# This is the one with shortcut
import torch
from allennlp.data.iterators import BasicIterator
from allennlp.data.token_indexers import SingleIdTokenIndexer, ELMoTokenCharactersIndexer
from allennlp.modules import Embedding, Elmo

from data_util.data_readers.fever_reader_with_wn_simi import WNSIMIReader
from neural_modules.ema import EMA
from sample_for_nli.adv_sampler_v01 import get_adv_sampled_data
from sentence_retrieval.nn_postprocess_ablation import score_converter_scaled
from torch import nn
import copy
from neural_modules.ema import load_ema_to_model, save_ema_to_file
import os
import math
import argparse

import config

from data_util.exvocab import load_vocab_embeddings,ExVocabulary
from simi_sampler_nli_v0.simi_sampler import paired_selection_score_dict, threshold_sampler, \
    select_sent_with_prob_for_eval, adv_simi_sample_with_prob_v1_0, adv_simi_sample_with_prob_v1_1, \
    select_sent_with_prob_for_eval_list
from nli.nli_sample import sample_v1_0,select_sent_for_eval,get_openie_for_claim, select_sent_for_test,convert_evidence2scoring_format,load_data
from utils import common

from log_util import save_tool

from flint import torch_util
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from data_util.data_readers.fever_bert_reader import RobertaNLIReader,XlnetGCNReader,XlnetGCNGloveReader,XlnetGCNXlnetReader
from neural_modules.bert_servant import BertServant,RobertaServant,DebertaServant,XlnetServant
from neural_modules import biDafAttn
from utils import c_scorer
from wn_featurizer import wn_persistent_api
from utils.print_log import get_info_log
from nli.graph_model import GCNGraphAgg
logger=get_info_log(__name__)
from torch.nn.parameter import Parameter
from pytorch_transformers.modeling_xlnet import XLNetForFEVER,XLNetConfig

class ESIM(nn.Module):
    """ESIM-style sequence matching network.

    Two BiLSTM encoder passes with a bi-directional attention step
    (``biDafAttn``) between them, followed by max-over-time pooling and an
    MLP classifier over the matching features.
    """

    def __init__(self, rnn_size_in=(1024+300, 1024+300), rnn_size_out=(300, 300), max_l=256,
                 mlp_d=300, num_of_class=3, drop_r=0.5, activation_type='relu'):
        super(ESIM, self).__init__()

        self.dropout_layer = nn.Dropout(drop_r)

        # First- and second-pass context encoders (separate weights).
        self.lstm_1 = nn.LSTM(input_size=rnn_size_in[0], hidden_size=rnn_size_out[0],
                              num_layers=1, bidirectional=True, batch_first=True)
        self.lstm_2 = nn.LSTM(input_size=rnn_size_in[1], hidden_size=rnn_size_out[1],
                              num_layers=1, bidirectional=True, batch_first=True)

        # Projects the [h; att; h - att; h * att] co-attention features down.
        self.projection = nn.Linear(rnn_size_out[0] * 2 * 4, rnn_size_out[0])

        self.max_l = max_l
        self.bidaf = biDafAttn(300)

        self.mlp_1 = nn.Linear(rnn_size_out[1] * 2 * 4, mlp_d)
        self.sm = nn.Linear(mlp_d, num_of_class)

        if activation_type == 'relu':
            act = nn.ReLU()
        elif activation_type == 'tanh':
            act = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")

        self.classifier = nn.Sequential(*[nn.Dropout(drop_r), self.mlp_1, act, nn.Dropout(drop_r), self.sm])

    def count_params(self):
        """Print the total element count of all 2-D (matrix) parameters.

        NOTE: biases and other non-2-D parameters are intentionally skipped,
        matching the original behaviour.
        """
        total_c = sum(p.size(0) * p.size(1)
                      for p in self.parameters() if len(p.size()) == 2)
        print("Total count:", total_c)

    def display(self):
        """Print every named parameter together with its shape."""
        for name, param in self.named_parameters():
            print(name, param.data.size())

    def forward(self, layer1_s1, layer2_s1, l1, layer1_s2, layer2_s2, l2):  # [B, T]
        """Return class logits for the pair of embedded sequences."""
        # First-pass encoding of both sides.
        enc1_a = torch_util.auto_rnn(self.lstm_1, self.dropout_layer(layer1_s1), l1)
        enc1_b = torch_util.auto_rnn(self.lstm_1, self.dropout_layer(layer1_s2), l2)

        # Cross-attention between the two encoded sequences.
        sim = self.bidaf.similarity(enc1_a, l1, enc1_b, l2)
        att_a, att_b = self.bidaf.get_both_tile(sim, enc1_a, enc1_b)

        coatt_a = torch.cat([enc1_a, att_a, enc1_a - att_a, enc1_a * att_a], dim=2)
        coatt_b = torch.cat([enc1_b, att_b, enc1_b - att_b, enc1_b * att_b], dim=2)

        proj_a = F.relu(self.projection(coatt_a))
        proj_b = F.relu(self.projection(coatt_b))

        # Re-attach the second embedding layer before the second LSTM pass.
        feat_a = self.dropout_layer(torch.cat([proj_a, layer2_s1], dim=2))
        feat_b = self.dropout_layer(torch.cat([proj_b, layer2_s2], dim=2))

        enc2_a = torch_util.auto_rnn(self.lstm_2, feat_a, l1)
        enc2_b = torch_util.auto_rnn(self.lstm_2, feat_b, l2)

        pooled_a = torch_util.max_along_time(enc2_a, l1)
        pooled_b = torch_util.max_along_time(enc2_b, l2)

        features = torch.cat([pooled_a, pooled_b,
                              torch.abs(pooled_a - pooled_b),
                              pooled_a * pooled_b], dim=1)

        return self.classifier(features)

class GraphConvolution(nn.Module):
    """Single graph-convolution layer (Kipf & Welling, arXiv:1609.02907).

    Computes ``adj @ (input @ weight) + bias`` over batched node features,
    where ``input`` is (batch, node, in_features) and ``adj`` is
    (batch, node, node).
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init bounded by 1/sqrt(out_features), as in the reference code.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Feature transform first, then neighbourhood aggregation via adj.
        transformed = torch.matmul(input, self.weight)
        aggregated = torch.matmul(adj, transformed)
        return aggregated if self.bias is None else aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__,
                                      self.in_features, self.out_features)

class GraphAttentionLayer(nn.Module):
    """Single graph-attention (GAT) layer, after https://arxiv.org/abs/1710.10903.

    Operates on batched inputs: node features ``h`` of shape
    (batch, N, in_features) and an adjacency mask ``adj`` of shape
    (batch, N, N), where a positive entry ``adj[b, i, j]`` means node j is a
    neighbour of node i.
    """

    def __init__(self, in_features, out_features, dropout=0.6, alpha=0.2, concat=True):
        """
        Args:
            in_features: dimensionality of incoming node features.
            out_features: dimensionality of transformed node features.
            dropout: dropout rate applied to the attention weights.
            alpha: negative slope of the LeakyReLU on the raw scores.
            concat: if True, apply ELU to the output (intermediate layers).
        """
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.concat = concat

        self.W = nn.Parameter(torch.empty(size=(in_features, out_features)))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a = nn.Parameter(torch.empty(size=(2*out_features, 1)))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)

        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, h, adj):
        """Return attended node features, shape (batch, N, out_features)."""
        # (batch, N, out_features)
        Wh = torch.matmul(h, self.W)
        # (batch, N, N, 2*out_features): every (i, j) pair concatenated.
        a_input = self._prepare_attentional_mechanism_input(Wh)
        B = adj.size()[0]
        N = adj.size()[1]
        # Raw attention scores e_ij, reshaped to (batch, N, N).
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(-1))
        e = e.view(B, N, N)
        # Mask non-edges with a large negative value so softmax zeroes them.
        zero_vec = -9e15*torch.ones_like(e)
        attention = torch.where(adj > 0, e, zero_vec)
        # BUG FIX: normalise over the neighbour axis (dim=2), not dim=1.
        # attention[b, i, j] weights neighbour j of target node i, and the
        # matmul below sums over j, so each row i must sum to one. The
        # original softmax over dim=1 normalised across target nodes instead.
        attention = F.softmax(attention, dim=2)
        attention = F.dropout(attention, self.dropout, training=self.training)
        # Weighted sum of the transformed neighbour features.
        h_prime = torch.matmul(attention, Wh)

        if self.concat:
            return F.elu(h_prime)
        return h_prime

    def _prepare_attentional_mechanism_input(self, Wh):
        """Build all (i, j) feature concatenations.

        Given ``Wh`` of shape (batch, N, out_features), returns a tensor of
        shape (batch, N, N, 2*out_features) whose [b, i, j] entry is
        ``Wh[b, i] || Wh[b, j]``.
        """
        N = Wh.size()[1]  # number of nodes
        # Rows: e1 x N, e2 x N, ..., eN x N     -> (batch, N*N, out_features)
        Wh_repeated_in_chunks = Wh.repeat_interleave(N, dim=1)
        # Rows: e1..eN, repeated N times        -> (batch, N*N, out_features)
        Wh_repeated_alternating = Wh.repeat(1, N, 1)
        # (batch, N*N, 2*out_features), then reshape to the pair grid.
        all_combinations_matrix = torch.cat(
            [Wh_repeated_in_chunks, Wh_repeated_alternating], dim=-1)
        return all_combinations_matrix.view(-1, N, N, 2 * self.out_features)

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'

class Model(nn.Module):
    """Joint XLNet + evidence-graph model for FEVER NLI.

    The claim/evidence pair is encoded by a pretrained ``XLNetForFEVER``;
    mean-pooled GloVe embeddings of the evidence-graph nodes and the graph
    adjacency matrix are passed to the same model as auxiliary inputs.

    NOTE(review): several sub-modules built in ``__init__`` (node_proj, mlp,
    final_mlp, ...) belong to earlier experimental variants and are unused by
    ``forward``; they are kept so existing checkpoints still load.
    """

    def __init__(self, device, weight, vocab_size,
                bert_servant=None,
                bert_path=None,
                 bert_batch_size=32,
                 max_l=256, mlp_d=512, num_of_class=3,
                 drop_r=0.5, kernal_size=10, node_size=100,
                 max_evi_node_l=40, activation_type='relu',
                 embedding_dim=300,
                 args=None):
        """
        Args:
            device: device string/object that batches are moved to.
            weight: pretrained GloVe weight matrix for the token embedding.
            vocab_size: token vocabulary size for the embedding layer.
            bert_servant: tokenizer/encoder helper kept for reader access.
            bert_path: directory of the pretrained XLNet weights/config.
            num_of_class: number of output classes (3 for FEVER labels).
            args: namespace forwarded to ``XLNetForFEVER.from_pretrained``.
        """
        super(Model, self).__init__()
        self.device = device
        self.bert_dim = 1024
        self.max_l = max_l
        self.dropout_layer = nn.Dropout(drop_r)
        self.bert_path = bert_path
        self.bert_config = XLNetConfig.from_pretrained(self.bert_path, num_labels=3)
        # Pretrained XLNet with the FEVER-specific graph-aware head.
        self.bert_model = XLNetForFEVER.from_pretrained(self.bert_path, args, config=self.bert_config)
        self.max_evi_node_l = max_evi_node_l
        self.node_size = node_size
        # GloVe embedding; index 0 is padding.
        self.glove_embd_layer = Embedding(vocab_size, embedding_dim,
                                          weight=weight, padding_index=0)
        self.lstm = nn.LSTM(input_size=300, hidden_size=300,
                            num_layers=1, bidirectional=True, batch_first=True)
        # Legacy projections -- unused by forward, kept for checkpoint compat.
        self.node_proj = nn.Linear(2 * embedding_dim, self.node_size)
        self.lstm_node_proj = nn.Linear(600, self.node_size)
        self.temp_node_proj = nn.Linear(50 * 300, self.node_size)
        print("*"*20, "initialize the pretrained model")
        self.bert_servant = bert_servant
        self.kernal_size = kernal_size

        self.mlp = nn.Linear(self.bert_dim, mlp_d)
        self.sm = nn.Linear(mlp_d, num_of_class)

        self.final_mlp = nn.Linear(4 * self.node_size, self.node_size)
        self.final_sm = nn.Linear(self.node_size, num_of_class)

        self.logits_proj = nn.Linear(mlp_d, num_of_class)
        self.graph_logits_proj = nn.Linear(self.node_size, 16)

        # Validate the activation choice early, even though the activation is
        # only consumed by the (currently disabled) classifier heads.
        if activation_type == 'relu':
            activation = nn.ReLU()
        elif activation_type == 'tanh':
            activation = nn.Tanh()
        else:
            raise ValueError("Not a valid activation!")

    def display(self, exclude=None):
        """Print trainable parameter shapes and the total parameter count.

        Args:
            exclude: iterable of substrings; parameters whose name contains
                any of them are printed but left out of the total
                (defaults to {'glove'}).
        """
        total_p_size = 0
        if exclude is None:
            exclude = {'glove'}

        for name, param in self.named_parameters():
            if param.requires_grad:
                print(name, param.data.size())

                # Skip excluded parameters (e.g. the large GloVe table).
                if any(token in str(name) for token in exclude):
                    continue

                # Element count of this parameter. (The original used a local
                # named `nn`, shadowing the torch.nn module import.)
                count = 1
                for s in list(param.size()):
                    count *= s
                total_p_size += count

        print('Total Size:', total_p_size)

    def raw_input_to_esim_input(self, s_tokens):
        """Embed token ids with GloVe, truncating sequences to 128 tokens.

        Returns:
            (embeddings, lengths): embeddings is (batch, <=128, 300);
            lengths are the per-row non-padding lengths.
        """
        s_tokens = torch_util.length_truncate(s_tokens, 128)
        s1_glove_embd = self.glove_embd_layer(s_tokens)
        s1_mask, s1_len = torch_util.get_length_and_mask(s_tokens)

        return s1_glove_embd, s1_len

    def forward(self, batch):
        """Run XLNet over the paired text plus graph-node features.

        Expects in ``batch``: token_ids / input_mask / token_type_ids for the
        claim-evidence pair, plus evidence_node_tokens (GloVe ids per graph
        node), evidence_node_mask and evidence_edge_matrix.
        """
        token_ids = batch['token_ids'].to(self.device)
        input_mask = batch['input_mask'].to(self.device)
        token_type_ids = batch['token_type_ids'].to(self.device)
        # (batch, node, seq): token ids of each evidence-graph node.
        evidence_glove_tokens = batch['evidence_node_tokens']['tokens'].to(self.device)
        evidence_node_mask = batch['evidence_node_mask'].to(self.device)
        # (batch, node, node): adjacency of the evidence graph.
        evidence_edge_matrix = batch['evidence_edge_matrix'].to(self.device).float()

        batch_size = evidence_glove_tokens.shape[0]
        node_size = evidence_glove_tokens.shape[1]
        seq_size = evidence_glove_tokens.shape[2]

        # Flatten nodes into the batch dim to embed them all in one pass.
        evidence_glove_tokens = evidence_glove_tokens.view(-1, seq_size)
        evidence_glove_embed, evidence_len = self.raw_input_to_esim_input(evidence_glove_tokens)
        evidence_glove_embed = self.dropout_layer(evidence_glove_embed)
        # Mean-pool over the token dimension.
        # NOTE(review): padding positions are included in this mean;
        # `evidence_len` is computed but never used to mask them -- confirm.
        evidence_glove_embed = evidence_glove_embed.mean(dim=1)
        # Back to (batch, node, dim).
        evidence_glove_embed = evidence_glove_embed.view(batch_size, node_size, -1)

        return self.bert_model(input_ids=token_ids,
                               token_type_ids=token_type_ids,
                               attention_mask=input_mask,
                               node_embed=evidence_glove_embed,
                               node_mask=evidence_node_mask,
                               adj_mask=evidence_edge_matrix)


def get_sampled_data(tokenized_data_file, additional_data_file):
    """Sample training data from the tokenized file plus upstream additions.

    Fix: the original called ``sample_v1_1``, which is never imported in this
    module (only ``sample_v1_0`` is, see the nli.nli_sample import) and so
    raised NameError at runtime. NOTE(review): if a ``sample_v1_1`` exists in
    nli.nli_sample and was intended, import it instead.
    """
    sampled_d_list = sample_v1_0(tokenized_data_file, additional_data_file, tokenized=True)
    return sampled_d_list


def get_actual_data(tokenized_data_file, additional_data_file):
    """Select evaluation sentences (no sampling) for the given data files."""
    return select_sent_for_eval(tokenized_data_file, additional_data_file, tokenized=True)

def get_test_data(tokenized_data_file, additional_data_file):
    """Select test-time sentences (labels unavailable) for the given data files."""
    return select_sent_for_test(tokenized_data_file, additional_data_file, tokenized=True)


def full_eval_model(model, data_iter, criterion, dev_data_list, device):
    """Run full FEVER evaluation over ``data_iter``.

    Label mapping: 0 -> SUPPORTS, 1 -> REFUTES, 2 -> NOT ENOUGH INFO.
    Writes ``predicted_label`` into each item of ``dev_data_list`` (items with
    no predicted sentence ids are forced to NOT ENOUGH INFO), then scores the
    list with the official FEVER scorer.

    Returns:
        (strict_score, avg_loss) over the whole dev set.

    Changes vs. original: removed the unused ``avg_acc`` dead code, renamed
    the misspelled local ``totoal_size``, and hoisted the duplicated
    ``torch.max`` argmax computation.
    """
    id2label = {
        0: "SUPPORTS",
        1: "REFUTES",
        2: "NOT ENOUGH INFO"
    }

    print("Evaluating ...")
    model.eval()
    n_correct = loss = 0
    total_size = 0

    y_pred_list = []
    y_true_list = []
    y_id_list = []

    with torch.no_grad():  # no gradients needed during evaluation
        for batch_idx, batch in enumerate(data_iter):
            out = model(batch)
            y = batch['label'].to(device)
            y_id_list.extend(list(batch['pid']))

            pred = torch.max(out, 1)[1].view(y.size())
            n_correct += (pred == y).sum().item()

            y_pred_list.extend(pred.tolist())
            y_true_list.extend(y.tolist())

            # criterion returns a batch mean; re-weight by the batch size.
            loss += criterion(out, y).item() * y.size(0)
            total_size += y.size(0)

        assert len(y_id_list) == len(dev_data_list)
        assert len(y_pred_list) == len(dev_data_list)
        assert len(y_true_list) == len(dev_data_list)

        for i in range(len(dev_data_list)):
            # Predictions come back in reading order; ids must line up.
            assert str(y_id_list[i]) == str(dev_data_list[i]['id'])

            dev_data_list[i]['predicted_label'] = id2label[y_pred_list[i]]
            # Without retrieved evidence the only defensible label is NEI.
            if len(dev_data_list[i]['predicted_sentids']) == 0:
                dev_data_list[i]['predicted_label'] = "NOT ENOUGH INFO"

        print('n_correct:', n_correct)
        print('total_size:', total_size)

        eval_mode = {'check_sent_id_correct': True, 'standard': True}
        strict_score, acc_score, pr, rec, f1 = c_scorer.fever_score(
            dev_data_list, dev_data_list, mode=eval_mode, verbose=False)
        print("Fever Score(Strict/Acc./Precision/Recall/F1):", strict_score, acc_score, pr, rec, f1)

        avg_loss = loss / total_size

    return strict_score, avg_loss

def hidden_eval(model, data_iter, dev_data_list, with_logits=False, with_probs=False):
    """Predict labels for data whose gold label is hidden.

    Label mapping: 0 -> SUPPORTS, 1 -> REFUTES, 2 -> NOT ENOUGH INFO.
    Writes ``predicted_label`` (and optionally ``logits`` / ``probs``) into
    each item of ``dev_data_list`` and returns the list. Items without any
    predicted sentence ids are forced to NOT ENOUGH INFO.
    """
    id2label = {
        0: "SUPPORTS",
        1: "REFUTES",
        2: "NOT ENOUGH INFO"
    }
    logger.info("id2label:{}".format(id2label))
    print("*"*20, "Evaluating ...")

    with torch.no_grad():
        model.eval()
        total_seen = 0

        pred_labels = []
        pred_ids = []
        pred_logits = []
        pred_probs = []

        print("*"*20, "running the model for batch sample:")
        for _, batch in enumerate(tqdm(data_iter)):
            out = model(batch)
            pred_ids.extend(list(batch['pid']))
            pred_labels.extend(torch.max(out, 1)[1].view(out.size(0)).tolist())

            if with_logits:
                pred_logits.extend(out.tolist())
            if with_probs:
                pred_probs.extend(F.softmax(out, dim=1).tolist())

            total_seen += out.size(0)

        assert len(pred_ids) == len(dev_data_list)
        assert len(pred_labels) == len(dev_data_list)

        for idx, item in enumerate(dev_data_list):
            # Predictions must line up one-to-one with the input items.
            assert str(pred_ids[idx]) == str(item['id'])

            item['predicted_label'] = id2label[pred_labels[idx]]
            if with_logits:
                item['logits'] = pred_logits[idx]
            if with_probs:
                item['probs'] = pred_probs[idx]

            # No retrieved evidence -> force NOT ENOUGH INFO.
            if len(item['predicted_sentids']) == 0:
                item['predicted_label'] = "NOT ENOUGH INFO"

        print('total_size:', total_seen)

    return dev_data_list



def open_ie(upstream_file, mode="train"):
    """Load the data list for the requested split.

    Args:
        upstream_file: upstream retrieval/OpenIE output to merge in.
        mode: one of 'train', 'shared_task_dev', 'shared_task_test'.

    Raises:
        ValueError: for an unknown ``mode`` (the original fell through and
        crashed with NameError on the unbound ``data_list``).
    """
    if mode == "train":
        data_list = get_sampled_data(config.T_FEVER_TRAIN_JSONL, upstream_file)
    elif mode == "shared_task_dev":
        data_list = get_actual_data(config.T_FEVER_DEV_JSONL, upstream_file)
    elif mode == "shared_task_test":
        data_list = get_test_data(config.DATA_ROOT / "fever/shared_task_test.jsonl", upstream_file)
    else:
        raise ValueError("Unknown mode: {}".format(mode))

    return data_list

def open_ie_for_claim(upstream_file):
    """Run OpenIE over the claims in ``upstream_file`` and save the result in place."""
    extracted = get_openie_for_claim(upstream_file)
    print("save openie result into{}".format(upstream_file))
    common.save_jsonl(extracted, upstream_file)

def train_fever():
    """Train the joint XLNet + GNN FEVER NLI model.

    Loads the cached GloVe vocabulary/embeddings, reads the upstream
    train/dev files through ``XlnetGCNGloveReader``, fine-tunes ``Model``
    with AdamW, and checkpoints whenever the FEVER strict score on dev
    improves.
    """
    num_epoch = 3
    seed = 12  # NOTE(review): only used in checkpoint names; no RNG is seeded with it -- confirm intent.
    batch_size = 8
    experiment_name = "xlnet_gnn_joint_nli"
    lazy = True
    max_l=256
    max_evi_node_l=20
    # max_claim_node_l=10
    kernal_size=10
    node_size=300
    token_indexers = {
        'tokens': SingleIdTokenIndexer(namespace='tokens'),  # This is the raw tokens
    }
    # Upstream sentence-retrieval / OpenIE outputs for dev and train.
    dev_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_17:40:14_r/nli_openie_shared_task_dev.jsonl"
    train_upstream_file = config.RESULT_PATH / "pipeline_r/2021_03_08_14:35:16_r/nli_openie_train.jsonl"
    #bert pretrained
    bert_src=config.DEP_PATH / "xlnet-large-cased"
    bert_servant = XlnetServant(bert_type_name=bert_src)

    dev_fever_data_reader = XlnetGCNGloveReader(bert_servant, lazy=lazy, max_l=max_l,max_evi_node_l=max_evi_node_l,token_indexers=token_indexers)
    train_fever_data_reader = XlnetGCNGloveReader(bert_servant, lazy=lazy, max_l=max_l,max_evi_node_l=max_evi_node_l,token_indexers=token_indexers)
    complete_upstream_dev_data=load_data(dev_upstream_file)
    print("Dev size:", len(complete_upstream_dev_data))
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    dev_actual_list = common.load_jsonl(config.DATA_ROOT / "fever/shared_task_dev.jsonl")  # NOTE(review): loaded but unused below.
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)
    dev_biterator = BasicIterator(batch_size=batch_size)  # NOTE(review): dev evaluation below reuses `biterator`, not this one.

    # unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    # vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab, weight_dict = load_vocab_embeddings(config.DATA_ROOT / "vocab_cache" / "nli_basic")
    vocab.add_token_to_namespace('SUPPORTS', namespace='labels')
    vocab.add_token_to_namespace('REFUTES', namespace='labels')
    vocab.add_token_to_namespace('NOT ENOUGH INFO', namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    dev_biterator.index_with(vocab)

    # Build Model
    device="cuda"
    parser = argparse.ArgumentParser()
    args = parser.parse_args()  # NOTE(review): no arguments are declared; any CLI flag will abort here.
    model = Model(device=device,bert_servant=bert_servant, bert_batch_size=batch_size,kernal_size=kernal_size,
                  bert_path=bert_src,
                  weight=weight_dict['glove.840B.300d'],
                  vocab_size=vocab.get_vocab_size('tokens'),
                  max_l=max_l,max_evi_node_l=max_evi_node_l,node_size=node_size,num_of_class=3,args=args)
    model.display()
    model.to(device)
    # bert_servant.bert_model.to(device)
    model = nn.DataParallel(model)
    # Create Log File
    file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
    # Save a copy of this script next to the checkpoints for reproducibility.
    script_name = os.path.basename(__file__)
    with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
        out_f.write(it.read())
        out_f.flush()
    # Save source code end.

    best_dev = -1
    iteration = 0

    start_lr = 1e-5
    optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=start_lr,eps=1e-6)
    criterion = nn.CrossEntropyLoss()
    # for i,p in enumerate(model.named_parameters()):
    #     if p[1].requires_grad==False:
    #         print(i,p[0])
    for i_epoch in range(num_epoch):
        print("Resampling...")
        # Resampling: the upstream file is re-read each epoch (the reader may
        # lazily re-sample instances from the same contents).
        complete_upstream_train_data=load_data(train_upstream_file)
        sampled_train_instances = train_fever_data_reader.read(complete_upstream_train_data)
        train_iter = biterator(sampled_train_instances, shuffle=True, num_epochs=1)
        for i, batch in tqdm(enumerate(train_iter)):
            model.train()
            out = model(batch)
            y = batch['label'].to(device)
            loss = criterion(out, y)
            # No decay
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            iteration += 1

            if iteration%10==0:
                print('epoch: {}, batch: {},loss: {}'.format(i_epoch + 1, i + 1,loss.data))

            # Evaluate every 3000 steps early on, every 1000 in later epochs.
            if i_epoch <= 2:
                mod = 3000
            else:
                mod = 1000

            if iteration % mod == 0:
                eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
                dev_score, dev_loss = full_eval_model(model, eval_iter, criterion, complete_upstream_dev_data,device)

                print(f"Dev:{dev_score}/{dev_loss}")

                # Checkpoint only on a new best strict score.
                need_save = False
                if dev_score > best_dev:
                    best_dev = dev_score
                    need_save = True

                if need_save:
                    save_path = os.path.join(
                        file_path_prefix,
                        f'i({iteration})_epoch({i_epoch})_dev({dev_score})_loss({dev_loss})_seed({seed})'
                    )

                    torch.save(model.state_dict(), save_path)
                



def pipeline_gnn_nli_run(t_org_file, complete_upstream_dev_data, model_path,
                     with_logits=False, with_probs=False, load_from_dict=False):
    """Label upstream dev data with a trained (GNN-reader) NLI model.

    Args:
        t_org_file: NOTE(review): accepted but never used in this function.
        complete_upstream_dev_data: list of upstream items, labelled in place.
        model_path: checkpoint path (raw state_dict, or EMA file when
            ``load_from_dict`` is True).
        with_logits: also attach raw logits to each item.
        with_probs: also attach softmax probabilities to each item.
        load_from_dict: load EMA shadow weights instead of a raw state_dict.

    Returns:
        The same list with ``predicted_label`` (etc.) filled in by hidden_eval.
    """
    batch_size = 16
    lazy = True
    max_l=256
    max_evi_node_l=40
    max_claim_node_l=40
    kernal_size=10  # NOTE(review): unused in this function.
    print("*"*20,"pipeline nli run start:")
    print("Size:", len(complete_upstream_dev_data))

    #bert pretrained
    bert_src=config.DEP_PATH / "xlnet-large-cased"
    bert_servant = XlnetServant(bert_type_name=bert_src)

    dev_fever_data_reader = XlnetGCNReader(bert_servant, lazy=lazy, max_l=max_l,max_claim_node_l=max_claim_node_l,max_evi_node_l=max_evi_node_l)

    # All gold labels are hidden at inference time.
    complete_upstream_dev_data= append_hidden_label(complete_upstream_dev_data)
    print("*"*20,"read the dev data:")
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    print("*"*20,"initialize the BasicIterator:")
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    print("*"*20,"load the vocab embedding:")
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    # Build Model
    print("*"*20,"build the model")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # NOTE(review): unused.

    # NOTE(review): Model.__init__ requires positional `device`, `weight` and
    # `vocab_size`; this call omits them and will raise TypeError as written.
    # Confirm the intended Model signature before using this code path.
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                    max_l=max_l, num_of_class=3)

    if not load_from_dict:
        model.load_state_dict(torch.load(model_path))
    else:
        load_ema_to_model(model, model_path)

    print("*"*20,"model display:")
    model.display()
    model.to(device)
    print("*"*20,"build the BasicIterator:")
    eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
    print("*"*20,"start evaluate the hidden")
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data,
                                             with_logits=with_logits,
                                             with_probs=with_probs)

    return complete_upstream_dev_data



def pipeline_concat_nli_run(t_org_file, complete_upstream_dev_data, model_path,
                    with_logits=False, with_probs=False, load_from_dict=False):
    """Label upstream dev data with a trained concat-style (RoBERTa) NLI model.

    Args:
        t_org_file: NOTE(review): accepted but never used in this function.
        complete_upstream_dev_data: list of upstream items, labelled in place.
        model_path: checkpoint path (raw state_dict, or EMA file when
            ``load_from_dict`` is True).
        with_logits: also attach raw logits to each item.
        with_probs: also attach softmax probabilities to each item.
        load_from_dict: load EMA shadow weights instead of a raw state_dict.

    Returns:
        The same list with ``predicted_label`` (etc.) filled in by hidden_eval.
    """
    batch_size = 16
    lazy = True
    max_l=256
    print("*"*20,"pipeline nli run start:")
    print("Size:", len(complete_upstream_dev_data))

    #bert pretrained
    bert_src=config.DEP_PATH / "roberta"
    bert_servant = RobertaServant(bert_type_name=bert_src)
    dev_fever_data_reader = RobertaNLIReader(bert_servant, lazy=lazy, max_l=max_l)

    # All gold labels are hidden at inference time.
    complete_upstream_dev_data= append_hidden_label(complete_upstream_dev_data)
    print("*"*20,"read the dev data:")
    dev_instances = dev_fever_data_reader.read(complete_upstream_dev_data)
    print("*"*20,"initialize the BasicIterator:")
    # Load Vocabulary
    biterator = BasicIterator(batch_size=batch_size)

    print("*"*20,"load the vocab embedding:")
    unk_token_num = {'tokens': 2600}  # work around for initiating vocabulary.
    vocab = ExVocabulary(unk_token_num=unk_token_num)
    vocab.change_token_with_index_to_namespace('hidden', -2, namespace='labels')
    print(vocab.get_token_to_index_vocabulary('labels'))
    print(vocab)

    biterator.index_with(vocab)
    # Build Model
    print("*"*20,"build the model")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", index=0)
    device_num = -1 if device.type == 'cpu' else 0  # NOTE(review): unused.

    # NOTE(review): Model.__init__ requires positional `device`, `weight` and
    # `vocab_size`; this call omits them and will raise TypeError as written.
    # Confirm the intended Model signature before using this code path.
    model = Model(bert_servant=bert_servant, bert_batch_size=batch_size,
                    max_l=max_l, num_of_class=3)

    if not load_from_dict:
        model.load_state_dict(torch.load(model_path))
    else:
        load_ema_to_model(model, model_path)

    print("*"*20,"model display:")
    model.display()
    model.to(device)
    print("*"*20,"build the BasicIterator:")
    eval_iter = biterator(dev_instances, shuffle=False, num_epochs=1)
    print("*"*20,"start evaluate the hidden")
    complete_upstream_dev_data = hidden_eval(model, eval_iter, complete_upstream_dev_data,
                                             with_logits=with_logits,
                                             with_probs=with_probs)

    return complete_upstream_dev_data


def append_hidden_label(d_list):
    """Mark every item with the placeholder label 'hidden' and return the list."""
    for entry in d_list:
        entry['label'] = 'hidden'
    return d_list


if __name__ == "__main__":
    # Entry point: train the joint XLNet + GNN NLI model.
    # (The commented lines were a one-off OpenIE preprocessing step.)
    #upstream_file=config.RESULT_PATH / "pipeline_r" / "2021_03_08_14:35:16_r" / "nli_openie_train.jsonl"
    #open_ie_for_claim(upstream_file)
    train_fever()

