# -*- coding: utf-8 -*-
"""
@Time ： 2024/4/1 9:04
@Auth ： fcq
@File ：train_MoE_step1.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""
import argparse
import pickle

import numpy as np
import torch
from models.BERT_MoE_Model import MoEModel
from torch import optim
import os
import json
from time import strftime, localtime
from torch.utils.data import RandomSampler, DataLoader
from data_utils import Tokenizer4Bert, ZSSDDataset
from utils.criterion import TraditionCriterion
from transformers import BertModel, BertConfig
from sklearn.metrics import f1_score, accuracy_score, classification_report
import warnings
import torch.nn.functional as F
import torch.nn as nn
warnings.filterwarnings("ignore")
gpu_id = 0
# CUDA_VISIBLE_DEVICES must be exported BEFORE any CUDA context is created;
# in the original it was set after torch.cuda.set_device(), by which point
# the driver has already enumerated devices and the variable is ignored.
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
torch.cuda.set_device(gpu_id)


def init_args():
    """Parse command-line options and derive run-time configuration.

    Returns the argparse namespace augmented with: n_gpus, model_class,
    input_features (keys pulled from each batch dict), optim_class,
    output_dir (timestamped, created on disk), num_labels, bert_config.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_name', default='bert-MoE-step1', type=str, required=False)
    # NOTE(review): default is the int 0 but type=str, so a user-supplied
    # value arrives as a string while the default stays an int — confirm how
    # downstream code compares this before changing the type.
    parser.add_argument('--type', default=0, help='2 for all,0 for zero shot ,1 for few shot', type=str, required=False)
    parser.add_argument('--dataset', default='zeroshot', type=str, required=False)
    parser.add_argument('--output_par_dir', default='bert-MoE', type=str)
    parser.add_argument('--polarities', default=["pro", "con", "neutral"], nargs='+',
                        help="if just two polarity switch to ['positive', 'negtive']", required=False)
    parser.add_argument('--optimizer', default='adam', type=str, required=False)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str, required=False)
    parser.add_argument('--lr', default=5e-6, type=float, help='try 5e-5, 2e-5, 1e-3 for others', required=False)
    parser.add_argument('--dropout', default=0.1, type=float, required=False)
    parser.add_argument('--l2reg', default=0.001, type=float, required=False)
    parser.add_argument('--log_step', default=10, type=int, required=False)
    parser.add_argument('--log_path', default="./log", type=str, required=False)
    parser.add_argument('--embed_dim', default=300, type=int, required=False)
    parser.add_argument('--hidden_dim', default=128, type=int, required=False, help="lstm encoder hidden size")
    parser.add_argument('--feature_dim', default=2 * 128, type=int, required=False,
                        help="feature dim after encoder depends on encoder")
    parser.add_argument('--output_dim', default=64, type=int, required=False)
    parser.add_argument('--relation_dim', default=100, type=int, required=False)
    parser.add_argument('--bert_dim', default=768, type=int, required=False)
    parser.add_argument('--pretrained_bert_name', default='../../bot-detection/BERT_PretrainModel/bert-base-uncased',
                        type=str, required=False)
    parser.add_argument('--max_seq_len', default=200, type=int, required=False)
    parser.add_argument('--train_dir', default='./VAST/vast_train.csv.json', type=str, required=False)
    parser.add_argument('--val_dir', default='./VAST/vast_dev.csv.json', type=str, required=False)
    parser.add_argument('--test_dir', default='./VAST/vast_test.csv.json', type=str, required=False)
    parser.add_argument('--alpha', default=0.8, type=float, required=False)
    parser.add_argument('--beta', default=1.2, type=float, required=False)

    parser.add_argument('--device', default='cuda:0', type=str, help='e.g. cuda:0', required=False)
    parser.add_argument('--seed', default=0, type=int, help='set seed for reproducibility')

    parser.add_argument("--batch_size", default=16, type=int, required=False)
    parser.add_argument("--eval_batch_size", default=16, type=int, required=False)
    parser.add_argument("--epochs", default=15, type=int, required=False)
    parser.add_argument("--eval_steps", default=50, type=int, required=False)
    opt = parser.parse_args()

    opt.n_gpus = torch.cuda.device_count()
    opt.model_class = MoEModel
    # Batch-dict keys fed to the model, in positional order.
    opt.input_features = ['concat_bert_indices', 'concat_segments_indices', 'attention_mask', 'gate_output']
    opt.optim_class = optim.Adam
    # One timestamped output directory per run.
    opt.output_dir = os.path.join(opt.output_par_dir, opt.model_name, opt.dataset,
                                  strftime("%Y-%m-%d %H-%M-%S", localtime()))
    opt.num_labels = len(opt.polarities)
    opt.bert_config = BertConfig.from_pretrained(opt.pretrained_bert_name)
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs(opt.output_dir, exist_ok=True)

    return opt


class Instructor(object):
    """Drives the MoE stance-detection pipeline: builds the model, restores
    pre-trained expert checkpoints, loads (or builds and caches) the VAST
    datasets, and runs training / evaluation."""

    def __init__(self, opt):
        """Construct model, optimizer, criterion and datasets.

        opt: the populated namespace from init_args(); must provide
        model_class, optim_class, input_features, device, lr, etc.
        """
        self.opt = opt
        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert_model = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(opt, bert_model).to(opt.device)
        # Restore the two expert attention modules from separately trained
        # cross-label runs. NOTE(review): the 0/1 suffixes are deliberately
        # crossed between checkpoint path and target module in the original;
        # preserved here — confirm the mapping is intended.
        ckpt_cross_label_1 = torch.load(os.path.join('subset_saved_model/bert-sep/cross_label_new_0/2024-04-01 11-23-41/state_dict', "best_acc_model.bin"))
        ckpt_cross_label_0 = torch.load(os.path.join('subset_saved_model/bert-sep/cross_label_new_1/2024-04-01 11-21-07/state_dict', 'best_acc_model.bin'))
        self.model.attn_module1.load_state_dict(ckpt_cross_label_0)
        self.model.attn_module2.load_state_dict(ckpt_cross_label_1)
        print("using model: ", opt.model_name)
        print("running dataset: ", opt.dataset)
        print("output_dir: ", opt.output_dir)
        train_file_name = 'saved_dataset/vast_train1.dat'
        val_file_name = 'saved_dataset/vast_val0.dat'
        test_file_name = 'saved_dataset/vast_test0.dat'
        try:
            # Fast path: reuse cached, pre-tokenized datasets. Context
            # managers close the files (the original leaked the handles),
            # and the clause is narrowed from a bare `except:`.
            with open(train_file_name, 'rb') as fh:
                self.trainset = pickle.load(fh)
            with open(val_file_name, 'rb') as fh:
                self.valset = pickle.load(fh)
            with open(test_file_name, 'rb') as fh:
                self.testset = pickle.load(fh)
        except (OSError, pickle.UnpicklingError, EOFError):
            # Cache miss: rebuild and re-cache the datasets.
            # NOTE(review): the train-split construction is commented out
            # upstream, so the original crashed on pickle.dump(self.trainset)
            # with AttributeError; that dump is now guarded.
            # self.trainset = ZSSDDataset(data_dir=self.opt.train_dir, tokenizer=tokenizer, opt=self.opt,
            #                             data_type='train', bert=bert_model)

            self.valset = ZSSDDataset(data_dir=self.opt.val_dir, tokenizer=tokenizer, opt=self.opt, data_type='val',
                                      bert=bert_model)
            self.testset = ZSSDDataset(data_dir=self.opt.test_dir, tokenizer=tokenizer, opt=self.opt, data_type='test',
                                       bert=bert_model)
            if hasattr(self, 'trainset'):
                with open(train_file_name, 'wb') as fh:
                    pickle.dump(self.trainset, fh)
            with open(val_file_name, 'wb') as fh:
                pickle.dump(self.valset, fh)
            with open(test_file_name, 'wb') as fh:
                pickle.dump(self.testset, fh)

        # Per-sample CE (reduction='none') so the losses can be re-weighted
        # by the gate output before batch-averaging.
        self.criterion = nn.CrossEntropyLoss(reduction='none')
        self.optimizer = self.opt.optim_class(list(self.model.parameters()), lr=self.opt.lr)

    def _weighted_loss(self, expert_logits, weight, true_stance):
        """Gate-weighted cross-entropy over the per-expert logits.

        expert_logits is treated as (batch, num_labels, num_experts) —
        inferred from the reshaping below, confirm against MoEModel.forward.
        Each expert's per-sample CE is scaled by the gate weight and the
        total is averaged over the batch.
        """
        num_labels = expert_logits.size(1)  # was hard-coded as 3 in the original
        flat_logits = expert_logits.permute(0, 2, 1).contiguous().view(-1, num_labels)
        # Repeat each target once per expert so shapes line up.
        flat_targets = true_stance.unsqueeze(1).repeat(1, expert_logits.size(2)).view(-1)
        per_sample = self.criterion(flat_logits, flat_targets)
        per_sample = per_sample.view(expert_logits.size(0), expert_logits.size(2))
        return (per_sample * weight).sum() / weight.shape[0]

    def _save_checkpoint(self, filename):
        """Write the current model weights to <output_dir>/state_dict/<filename>.

        Fixes the original's reliance on the module-global `opt` and the
        exists()/makedirs() race (exist_ok=True).
        """
        state_dict_dir = os.path.join(self.opt.output_dir, "state_dict")
        os.makedirs(state_dict_dir, exist_ok=True)
        torch.save(self.model.state_dict(), os.path.join(state_dict_dir, filename))

    def run_tradition(self):
        """Reload the best-acc and best-f1 checkpoints of a finished run and
        evaluate each on the test set.

        Returns (acc, f1) of the best-f1 checkpoint (the last one evaluated).
        """
        # best_acc, best_f1 = self.train_tradition()
        # NOTE(review): checkpoint directory is hard-coded to one specific
        # earlier run; parameterize (e.g. via opt) before reuse.
        state_dict_dir = 'bert-MoE/bert-MoE-step1/zeroshot/2024-04-03 08-24-10' + '/state_dict'
        ckpt = torch.load(os.path.join(state_dict_dir, "best_acc_model.bin"))
        self.model.load_state_dict(ckpt)

        acc, f1 = self.evaluation_tradition(self.testset)

        ckpt = torch.load(os.path.join(state_dict_dir, "best_f1_model.bin"))
        self.model.load_state_dict(ckpt)
        acc, f1 = self.evaluation_tradition(self.testset)

        return acc, f1

    def train_tradition(self):
        """Train for opt.epochs epochs with the gate-weighted CE loss,
        validating after each epoch and checkpointing on improvements.

        Returns (best_acc, best_f1) observed on the validation set.
        """
        sampler = RandomSampler(self.trainset)
        train_loader = DataLoader(self.trainset, batch_size=self.opt.batch_size, sampler=sampler)
        # The original also built an unused val_loader here with the *train*
        # sampler, which would mis-index the validation set; it is dropped —
        # validation goes through evaluation_tradition below.
        print("Train loader length: {}".format(len(train_loader)))
        optimizer = self.optimizer
        best_acc = 0
        best_f1 = 0
        cnt = 0

        for i_epoch in range(self.opt.epochs):
            print('>' * 20, 'epoch:{}'.format(i_epoch), '<' * 20)
            n_correct, n_total, loss_total = 0, 0, 0
            self.model.train()
            for i_batch, batch in enumerate(train_loader):
                optimizer.zero_grad()

                input_features = [batch[feat_name].to(self.opt.device) for feat_name in self.opt.input_features]
                true_stance = batch['polarity'].to(self.opt.device)

                # per-expert logits, gate weights, fused prediction logits
                logits_, weight, logits = self.model(input_features)
                loss = self._weighted_loss(logits_, weight, true_stance)
                loss.backward()
                optimizer.step()

                n_correct += (torch.argmax(logits, -1) == true_stance).sum().item()
                n_total += len(logits)
                loss_total += loss.item() * len(logits)
                if cnt % self.opt.log_step == 0:
                    train_acc = n_correct / n_total
                    train_loss = loss_total / n_total
                    print("Train step: {} acc:{} loss: {}".format(cnt, train_acc, train_loss))
                cnt += 1

            eval_acc, eval_f1 = self.evaluation_tradition(self.valset)

            if eval_acc > best_acc:
                print('Better ACC! Saving model!')
                best_acc = eval_acc
                print("Saving model of best acc: {}".format(best_acc))
                self._save_checkpoint("best_acc_model.bin")
            if eval_f1 > best_f1:
                print('Better F1! Saving model!')
                best_f1 = eval_f1
                print("Saving model of best f1: {}".format(best_f1))
                self._save_checkpoint("best_f1_model.bin")

        print("Training finished.")
        return best_acc, best_f1

    def evaluation_tradition(self, dataset):
        """Evaluate the current model on `dataset`.

        Prints a classification report, restores train mode, and returns
        (accuracy, macro-F1).
        """
        self.model.eval()
        # NOTE(review): RandomSampler is unnecessary for evaluation (metrics
        # are order-independent) but kept to preserve original behavior.
        sampler = RandomSampler(dataset)
        dev_loader = DataLoader(dataset=dataset, batch_size=self.opt.eval_batch_size, sampler=sampler)
        all_labels = []
        all_logits = []

        for i_batch, batch in enumerate(dev_loader):
            input_features = [batch[feat_name].to(self.opt.device) for feat_name in self.opt.input_features]
            true_stance = batch['polarity'].to(self.opt.device)
            with torch.no_grad():
                # The eval loss of the original was computed but never
                # reported or returned, so it is omitted here.
                logits_, weight, logits = self.model(input_features)
            all_labels.append(true_stance.detach().cpu().numpy())
            all_logits.append(logits.detach().cpu().numpy())
        all_labels = np.concatenate(all_labels, axis=0)
        all_logits = np.concatenate(all_logits, axis=0)
        preds = all_logits.argmax(axis=1)

        acc = accuracy_score(y_true=all_labels, y_pred=preds)
        f1 = f1_score(all_labels, preds, average='macro')

        print(classification_report(all_labels, preds, digits=6))
        print("Test Acc: {} F1:{}".format(acc, f1))
        self.model.train()
        return acc, f1


if __name__ == "__main__":
    # Parse the run configuration, build the trainer and run the
    # evaluation pipeline. `opt` stays a module-level name because
    # train_tradition references the global `opt` when saving checkpoints.
    opt = init_args()
    Instructor(opt).run_tradition()