# -*- coding: utf-8 -*-
"""
@Time ： 2024/3/29 8:50
@Auth ： fcq
@File ：train_target_invariant_label.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""
import argparse
import pickle

import numpy as np
import torch
from models.BERT_SPC import BERT_SPC
from models.BERT_JOINT import BERT_JOINT
from torch import optim
import os
import json
from time import strftime, localtime
from torch.utils.data import RandomSampler,DataLoader
from data_utils import Tokenizer4Bert, ZSSDDataset
from utils.criterion import TraditionCriterion
from transformers import BertModel
from sklearn.metrics import f1_score, accuracy_score
import warnings
warnings.filterwarnings("ignore")
# Pin the process to a single GPU.
# CUDA_VISIBLE_DEVICES must be exported BEFORE the CUDA context is created;
# the original order set the env var after torch.cuda.set_device(), at which
# point it no longer has any effect.
gpu_id = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
# Within the restricted visible set the (only) device is index 0.
torch.cuda.set_device(0)


def init_args():
    """Parse command-line options and derive run-time configuration.

    Returns an ``argparse.Namespace`` extended with the model class, the
    optimizer class, the input-feature names the model consumes, the number
    of labels, and a timestamped output directory (created if missing).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_name', default='bert-sep', type=str, required=False)
    # NOTE(review): the default is the int 0 but explicit values parse as str
    # ("0"/"1"/"2") — downstream comparisons must not mix the two; confirm.
    parser.add_argument('--type', default=0, help='2 for all,0 for zero shot ,1 for few shot', type=str, required=False)
    parser.add_argument('--dataset', default='trainset', type=str, required=False)
    parser.add_argument('--output_par_dir', default='bert-sep', type=str)
    parser.add_argument('--polarities', default=["pro", "con", "neutral"], nargs='+',
                        help="if just two polarity switch to ['positive', 'negtive']", required=False)
    parser.add_argument('--optimizer', default='adam', type=str, required=False)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str, required=False)
    parser.add_argument('--lr', default=5e-6, type=float, help='try 5e-5, 2e-5, 1e-3 for others', required=False)
    parser.add_argument('--dropout', default=0.1, type=float, required=False)
    parser.add_argument('--l2reg', default=0.001, type=float, required=False)
    parser.add_argument('--log_step', default=10, type=int, required=False)
    parser.add_argument('--log_path', default="./log", type=str, required=False)
    parser.add_argument('--embed_dim', default=300, type=int, required=False)
    parser.add_argument('--hidden_dim', default=128, type=int, required=False, help="lstm encoder hidden size")
    parser.add_argument('--feature_dim', default=2 * 128, type=int, required=False,
                        help="feature dim after encoder depends on encoder")
    parser.add_argument('--output_dim', default=64, type=int, required=False)
    parser.add_argument('--relation_dim', default=100, type=int, required=False)
    parser.add_argument('--bert_dim', default=768, type=int, required=False)
    parser.add_argument('--pretrained_bert_name', default='../../bot-detection/BERT_PretrainModel/bert-base-uncased',
                        type=str, required=False)
    parser.add_argument('--max_seq_len', default=200, type=int, required=False)
    parser.add_argument('--train_dir', default='./VAST/vast_train.csv.json', type=str, required=False)
    parser.add_argument('--mask_dir', default='./augment_data/mask/vast_mask.json', type=str, required=False)
    parser.add_argument('--sentence_dir', default='./augment_data/sentence/lda_vast.json', type=str, required=False)
    parser.add_argument('--alpha', default=0.8, type=float, required=False)
    parser.add_argument('--beta', default=1.2, type=float, required=False)

    parser.add_argument('--device', default='cuda:0', type=str, help='e.g. cuda:0', required=False)
    parser.add_argument('--seed', default=0, type=int, help='set seed for reproducibility')

    parser.add_argument("--batch_size", default=16, type=int, required=False)
    parser.add_argument("--eval_batch_size", default=16, type=int, required=False)
    parser.add_argument("--epochs", default=15, type=int, required=False)
    parser.add_argument("--eval_steps", default=50, type=int, required=False)
    opt = parser.parse_args()

    # Derived settings that are not exposed on the command line.
    opt.n_gpus = torch.cuda.device_count()
    opt.model_class = BERT_JOINT
    # Feature keys the model pulls out of each dataset batch, in order.
    opt.input_features = ['concat_bert_indices', 'concat_segments_indices', 'attention_mask']
    opt.optim_class = optim.Adam
    # One fresh output directory per run, keyed by start time.
    opt.output_dir = os.path.join(opt.output_par_dir, opt.model_name, opt.dataset,
                                  strftime("%Y-%m-%d %H-%M-%S", localtime()))
    opt.num_labels = len(opt.polarities)
    os.makedirs(opt.output_dir, exist_ok=True)

    return opt


class Instructor(object):
    """Drives supervised training on VAST and re-labels the augmented data.

    Builds the model/tokenizer, loads (or builds and caches) the original,
    masked and sentence-level datasets, trains with a plain cross-entropy
    objective, then uses the best-accuracy checkpoint to write a
    ``cross_label_new`` agreement flag for every training example.
    """

    def __init__(self, opt):
        self.opt = opt
        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert_model = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(opt, bert_model).to(opt.device)
        print("using model: ", opt.model_name)
        print("running dataset: ", opt.dataset)
        print("output_dir: ", opt.output_dir)
        train_file_name = 'saved_dataset/vast_train1.dat'
        mask_file_name = './vast_mask1.dat'
        sentence_file_name = './vast_sentence1.dat'
        try:
            # Reuse cached, pre-tokenized datasets when available.
            with open(train_file_name, 'rb') as f:
                self.trainset = pickle.load(f)
            with open(mask_file_name, 'rb') as f:
                self.maskset = pickle.load(f)
            with open(sentence_file_name, 'rb') as f:
                self.sentenceset = pickle.load(f)
        except Exception:
            # Cache miss or stale/corrupt cache: rebuild from the raw files.
            # (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            self.trainset = ZSSDDataset(data_dir=self.opt.train_dir, tokenizer=tokenizer, opt=self.opt,
                                        data_type='train', bert=bert_model)
            self.maskset = ZSSDDataset(data_dir=self.opt.mask_dir, tokenizer=tokenizer, opt=self.opt,
                                       data_type='train')
            self.sentenceset = ZSSDDataset(data_dir=self.opt.sentence_dir, tokenizer=tokenizer, opt=self.opt,
                                           data_type='train')
            with open(train_file_name, 'wb') as f:
                pickle.dump(self.trainset, f)
            with open(mask_file_name, 'wb') as f:
                pickle.dump(self.maskset, f)
            with open(sentence_file_name, 'wb') as f:
                pickle.dump(self.sentenceset, f)

        self.criterion = TraditionCriterion(opt)
        self.optimizer = self.opt.optim_class(self.model.parameters(), lr=self.opt.lr)

    def write_new_label(self):
        """Predict stances for the masked and sentence-augmented variants of
        each training example and record whether the two predictions agree.

        Writes ``./VAST/lda_vast_train.json``: one record per example with
        text, target, gold label, and ``cross_label_new`` (1 iff the masked
        and sentence variants received the same predicted stance).
        """
        # shuffle=False keeps the three loaders aligned example-by-example;
        # this assumes the three datasets are stored in the same order —
        # TODO(review): confirm against the augmentation pipeline.
        mask_loader = DataLoader(dataset=self.maskset, batch_size=self.opt.batch_size, shuffle=False)
        sentence_loader = DataLoader(dataset=self.sentenceset, batch_size=self.opt.batch_size, shuffle=False)
        orig_loader = DataLoader(dataset=self.trainset, batch_size=self.opt.batch_size, shuffle=False)

        n_cross = 0
        n_not = 0
        records = []

        # Switch to evaluation mode and disable autograd for inference.
        self.model.eval()
        with torch.no_grad():
            for mask_batch, sent_batch, orig_batch in zip(mask_loader, sentence_loader, orig_loader):
                mask_inputs = [mask_batch[col].to(self.opt.device) for col in self.opt.input_features]
                mask_logits, _ = self.model(mask_inputs)

                sent_inputs = [sent_batch[col].to(self.opt.device) for col in self.opt.input_features]
                sent_logits, _ = self.model(sent_inputs)

                mask_pred = torch.argmax(mask_logits, -1)
                sent_pred = torch.argmax(sent_logits, -1)
                stances = orig_batch['polarity'].cpu().detach().numpy()
                for mp, sp, text, target, stance in zip(mask_pred, sent_pred,
                                                        orig_batch['text'], orig_batch['target'], stances):
                    agree = 1 if mp == sp else 0
                    n_cross += agree
                    n_not += 1 - agree
                    records.append({
                        'text': text,
                        'target': target,
                        'label': int(stance),
                        'cross_label_new': agree,
                    })
        with open('./VAST/' + 'lda_vast_train.json', 'w', encoding='utf-8') as w_fp:
            json.dump(records, w_fp, ensure_ascii=False)
        print(self.opt.dataset)
        print('cross rate:', n_cross / (n_cross + n_not))
        print('count:', n_cross, n_not)

    def run_tradition(self):
        """Train, reload the best-accuracy checkpoint, then relabel."""
        best_acc, best_f1 = self.train_tradition()
        # Was the module-level global ``opt``; use the instance's options.
        state_dict_dir = self.opt.output_dir + "/state_dict"
        print("\n\nReload the best model with best acc {} from path {}\n\n".format(best_acc, state_dict_dir))
        ckpt = torch.load(os.path.join(state_dict_dir, "best_acc_model.bin"))
        self.model.load_state_dict(ckpt)
        self.write_new_label()

    def train_tradition(self):
        """Standard supervised training loop.

        Checkpoints the model whenever training accuracy or macro-F1
        improves and returns ``(best_acc, best_f1)``.
        NOTE(review): model selection uses *training* metrics — there is no
        validation split here.
        """
        sampler = RandomSampler(self.trainset)
        train_loader = DataLoader(self.trainset, batch_size=self.opt.batch_size, sampler=sampler)
        print("Train loader length: {}".format(len(train_loader)))
        optimizer = self.optimizer
        best_acc = 0
        best_f1 = 0
        cnt = 0

        for i_epoch in range(self.opt.epochs):
            print('>' * 20, 'epoch:{}'.format(i_epoch), '<' * 20)
            n_correct, n_total, loss_total = 0, 0, 0
            pred = []
            gt = []
            self.model.train()
            for i_batch, batch in enumerate(train_loader):
                optimizer.zero_grad()

                input_features = [batch[feat_name].to(self.opt.device) for feat_name in self.opt.input_features]
                true_stance = batch['polarity'].to(self.opt.device)

                logits, _ = self.model(input_features)
                loss = self.criterion(logits, true_stance)

                # retain_graph=True was unnecessary (each graph is used once)
                # and kept every batch's graph alive, growing memory.
                loss.backward()
                optimizer.step()

                batch_pred = torch.argmax(logits, -1)
                pred += batch_pred.detach().cpu().tolist()
                gt += true_stance.detach().cpu().tolist()
                n_correct += (batch_pred == true_stance).sum().item()
                n_total += len(logits)
                loss_total += loss.item() * len(logits)
                if cnt % self.opt.log_step == 0:
                    print("Train step: {} acc:{} loss: {}".format(cnt, n_correct / n_total, loss_total / n_total))
                cnt += 1

            # Epoch-level metrics over every training example seen this epoch.
            train_f1 = f1_score(y_true=np.array(gt), y_pred=np.array(pred), average='macro')
            train_acc = accuracy_score(y_true=np.array(gt), y_pred=np.array(pred))

            # Was the module-level global ``opt``; use the instance's options.
            state_dict_dir = self.opt.output_dir + "/state_dict"
            if train_acc > best_acc:
                best_acc = train_acc
                print('Better ACC! Saving model!')
                print("Saving model of best acc: {}".format(best_acc))
                os.makedirs(state_dict_dir, exist_ok=True)
                torch.save(self.model.state_dict(), os.path.join(state_dict_dir, "best_acc_model.bin"))
            if train_f1 > best_f1:
                best_f1 = train_f1
                print('Better F1! Saving model!')
                print("Saving model of best f1: {}".format(best_f1))
                os.makedirs(state_dict_dir, exist_ok=True)
                torch.save(self.model.state_dict(), os.path.join(state_dict_dir, "best_f1_model.bin"))

        print("Training finished.")
        return best_acc, best_f1


if __name__ == "__main__":
    opt = init_args()
    # --seed was parsed (see init_args) but never applied; seed all RNGs so
    # runs are reproducible.
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed_all(opt.seed)
    np.random.seed(opt.seed)
    ins = Instructor(opt)
    ins.run_tradition()
