# -*- coding: utf-8 -*-
"""
@Time ： 2024/4/20 9:48
@Auth ： fcq
@File ：write_predict_label.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)
"""

import argparse
import pickle

import numpy as np
import torch
from models.BERT_SPC import BERT_SPC
from torch import optim
import os
import json
from time import strftime, localtime
from torch.utils.data import RandomSampler, DataLoader
from data_utils import Tokenizer4Bert, ZSSDDataset, ZSSDDataset_split
from utils.criterion import TraditionCriterion
from transformers import BertModel, BertConfig
from sklearn.metrics import f1_score, accuracy_score, classification_report
import warnings

warnings.filterwarnings("ignore")
# Pin all work to one GPU. CUDA_VISIBLE_DEVICES must be exported BEFORE any
# CUDA context is created (torch.cuda.set_device initializes one), otherwise
# the environment variable has no effect on device visibility — so set the
# env var first, then select the device.
gpu_id = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
torch.cuda.set_device(gpu_id)
# Index -> human-readable label maps used when dumping predictions to JSON.
# NOTE(review): stance index 1 is written as 'pos' here but the CLI default
# polarity list uses 'pro' — confirm the intended naming against the dataset.
idx2stance = {0: 'con', 1: 'pos', 2: 'neutral'}
idx2senti = {0: 'negative', 1: 'positive', 2: "neutral"}

def init_args():
    """Parse command-line options and derive run-time settings.

    Returns:
        argparse.Namespace: parsed CLI options augmented with derived fields
        (model class, input feature names, optimizer class, timestamped
        output directory, number of labels, and the pretrained BertConfig).
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--model_name', default='bert-sep', type=str, required=False)
    # BUG FIX: default was the int 0 while type=str, so opt.type was an int
    # when defaulted but a str when supplied on the CLI; use int consistently.
    parser.add_argument('--type', default=0, help='2 for all,0 for zero shot ,1 for few shot', type=int, required=False)
    parser.add_argument('--dataset', default='zeroshot', type=str, required=False)
    parser.add_argument('--output_par_dir', default='bert-sep', type=str)
    # NOTE(review): default labels are ["pro", "con", "neutral"] but the
    # module-level idx2stance map uses 'pos' for index 1 — verify the mapping.
    parser.add_argument('--polarities', default=["pro", "con", "neutral"], nargs='+',
                        help="if just two polarity switch to ['positive', 'negative']", required=False)
    parser.add_argument('--optimizer', default='adam', type=str, required=False)
    parser.add_argument('--initializer', default='xavier_uniform_', type=str, required=False)
    parser.add_argument('--lr', default=5e-6, type=float, help='try 5e-5, 2e-5, 1e-3 for others', required=False)
    parser.add_argument('--dropout', default=0.1, type=float, required=False)
    parser.add_argument('--l2reg', default=0.001, type=float, required=False)
    parser.add_argument('--log_step', default=10, type=int, required=False)
    parser.add_argument('--log_path', default="./log", type=str, required=False)
    parser.add_argument('--embed_dim', default=300, type=int, required=False)
    parser.add_argument('--hidden_dim', default=128, type=int, required=False, help="lstm encoder hidden size")
    parser.add_argument('--feature_dim', default=2 * 128, type=int, required=False,
                        help="feature dim after encoder depends on encoder")
    parser.add_argument('--output_dim', default=64, type=int, required=False)
    parser.add_argument('--relation_dim', default=100, type=int, required=False)
    parser.add_argument('--bert_dim', default=768, type=int, required=False)
    parser.add_argument('--pretrained_bert_name', default='../../bot-detection/BERT_PretrainModel/bert-base-uncased',
                        type=str, required=False)
    parser.add_argument('--max_seq_len', default=200, type=int, required=False)
    parser.add_argument('--train_dir', default='./VAST/vast_train.csv.json', type=str, required=False)
    parser.add_argument('--val_dir', default='./VAST/vast_dev.csv.json', type=str, required=False)
    parser.add_argument('--test_dir', default='./VAST/vast_test.csv.json', type=str, required=False)
    parser.add_argument('--alpha', default=0.8, type=float, required=False)
    parser.add_argument('--beta', default=1.2, type=float, required=False)

    parser.add_argument('--device', default='cuda:0', type=str, help='e.g. cuda:0', required=False)
    parser.add_argument('--seed', default=0, type=int, help='set seed for reproducibility')

    parser.add_argument("--batch_size", default=16, type=int, required=False)
    parser.add_argument("--eval_batch_size", default=16, type=int, required=False)
    parser.add_argument("--epochs", default=15, type=int, required=False)
    parser.add_argument("--eval_steps", default=50, type=int, required=False)
    opt = parser.parse_args()

    # Derived, non-CLI settings used by the Instructor.
    opt.n_gpus = torch.cuda.device_count()
    opt.model_class = BERT_SPC
    opt.input_features = ['concat_bert_indices_mask', 'concat_segments_indices_mask', 'attention_mask_mask']
    opt.optim_class = optim.Adam
    # Timestamped run directory keeps repeated runs from overwriting each other.
    opt.output_dir = os.path.join(opt.output_par_dir, opt.model_name, opt.dataset,
                                  strftime("%Y-%m-%d %H-%M-%S", localtime()))
    opt.num_labels = len(opt.polarities)
    opt.bert_config = BertConfig.from_pretrained(opt.pretrained_bert_name)

    return opt


class Instructor(object):
    """Loads a trained BERT_SPC stance model and, for the cross-sentiment
    (cross_senti == 0) subset of the test set, writes every CORRECTLY
    predicted example to ./VAST/predict_senti_0_correct.json.
    """

    def __init__(self, opt):
        self.opt = opt
        # Tokenizer/criterion/optimizer are built for parity with the training
        # scripts even though this evaluation path reads a pre-tokenized
        # pickled dataset below and never trains.
        tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)
        bert_model = BertModel.from_pretrained(opt.pretrained_bert_name)
        self.model = opt.model_class(opt, bert_model).to(opt.device)
        self.idx2stance = {0: 'con', 1: 'pos', 2: 'neutral'}
        self.idx2senti = {0: 'negative', 1: 'positive', 2: "neutral"}
        print("using model: ", opt.model_name)
        print("running dataset: ", opt.dataset)
        print("output_dir: ", opt.output_dir)
        test_file_name = 'saved_dataset/vast_test0_plus.dat'

        # NOTE(security): pickle.load executes arbitrary code on malicious
        # input — only load dataset files produced by this project.
        # `with` fixes the original leaked file handle.
        with open(test_file_name, 'rb') as f:
            self.testset = pickle.load(f)

        self.criterion = TraditionCriterion(opt)
        params = list(self.model.parameters())
        self.optimizer = self.opt.optim_class(params, lr=self.opt.lr)

    def run_tradition(self):
        """Evaluate both the best-accuracy and the best-f1 checkpoints on the
        pickled test set, dumping correct cross-sentiment predictions each time.
        """
        state_dict_dir = 'bert-sep/bert-sep/zeroshot/2024-04-20 08-41-49' + '/state_dict'
        # map_location lets a GPU-saved checkpoint load on whatever device
        # this run is using (e.g. CPU-only hosts).
        ckpt = torch.load(os.path.join(state_dict_dir, "best_acc_model.bin"),
                          map_location=self.opt.device)
        self.model.load_state_dict(ckpt)
        self.evaluation_tradition(self.testset)

        ckpt = torch.load(os.path.join(state_dict_dir, "best_f1_model.bin"),
                          map_location=self.opt.device)
        self.model.load_state_dict(ckpt)
        self.evaluation_tradition(self.testset)

    def evaluation_tradition(self, dataset):
        """Run inference over `dataset` and write the correctly predicted
        cross_senti == 0 examples to ./VAST/predict_senti_0_correct.json.

        Each dataset item must provide the opt.input_features tensors plus
        'polarity', 'text', 'target', 'cross_senti' and 'sentiment_label'.
        """
        self.model.eval()
        sampler = RandomSampler(dataset)
        dev_loader = DataLoader(dataset=dataset, batch_size=self.opt.eval_batch_size, sampler=sampler)
        all_labels = []
        all_logits = []
        all_topic = []
        all_text = []
        all_cross_label = []
        all_sentiment_labels = []

        for batch in dev_loader:
            input_features = [batch[feat_name].to(self.opt.device) for feat_name in self.opt.input_features]
            true_stance = batch['polarity'].to(self.opt.device)
            with torch.no_grad():
                logits, _ = self.model(input_features)

            all_labels.append(true_stance.detach().cpu().numpy())
            all_logits.append(logits.detach().cpu().numpy())
            all_cross_label.append(batch['cross_senti'].cpu().numpy())
            all_sentiment_labels.append(batch['sentiment_label'].cpu().numpy())
            all_text.extend(batch['text'])
            all_topic.extend(batch['target'])

        all_labels = np.concatenate(all_labels, axis=0)
        all_logits = np.concatenate(all_logits, axis=0)
        all_sentiment_labels = np.concatenate(all_sentiment_labels, axis=0)
        all_cross_labels = np.concatenate(all_cross_label, axis=0)
        preds = all_logits.argmax(axis=1)

        # Restrict every parallel array to the cross_senti == 0 subset so the
        # loop below indexes aligned data.
        mask = all_cross_labels == 0
        labels_senti_0 = all_labels[mask]
        preds_senti_0 = preds[mask]
        sentiment_label_0 = all_sentiment_labels[mask]
        text_0 = np.array(all_text)[mask]
        topic_0 = np.array(all_topic)[mask]

        output_data = []
        for i in range(len(labels_senti_0)):
            # Keep only CORRECT predictions (file name: ..._correct.json).
            if preds_senti_0[i] != labels_senti_0[i]:
                continue
            output_data.append({
                # BUG FIX: original indexed the UNFILTERED all_text/all_topic
                # with the filtered index i, misaligning text/topic with the
                # labels whenever any example had cross_senti != 0.
                'text': text_0[i],
                'topic': topic_0[i],
                'ground_true': self.idx2stance[labels_senti_0[i]],
                'pred': self.idx2stance[preds_senti_0[i]],
                'sentiment_label': self.idx2senti[sentiment_label_0[i]]
            })

        # Ensure the output directory exists before writing.
        os.makedirs("./VAST", exist_ok=True)
        with open("./VAST/predict_senti_0_correct.json", 'w', encoding='utf-8') as f:
            json.dump(output_data, f, ensure_ascii=False)


if __name__ == "__main__":
    # Script entry point: parse CLI options, build the evaluator (loads the
    # pickled test set and pretrained BERT), then evaluate the saved
    # best-acc / best-f1 checkpoints and dump predictions to JSON.
    opt = init_args()
    ins = Instructor(opt)
    ins.run_tradition()
