from transformers import RobertaTokenizer, RobertaModel, AutoConfig, BertTokenizer, BertModel, XLMRobertaTokenizer, \
    XLMRobertaModel, \
    AutoTokenizer, AutoModel
from functools import partial
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader

from torchsummary import summary
import torch
import torch.nn as nn
from torch.nn import AdaptiveMaxPool2d, AdaptiveMaxPool1d
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import os
import sys
import time
from countermeasure_training import FGM, PGD
from load_data import MyDataset, create_dataloader, get_public_data, save_pred, get_my_data, label2array
from tqdm import tqdm
from sklearn.metrics import f1_score
from early_stopping import EarlyStopping
from vote import soft_vote

sys.path.append('..')


def build_pretrained_model_path(name):
    """Return the absolute path of checkpoint *name* inside ./pretrainedModel (relative to the cwd)."""
    root = os.path.abspath('.')
    return os.path.join(root, "pretrainedModel", name)


# 'train' runs do_train_and_save(); any other value runs the soft-vote ensemble (vote_ans).
TRAIN_OR_VOTE = 'train'

# Index into PRETRAINED_MODEL_NAME / PRETRAINED_MODEL_TOKENIZER_LOADER selecting the active checkpoint.
PRETRAINED_MODEL_TYPE = 0

# Checkpoint directory names under ./pretrainedModel.
PRETRAINED_MODEL_NAME = ['roberta-base', 'roberta-large', 'twitter-roberta-base-sentiment', 'xlm-roberta-base']
# Tokenizer class for each checkpoint above (same order).
PRETRAINED_MODEL_TOKENIZER_LOADER = [RobertaTokenizer, RobertaTokenizer, RobertaTokenizer, XLMRobertaTokenizer]
# Indices (into USEFUL_MODEL) of the saved models included in the voting ensemble.
VOTE_MODEL_NUM = [0, 1, 2, 3]
# File names of previously trained models (expected under ./model), consumed by vote_ans().
USEFUL_MODEL = ["fgm_roberta-base_warmup0.05long._model", 'fgm_roberta-base_warmup50._model',
                'fgm_twitter-roberta-base-sentiment_warmup50._model', 'fgm_xlm-roberta-base_warmup50._model']
# For each USEFUL_MODEL entry, the index of its backbone in PRETRAINED_MODEL_NAME.
USEFUL_MODEL_BERT_TYPE = [0, 0, 2, 3]

# Resolve the local directory of the selected checkpoint (same logic as build_pretrained_model_path).
PRETRAINED_MODEL_PATH = build_pretrained_model_path(PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE])
# Load config / tokenizer / backbone for the selected checkpoint.
# Fixed: the tokenizer was hard-wired to RobertaTokenizer and the backbone to RobertaModel,
# which is wrong for 'xlm-roberta-base'; use the per-checkpoint tokenizer class and AutoModel
# so the architecture follows the checkpoint. Behavior is unchanged for the default type 0.
config = AutoConfig.from_pretrained(PRETRAINED_MODEL_PATH)
config.update({"output_hidden_states": True})  # forward() pools the last entry of hidden_states
tokenizer = PRETRAINED_MODEL_TOKENIZER_LOADER[PRETRAINED_MODEL_TYPE].from_pretrained(PRETRAINED_MODEL_PATH)
base_model = AutoModel.from_pretrained(PRETRAINED_MODEL_PATH, config=config)
# must use CUDA — the code below calls .cuda() unconditionally
# USE_CUDA = torch.cuda.is_available()

# Training hyperparameters
MAX_LEN = 128  # max token length for training examples (test uses 512, see below)
EPOCHS = 2
BATCH_SIZE = 16
weight_decay = 0.0  # currently unused: build_model creates AdamW without weight_decay

bert_lr = 6e-6  # learning rate for the pretrained encoder
else_lr = 1e-5  # learning rate for the classification head
# Warm-up: warm_up_step takes precedence; warm_up_ratio * total_steps is used only when warm_up_step is None
warm_up_step = 50
warm_up_ratio = 0.005

# Whether to enable adversarial training (FGM) on every step
attack = True
# True: use the official dataset; False: use our own sampled train/validation split
use_public_data = True
# Stop training early to avoid overfitting; EPOCHS can then be set higher
early_stopping = EarlyStopping(patience=2, verbose=True, delta=1e-3)


def get_data(is_train=True):
    """Load the train (is_train=True) or test dataframe from the configured source."""
    loader = get_public_data if use_public_data else get_my_data
    return loader(is_train)


# Training examples are truncated to MAX_LEN tokens; test examples keep up to 512.
train_dataset = MyDataset(tokenizer=tokenizer, _dataframe=get_data(), max_len=MAX_LEN)
train_loader = create_dataloader(train_dataset, batch_size=BATCH_SIZE)
# NOTE(review): 'train' mode is used for the private split — presumably so labels are kept
# for loss/F1 evaluation; confirm against MyDataset's implementation.
test_dataset = MyDataset(tokenizer=tokenizer, _dataframe=get_data(False), max_len=512,
                         mode='test' if use_public_data else 'train')
test_loader = create_dataloader(test_dataset, batch_size=1, mode='test')
total_steps = len(train_loader) * EPOCHS  # total optimizer steps, drives the warm-up scheduler

# Loss function
criterion = nn.CrossEntropyLoss().cuda()


def init_params(module_lst):
    """Xavier-uniform-initialize every parameter with more than one dimension in the given modules."""
    for mod in module_lst:
        for param in mod.parameters():
            if param.dim() <= 1:
                continue  # leave biases / other 1-d parameters at their defaults
            torch.nn.init.xavier_uniform_(param)


class SentencePositiveModel(nn.Module):
    """3-way sentence sentiment classifier: pretrained encoder + linear head on the [CLS] vector."""

    def __init__(self, n_classes=3):
        super(SentencePositiveModel, self).__init__()
        # `base` / `out` names are part of the interface: build_model() reads them for per-group LRs.
        self.base = base_model
        # roberta-large exposes 1024-d hidden states; every other checkpoint here is 768-d.
        hidden_dim = 1024 if 'large' in PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE] else 768
        self.out = nn.Sequential(nn.Linear(hidden_dim, n_classes))
        init_params([self.out])

    def forward(self, input_ids, attention_mask):
        """Encode the batch and classify from the first ([CLS]) token of the last hidden layer."""
        encoded = self.base(input_ids=input_ids, attention_mask=attention_mask)
        cls_vector = encoded.hidden_states[-1][:, 0, :]
        return self.out(cls_vector)


def build_model():
    """Build the classifier, its AdamW optimizer (separate LRs for encoder vs head) and LR scheduler."""
    model = SentencePositiveModel(n_classes=3)
    model.cuda()
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    param_groups = [
        {"params": model.base.parameters(), "lr": bert_lr},
        {"params": model.out.parameters(), "lr": else_lr},
    ]
    optimizer = AdamW(param_groups)  # correct_bias=False, weight_decay=weight_decay
    # Fixed step count wins over the ratio when warm_up_step is set.
    warmup_steps = warm_up_ratio * total_steps if warm_up_step is None else warm_up_step
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=total_steps,
    )
    return model, optimizer, scheduler


def do_train_with_fgm(_model, _optimizer, _scheduler):
    """Train for EPOCHS epochs with optional FGM adversarial training.

    Each step runs a normal forward/backward pass; when `attack` is True, an
    FGM-perturbed forward/backward on the same batch follows BEFORE the
    optimizer step, so clean and adversarial gradients accumulate together.

    On the private split (use_public_data is False), the model is evaluated on
    the held-out loader after every epoch and EarlyStopping may restore the
    best checkpoint and stop training.
    """
    global_step = 0
    tic_train = time.time()
    log_steps = 100  # print a progress line every `log_steps` optimizer steps
    fgm = FGM(_model)
    for epoch in range(EPOCHS):
        _model.train()
        losses = []
        for step, sample in enumerate(train_loader):
            loss = get_loss(_model, sample)

            losses.append(loss.item())
            loss.backward()

            # Adversarial training: perturb embeddings, backprop the adversarial
            # loss on top of the clean gradients, then restore the embeddings.
            if attack:
                fgm.attack()
                loss_sum = get_loss(_model, sample)
                loss_sum.backward()
                fgm.restore()

            # Optimizer update using the accumulated (clean + adversarial) gradients.
            _optimizer.step()
            _optimizer.zero_grad()
            _scheduler.step()
            global_step += 1

            if global_step % log_steps == 0:
                # NOTE: np.mean(losses) is the running average over the epoch so far, not the last batch.
                print("global step %d, epoch: %d, batch: %d, loss: %.5f, speed: %.2f step/s, lr: "
                      % (global_step, epoch, step, np.mean(losses), global_step / (time.time() - tic_train)),
                      _scheduler.get_last_lr())
        if not use_public_data:
            del losses

            # Per-epoch validation on the held-out split; early-stop on the mean loss.
            _model.eval()
            test_losses = []
            for sample in tqdm(test_loader, "test pred"):
                with torch.no_grad():
                    loss = get_loss(_model, sample)
                test_losses.append(loss.item())
            count_loss = np.mean(test_losses)
            print(f"test loss:{count_loss}")
            early_stopping(count_loss, _model)
            if early_stopping.early_stop:
                print(f'Early stop in Epoch{epoch}')
                # assumes EarlyStopping saved the best weights to 'checkpoint.pt' — TODO confirm
                _model.load_state_dict(torch.load('checkpoint.pt'))
                break


def get_loss(input_model: nn.Module, sample):
    """Compute the cross-entropy loss of `input_model` on one batch (tensors are moved to GPU)."""
    ids = sample["input_ids"].cuda()
    mask = sample["attention_mask"].cuda()
    logits = input_model(input_ids=ids, attention_mask=mask)
    targets = torch.squeeze(sample['label']).cuda()
    return criterion(logits, targets)


def predict(_test_loader, _model, return_float=False):
    """Run inference over a loader.

    Returns a list of argmax class ids, or — when return_float is True — an
    np.ndarray of raw logit rows (used by the soft-vote ensemble).
    """
    _model.eval()
    _model.cuda()
    test_pred = []
    for batch in tqdm(_test_loader, desc='Pred'):
        ids = batch['input_ids'].cuda()
        mask = batch["attention_mask"].cuda()
        with torch.no_grad():
            logits = _model(input_ids=ids, attention_mask=mask)
            scores = logits.cpu().numpy()
            if not return_float:
                scores = np.argmax(scores, axis=1)
            test_pred.extend(scores)
    return np.array(test_pred) if return_float else test_pred


def count_acc(y_pred, y_true):
    """Micro-averaged F1 over the three sentiment classes."""
    return f1_score(y_true=y_true, y_pred=y_pred, labels=[0, 1, 2], average='micro')


def do_train_and_save(train_a_new_model=True):
    """Train a fresh model (or reload a previously saved one) and write test-set predictions.

    Args:
        train_a_new_model: when True, train from scratch and save the whole model object;
            when False, reload the model saved by a previous run of this function.
    """
    # os.path.join keeps the path portable (the old hard-coded '.\\model\\...' was Windows-only).
    model_path = os.path.join(
        'model', f'fgm_{PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE]}_warmup{warm_up_step}._model')
    _model: nn.Module
    if train_a_new_model:
        _model, _optimizer, _scheduler = build_model()
        do_train_with_fgm(_model, _optimizer, _scheduler)
        print('Train Done')
        # NOTE(review): saves the full module object (not a state_dict); loading needs the same class code.
        torch.save(_model, model_path)
    else:
        # Fixed: the old load path ('.\\_model\\fgm_<name>._model') never matched the save path
        # above — different directory and missing warmup suffix — so reloading always failed.
        _model = torch.load(model_path)
    y_pred = predict(_test_loader=test_loader, _model=_model)
    save_pred(y_pred)


def do_train_and_count_acc():
    """Train on the private split and report micro-F1 on the held-out set.

    Raises:
        ValueError: if use_public_data is True — the public test split has no
            labels, so accuracy cannot be computed.
    """
    # `assert` is stripped under `python -O`; raise explicitly so the guard always holds.
    if use_public_data:
        raise ValueError("do_train_and_count_acc requires use_public_data=False "
                         "(a labelled held-out split is needed to compute F1)")
    _model, _optimizer, _scheduler = build_model()
    do_train_with_fgm(_model, _optimizer, _scheduler)
    print('Train Done')
    y_pred = predict(_test_loader=test_loader, _model=_model)
    y_true = label2array(test_dataset.labels)
    print("Last F1 ACC: ", count_acc(y_pred, y_true))


def vote_ans():
    """Soft-vote ensemble: run every model listed in VOTE_MODEL_NUM over the test set and save the averaged prediction."""
    ans = list()
    for model_num in VOTE_MODEL_NUM:
        model_path = USEFUL_MODEL[model_num]
        tokenizer_loader = PRETRAINED_MODEL_TOKENIZER_LOADER[USEFUL_MODEL_BERT_TYPE[model_num]]
        bert_path = build_pretrained_model_path(PRETRAINED_MODEL_NAME[USEFUL_MODEL_BERT_TYPE[model_num]])
        with torch.no_grad():
            # os.path.join keeps the path portable (was the Windows-only f'model\\{model_path}').
            a_model = torch.load(os.path.join('model', model_path))
            _tokenizer = tokenizer_loader.from_pretrained(bert_path)
            print(PRETRAINED_MODEL_NAME[USEFUL_MODEL_BERT_TYPE[model_num]])
            # Fixed: the old positional call MyDataset(_tokenizer, 512, get_data(False), 'test')
            # passed 512 where the dataframe belongs; use keywords to match the module-level
            # call sites (tokenizer=, _dataframe=, max_len=, mode=).
            _test_dataset = MyDataset(tokenizer=_tokenizer, _dataframe=get_data(False),
                                      max_len=512, mode='test')
            _test_loader = create_dataloader(dataset=_test_dataset, batch_size=1, mode='test')
            ans.append(predict(_test_loader, a_model, True))

    ans = soft_vote(ans)
    save_pred(ans)


if __name__ == '__main__':
    # Entry point: mode is selected by the TRAIN_OR_VOTE constant at the top of the file.
    if TRAIN_OR_VOTE == 'train':
        do_train_and_save()
    else:
        vote_ans()
