from transformers import AutoConfig, AutoTokenizer, AutoModel

from transformers import AdamW, get_linear_schedule_with_warmup
from sklearn.metrics import classification_report
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import json
import os
import sys
import time
from countermeasure_training import FGM
from load_data import MyDataset, create_dataloader, get_data
from tqdm import tqdm
from early_stopping import EarlyStopping
from vote import soft_vote
import Baseline_model
import CA_model
import SA_Net_model
from src.EnvironmentVariables import PRETRAIN_BERT_PATH, MODELS_PATH

sys.path.append('../..')


def build_pretrained_model_path(name):
    """Return ``<cwd>/pretrainedModel/<name>`` for a locally stored pretrained model."""
    base_dir = os.path.abspath('')
    return os.path.join(base_dir, "pretrainedModel", name)


# Which head variant to train: 'BASE', 'CA' (CBAM attention) or 'SA' (SA-Net) —
# must be a key of SentencePositiveModel below.
MODEL_TYPE = 'CA'

SentencePositiveModel = {'BASE': Baseline_model.BaseSentencePositiveModel, 'CA': CA_model.SentencePositiveModelWithCBAM,
                         'SA': SA_Net_model.SaNetSentencePositiveModel}

TRAIN_OR_VOTE = 'train'

# Index into PRETRAINED_MODEL_NAME selecting the backbone.
PRETRAINED_MODEL_TYPE = 1

PRETRAINED_MODEL_NAME = ['bert-base-chinese', 'chinese-roberta-wwm-ext', 'FinBERT_L-12_H-768_A-12_pytorch']

# Models used by the voting (ensemble) stage.
# VOTE_MODEL_NUM indexes into USEFUL_MODEL to pick the checkpoints to ensemble.
VOTE_MODEL_NUM = [1, 2, 4, 7]
USEFUL_MODEL = ["fgm_FinBERT_L-12_H-768_A-12_pytorch_SA._model", 'fgm_FinBERT_L-12_H-768_A-12_pytorch_BASE._model',
                'fgm_FinBERT_L-12_H-768_A-12_pytorch_CA._model', "FinBERT_L-12_H-768_A-12_pytorch_BASE._model",
                "chinese-roberta-wwm-ext_BASE._model", 'bert-base-chinese_BASE._model',
                'FinBERT_L-12_H-768_A-12_pytorch_CA._model', "fgm_chinese-roberta-wwm-ext_CA._model"]
USEFUL_MODEL_BERT_TYPE = [0, 0, 2, 3]
# End of voting configuration.

PRETRAINED_MODEL_PATH = os.path.join(PRETRAIN_BERT_PATH,
                                     PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE])

# must use cuda
# USE_CUDA = torch.cuda.is_available()

# Training hyper-parameters.
MAX_LEN = 128
EPOCHS = 3
BATCH_SIZE = 16
weight_decay = 0.0

# Separate learning rates: small for the BERT backbone, larger for everything else.
bert_lr = 6e-6
else_lr = 1e-5
# Warm-up settings: a fixed step count takes priority over the ratio
# (see build_model — warm_up_ratio is only used when warm_up_step is None).
warm_up_step = 50
warm_up_ratio = 0.005

# Whether to enable adversarial (FGM) training.
attack = True

# Loss function, moved to GPU at import time — this module requires CUDA.
criterion = nn.CrossEntropyLoss().cuda()


def build_model(base_model, total_steps):
    """Build the sentence-polarity model plus its optimizer and LR scheduler.

    Args:
        base_model: pretrained transformers backbone passed to the head model.
        total_steps: total optimizer steps, used by the linear warm-up schedule.

    Returns:
        (model, optimizer, scheduler) triple ready for training.
    """
    _model = SentencePositiveModel[MODEL_TYPE](base_model, PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE], n_classes=2,
                                               max_len=MAX_LEN)

    _model.cuda()

    # Bug fix: build the per-group learning rates from the *unwrapped* model.
    # nn.DataParallel does not forward custom attribute access (`.base`,
    # `.out`, `.attention`), so reading them after wrapping raised
    # AttributeError on multi-GPU machines. The parameter objects are shared
    # with the wrapped model, so the optimizer still updates the right tensors.
    lr_list = [{"params": _model.base.parameters(), "lr": bert_lr},
               {"params": _model.out.parameters(), "lr": else_lr}]
    if MODEL_TYPE != "BASE":
        # Only the attention-augmented variants ('CA', 'SA') have this submodule.
        lr_list.append({"params": _model.attention.parameters(), "lr": else_lr})

    if torch.cuda.device_count() > 1:
        _model = nn.DataParallel(_model)

    _optimizer = AdamW(lr_list)  # correct_bias=False, weight_decay=weight_decay
    _scheduler = get_linear_schedule_with_warmup(
        _optimizer,
        # A fixed warm-up step count takes priority; the ratio is the fallback.
        num_warmup_steps=warm_up_ratio * total_steps if warm_up_step is None else warm_up_step,
        num_training_steps=total_steps
    )
    return _model, _optimizer, _scheduler


def do_train_with_fgm(_model, _optimizer, _scheduler, early_stopping, train_loader, test_loader):
    """Train ``_model`` with optional FGM adversarial training and early stopping.

    Runs up to EPOCHS epochs over ``train_loader``. When the module-level
    ``attack`` flag is set, each step additionally back-propagates a loss
    computed on FGM-perturbed embeddings before the optimizer update. From the
    second epoch on, evaluates on ``test_loader`` every 500 steps and stops
    early (restoring the best checkpoint) if ``early_stopping`` triggers.

    Returns:
        The trained model (with the early-stopping checkpoint loaded if
        training stopped early).
    """
    global_step = 0
    tic_train = time.time()
    log_steps = 100
    fgm = FGM(_model)
    for epoch in range(EPOCHS):
        _model.train()
        losses = []
        for step, sample in enumerate(train_loader):
            loss = get_loss(_model, sample)

            losses.append(loss.item())
            loss.backward()

            # Adversarial training: perturb embeddings, accumulate the
            # adversarial gradients on top of the clean ones, then restore
            # the original embedding weights before the optimizer step.
            if attack:
                fgm.attack()
                loss_sum = get_loss(_model, sample)
                loss_sum.backward()
                fgm.restore()

            # Optimizer / scheduler update for this step.
            _optimizer.step()
            _optimizer.zero_grad()
            _scheduler.step()
            global_step += 1

            if global_step % log_steps == 0:
                # Note: np.mean(losses) is the mean since the last log line
                # (the list is cleared below), not a running epoch mean.
                print("global step %d, epoch: %d, batch: %d, loss: %.5f, speed: %.2f step/s, lr: "
                      % (global_step, epoch, step, np.mean(losses), global_step / (time.time() - tic_train)),
                      _scheduler.get_last_lr())
                losses.clear()

            # Early-stop check: every 500 steps, starting from the second epoch.
            if global_step % (5 * log_steps) == 0 and epoch > 0:
                with torch.no_grad():
                    _model.eval()
                    test_losses = []
                    for test_sample in tqdm(test_loader, "test pred"):
                        loss = get_loss(_model, test_sample)
                        test_losses.append(loss.item())
                    count_loss = np.mean(test_losses)
                    print(f"test loss:{count_loss}")
                    early_stopping(count_loss, _model)
                    _model.train()
                    if early_stopping.early_stop:
                        print(f'Early stop in Epoch{epoch}')
                        # Restore the best weights seen so far before returning.
                        _model.load_state_dict(early_stopping.load_checkpoint())
                        return _model
    return _model


def get_loss(input_model: nn.Module, sample):
    """Forward one batch through ``input_model`` and return its cross-entropy loss."""
    ids = sample["input_ids"].cuda()
    mask = sample["attention_mask"].cuda()
    logits = input_model(input_ids=ids, attention_mask=mask)
    return criterion(logits, sample['label'].cuda())


def predict(_test_loader, _model, return_float=False):
    """Run inference over ``_test_loader`` and collect per-sample predictions.

    Args:
        _test_loader: DataLoader yielding dicts with 'input_ids' and
            'attention_mask' tensors.
        _model: the model to evaluate; put in eval mode and moved to CUDA here.
        return_float: when True, return the raw model outputs stacked into an
            ndarray (used for soft voting); otherwise return a list of argmax
            class labels.

    Returns:
        list of predicted labels, or an ndarray of raw outputs when
        ``return_float`` is True.
    """
    test_pred = []
    _model.eval()
    _model.cuda()
    for batch in tqdm(_test_loader, desc='Pred'):
        b_input_ids = batch['input_ids'].cuda()
        attention_mask = batch["attention_mask"].cuda()
        with torch.no_grad():
            ans = _model(input_ids=b_input_ids, attention_mask=attention_mask)
            ans = ans.cpu().numpy()
            if not return_float:
                ans = np.argmax(ans, axis=1)
            # extend() replaces the original element-by-element append loop
            # (which also shadowed the throwaway name `_`).
            test_pred.extend(ans)
    if return_float:
        test_pred = np.array(test_pred)
    return test_pred


def do_train_and_save(train_a_new_model=True):
    """Train a model and persist it with its hyper-parameters, or reload one.

    Args:
        train_a_new_model: when True, run the full training loop and save the
            best checkpoint; when False, load the previously saved checkpoint
            for the current MODEL_TYPE / backbone / attack configuration.

    Returns:
        The trained or loaded model (previously the loaded model was discarded).
    """
    # Load pretrained backbone and tokenizer; the heads consume hidden states.
    config = AutoConfig.from_pretrained(PRETRAINED_MODEL_PATH)
    config.update({"output_hidden_states": True})
    tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_PATH)
    base_model = AutoModel.from_pretrained(PRETRAINED_MODEL_PATH, config=config)
    # Early stopping guards against over-fitting, so EPOCHS can be generous.
    early_stopping = EarlyStopping(patience=2, verbose=True, delta=1e-3)

    train_dataset = MyDataset(tokenizer=tokenizer, _dataframe=get_data(), max_len=MAX_LEN)
    train_loader = create_dataloader(train_dataset, batch_size=BATCH_SIZE)
    test_dataset = MyDataset(tokenizer=tokenizer, _dataframe=get_data('test'), max_len=MAX_LEN,
                             mode='train')
    test_loader = create_dataloader(test_dataset, batch_size=1, mode='test')
    total_steps = len(train_loader) * EPOCHS

    # Checkpoint name shared by the save and load branches. Bug fix: the load
    # path previously omitted MODEL_TYPE and hard-coded the 'fgm_' prefix, so
    # it could never match a checkpoint saved by this function.
    name = f'{"fgm_" if attack else ""}{PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE]}_{MODEL_TYPE}._model'

    _model: nn.Module
    if train_a_new_model:
        _model, _optimizer, _scheduler = build_model(base_model, total_steps=total_steps)
        do_train_with_fgm(_model, _optimizer, _scheduler, early_stopping=early_stopping,
                          train_loader=train_loader, test_loader=test_loader)
        # Always restore the best checkpoint seen during training.
        _model.load_state_dict(early_stopping.load_checkpoint())
        print('Train Done')
        _config = {
            # Training hyper-parameters, saved alongside the model so that
            # load_model / vote_ans can rebuild the matching tokenizer.
            'MAX_LEN': MAX_LEN,
            'EPOCHS': EPOCHS,
            'BATCH_SIZE': BATCH_SIZE,
            'weight_decay': weight_decay,
            'bert_lr': bert_lr,
            'else_lr': else_lr,
            # Warm-up settings.
            'warm_up_step': warm_up_step,
            'warm_up_ratio': warm_up_ratio,
            'base_model': PRETRAINED_MODEL_NAME[PRETRAINED_MODEL_TYPE],
            'model_type': MODEL_TYPE
        }
        save_model(_model, name, **_config)
    else:
        _model = torch.load(os.path.join(MODELS_PATH, name))
    return _model


def save_model(_model, name: str, **kwargs):
    """Save ``_model`` under MODELS_PATH and its hyper-parameters as ``<name>.config``."""
    torch.save(_model, os.path.join(MODELS_PATH, name))
    config_path = os.path.join(MODELS_PATH, name + '.config')
    with open(config_path, 'w') as f:
        f.write(json.dumps(kwargs))


def load_model(_name):
    """Load a saved model and its JSON hyper-parameter config from MODELS_PATH.

    Args:
        _name: checkpoint filename as produced by save_model.

    Returns:
        (model, config) where ``config`` is the dict written by save_model.
    """
    _model = torch.load(os.path.join(MODELS_PATH, _name))
    # Bug fix: the previous annotation ``my_config: json`` used the json
    # *module* as a type; the parsed config is a plain dict.
    with open(os.path.join(MODELS_PATH, _name + '.config'), 'r') as f:
        my_config: dict = json.load(f)
    return _model, my_config


def vote_ans(df: pd.DataFrame):
    """Soft-vote the raw predictions of the checkpoints selected by VOTE_MODEL_NUM.

    Each selected checkpoint is loaded together with its saved config, a
    tokenizer matching its backbone is built, and raw (float) predictions over
    ``df`` are collected. Returns (voted_predictions, labels); ``labels``
    come from the dataset built in the *last* loop iteration.
    """
    ans = list()
    for model_num in VOTE_MODEL_NUM:
        with torch.no_grad():
            a_model, a_config = load_model(USEFUL_MODEL[model_num])
            base_model_path = os.path.join(PRETRAIN_BERT_PATH, a_config['base_model'])
            max_len = a_config['MAX_LEN']
            batch_size = a_config["BATCH_SIZE"]
            _tokenizer = AutoTokenizer.from_pretrained(base_model_path)

            # NOTE(review): positional order here is (tokenizer, max_len, df, mode),
            # but do_train_and_save constructs MyDataset with keywords
            # (tokenizer=..., _dataframe=..., max_len=...) — confirm MyDataset's
            # positional parameter order matches this call.
            valida_dataset = MyDataset(_tokenizer, max_len, df, 'test')
            valida_loader = create_dataloader(dataset=valida_dataset, batch_size=batch_size, mode='test')
            ans.append(predict(valida_loader, a_model, True))
            # Overwritten each iteration; only the final iteration's labels are
            # returned (unbound if VOTE_MODEL_NUM is ever empty).
            samples = valida_dataset.labels
    ans = soft_vote(ans)
    return ans, samples


def valida(_name):
    """Evaluate the saved checkpoint ``_name`` on the validation split.

    Loads the model and its config, rebuilds the matching tokenizer, predicts
    class labels over the 'valida' split, and writes a classification report
    to ``<_name>_validAns.txt`` under MODELS_PATH.
    """
    _model, _config = load_model(_name)
    base_model_path = os.path.join(PRETRAIN_BERT_PATH, _config['base_model'])
    max_len = _config['MAX_LEN']
    tokenizer = AutoTokenizer.from_pretrained(base_model_path)
    df = get_data('valida')
    # NOTE(review): positional order (tokenizer, max_len, df, mode) — confirm
    # it matches MyDataset's signature (do_train_and_save uses keywords with a
    # different apparent order).
    valida_dataset = MyDataset(tokenizer, max_len, df, 'valida')
    valida_loader = create_dataloader(dataset=valida_dataset, batch_size=16, mode='valida')
    with torch.no_grad():
        pred = predict(valida_loader, _model)
    samples = valida_dataset.labels
    save_classification_report(_name + '_validAns.txt', samples, pred)
    # df.to_csv('pred.csv')


def save_classification_report(_name: str, samples, pred):
    """Print sklearn's classification report and save it as ``_name`` under MODELS_PATH."""
    report = classification_report(samples, pred, digits=5)
    print(report)
    with open(os.path.join(MODELS_PATH, _name), "w") as f:
        f.write(report)


if __name__ == '__main__':
    # Alternative entry points, kept for manual experimentation:
    # do_train_and_save()
    #  vote_ans()
    """for _ in USEFUL_MODEL:
        valida(_)"""
    # valida(USEFUL_MODEL[-1])
    # Ensemble the configured checkpoints and save the voted report.
    # NOTE(review): get_data is called with 'validation' here but with
    # 'valida' inside valida() — confirm both split names are valid for
    # get_data, or whether these should be the same string.
    save_classification_report('vote_ans.txt', *vote_ans(get_data('validation')))
