# -*- coding: utf-8 -*-
"""
@Time ： 2022/11/26 9:09
@Auth ： xlwreally
@File ：fine_tuning.py
@IDE ：PyCharm
"""
from sklearn.metrics import accuracy_score
from torch import nn

import pre_training
import sys
import time
from argparse import Namespace

import seaborn as sns
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader

import datasets
from losses import WclLoss
from model import WML
from util import *
import torch.optim as optim
from tqdm import *

# Reuse the configuration object built by the pre-training script,
# overriding only the fields that differ for fine-tuning.
args = pre_training.args
args.model_state_file = "微调.pth"
args.epochs = 30
args.batch_size=128
# Path to the labelled training set (Windows default).
path = r"D:\data\Labeled_data_11754\train.csv"
# Path to the pre-trained model checkpoint (Windows default).
model_path = r"D:\data\20230111 06-53-01 训练.pth"
# Loss-mixing weight: total loss = a * WCL_loss + (1 - a) * ce_loss.
a = 0.2
# When running on the Linux cluster, switch to the mounted data/model
# paths and train on the GPU instead.
if sys.platform=="linux":
    path = "/mnt/JuChiYun-XiongLiWei/wml/data/Labeled_data_11754/train.csv"
    model_path = "/mnt/JuChiYun-XiongLiWei/wml/model_storage/20230302 10-28-11 训练.pth"
    args.device = "cuda"

if __name__ == '__main__':
    # Fine-tuning entry point: loads the pre-trained WML checkpoint, then
    # trains it on the labelled data with a weighted combination of a
    # contrastive (WCL) loss and cross-entropy, with LR-on-plateau decay
    # and early stopping managed by `update_train_state`.
    print("使用 CUDA: {}".format(args.device))
    set_seed_everywhere(args.seed, args.device)
    handle_dirs(args.save_dir)

    # Restore the pre-trained weights and move the model to the device.
    WML_model = WML(args)
    WML_model.load_state_dict(torch.load(model_path))
    WML_model.to(args.device)

    optimizer = optim.AdamW(WML_model.parameters(), lr=args.learning_rate)
    # Halve the learning rate after one epoch without improvement in the
    # (minimized) epoch training loss.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer,
        mode='min',
        factor=0.5,
        patience=1
    )

    print("开始加载数据")
    loader = DataLoader(dataset=datasets.Fine_tuning_data(args, path), batch_size=args.batch_size,
                        shuffle=True, num_workers=0, drop_last=False)
    train_state = make_train_state(args)

    print("开始训练")
    time.sleep(1)  # let the prints flush before tqdm takes over the console
    train_bar = tqdm(
        desc='训练',
        total=len(loader),
    )
    epoch_bar = tqdm(
        desc='训练epochs',
        total=args.epochs,
    )

    WML_model.train()
    WCL_criterion = WclLoss(args.t_WCL)
    ce_criterion = nn.CrossEntropyLoss()

    for epoch_index in range(args.epochs):
        train_state['epoch_index'] = epoch_index
        # Running means of the loss terms / accuracy over the epoch.
        running_loss = 0.0
        running_loss_ce = 0.0
        running_loss_wcl = 0.0
        accuracy = 0.0
        for item_number, batch_dict in enumerate(loader):
            overall = batch_dict['overall'].to(args.device)
            vector = batch_dict['vector'].to(args.device)
            glove_embedding = batch_dict['glove_embedding'].to(args.device)

            wml_attention, x1, out = WML_model(vector, glove_embedding)

            # Normalize the contrastive loss by the ACTUAL batch size so the
            # final batch (smaller, since drop_last=False) is scaled the same
            # way as full batches; the fixed args.batch_size under-weighted it.
            WCL_loss = WCL_criterion(x1, overall) / overall.size(0)
            ce_loss = ce_criterion(out, overall)

            # `a` balances the contrastive term against cross-entropy.
            loss = a * WCL_loss + (1 - a) * ce_loss

            # Incremental-mean updates: running_x holds the mean over the
            # batches seen so far this epoch.
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (item_number + 1)
            running_loss_wcl += (WCL_loss.item() - running_loss_wcl) / (item_number + 1)
            running_loss_ce += (ce_loss.item() - running_loss_ce) / (item_number + 1)

            res = torch.argmax(out, -1).detach().cpu().numpy()
            overall = overall.detach().cpu().numpy()
            accuracy += (accuracy_score(overall, res) - accuracy) / (item_number + 1)

            # Bug fix: clear stale gradients before backprop. Without this,
            # gradients from every previous batch keep accumulating and the
            # optimizer steps on the running sum instead of this batch's grad.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_bar.set_postfix(accuracy=accuracy, loss=running_loss,
                                  CE_loss=running_loss_ce, WCL_loss=running_loss_wcl,
                                  epoch=epoch_index)
            train_bar.update()

        train_state['train_loss'].append(running_loss)
        # update_train_state handles checkpointing and early-stopping bookkeeping.
        train_state = update_train_state(args=args, model=WML_model, train_state=train_state)
        scheduler.step(train_state['train_loss'][-1])

        if train_state['stop_early']:
            epoch_bar.close()
            train_bar.close()
            break

        train_bar.n = 0  # reset the inner progress bar for the next epoch

        epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'])
        epoch_bar.update()

    # Plot the per-epoch training-loss curve.
    plt.figure(figsize=(10, 7))
    sns.lineplot(
        x=[epoch + 1 for epoch in range(len(train_state['train_loss']))],
        y=train_state['train_loss'],
        color='coral',
        label='loss',
    )

    plt.xticks([epoch for epoch in range(len(train_state['train_loss']) + 1)])
    plt.show()