# -*- coding: utf-8 -*-
"""
@Time ： 2022/9/27 9:09
@Auth ： xlwreally
@File ：pre_training.py
@IDE ：PyCharm
"""

import sys
import time
from argparse import Namespace

import seaborn as sns
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader

import datasets
from model import WML
from util import *
from losses import WclLoss
import torch.optim as optim
from tqdm import *
# Hyper-parameters and data paths for pre-training.
# These are the Windows development defaults; the block below overrides
# the paths/device when running on a Linux server.
args = Namespace(
    # Weakly-labeled corpus: raw sentences, overall ratings, and
    # precomputed ABAE attention weights (NumPy archive).
    scentences=r"D:\data\Weakly_labeled_data_1.1M\laptop.txt",
    overalls=r"D:\data\Weakly_labeled_data_1.1M\laptop_overall.txt",
    attentions=r"D:\data\Weakly_labeled_data_1.1M\laptop_attention.npy",

    # Pretrained embedding resources.
    bert=r"D:\data\bert_torch",  # local BERT checkpoint directory
    glove_cache = r"D:\data\glove.twitter.27B\vectors_wd2.txt",  # GloVe vectors file

    wv_dim=200,  # word-vector dimension (matches the GloVe file above — TODO confirm)
    bert_dim=768,  # BERT hidden size
    batch_size=12,
    epochs_size=10000,
    epochs=20,
    text_size=128,  # presumably max tokens per text — verify against datasets.Data
    WCL_loss_size=24,
    f=50,
    m=192,
    u=192,
    t_PL=0.5,  # temperature for the pseudo-label (PL) loss
    t_WCL=0.5,  # temperature for the weak contrastive (WCL) loss
    σ1=0.2 ,  # weight of PL loss in the combined objective (1-σ1 goes to WCL)

    device="cpu",
    reload_from_files=False,
    learning_rate=1e-4,
    early_stopping_criteria=10,
    catch_keyboard_interrupt=True,
    seed=1234,


    save_dir="model_storage",  # checkpoint output directory
    model_state_file="训练.pth",  # checkpoint filename ("training.pth")
)
# When running on the Linux training server, swap in the server-side data
# paths, train on GPU, and enlarge the per-epoch size and batch size.
if sys.platform=="linux":
    # Alternate datasets (cell phones / IMDB) kept here for quick switching.
    # args.scentences = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json.txt"
    # args.overalls = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json_overall.txt"
    # args.attentions = "/mnt/JuChiYun-XiongLiWei/wml/data/Cell_Phones_and_Accessories_5.json_att.npy"
    args.scentences = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop.txt"
    args.overalls = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop_overall.txt"
    args.attentions = "/mnt/JuChiYun-XiongLiWei/wml/data/laptop_attention.npy"
    # args.scentences = "/mnt/JuChiYun-XiongLiWei/wml/data/IMDB Dataset.txt"
    # args.overalls = "/mnt/JuChiYun-XiongLiWei/wml/data/IMDB Dataset_overall.txt"
    # args.attentions = "/mnt/JuChiYun-XiongLiWei/wml/data/IMDB Dataset_att.npy"
    args.bert = "/mnt/JuChiYun-XiongLiWei/wml/data"
    args.glove_cache = "/mnt/JuChiYun-XiongLiWei/wml/data/vectors_wd2.txt"
    args.save_dir = "/mnt/JuChiYun-XiongLiWei/wml/model_storage"
    args.device = "cuda"
    args.epochs_size=20000
    args.batch_size=128



if __name__ == '__main__':
    # Pre-training entry point: joint optimization of the pseudo-label (PL)
    # loss and the weak contrastive (WCL) loss, with plateau-based LR decay,
    # early stopping via util.update_train_state, and a final loss curve plot.
    print("使用 CUDA: {}".format(args.device))
    set_seed_everywhere(args.seed, args.device)
    handle_dirs(args.save_dir)

    # Model and contrastive criterion on the target device.
    WML_model = WML(args)
    WML_model.to(args.device)
    WCL_criterion = WclLoss(args.t_WCL)

    optimizer = optim.AdamW(WML_model.parameters(), lr=args.learning_rate)
    # Halve the learning rate when the epoch-mean training loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer=optimizer,
        mode='min',
        factor=0.5,
        patience=1
    )
    print("开始加载数据")
    loader = DataLoader(
        dataset=datasets.Data(args),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        drop_last=False,
    )
    train_state = make_train_state(args)
    # %%
    print("开始训练")
    time.sleep(1)  # let the prints flush before tqdm takes over the console
    train_bar = tqdm(
        desc='训练',
        total=len(loader),  # idiomatic len() instead of loader.__len__()
    )
    epoch_bar = tqdm(
        desc='训练epochs',
        total=args.epochs,
    )

    WML_model.train()

    for epoch_index in range(args.epochs):
        train_state['epoch_index'] = epoch_index
        # Running means of combined / PL / WCL losses over the epoch.
        running_loss = 0.0
        running_loss_pl = 0.0
        running_loss_wcl = 0.0
        for item_number, batch_dict in enumerate(loader):
            overall = batch_dict['overall'].to(args.device)
            ABAE_attention = batch_dict['ABAE_attention'].to(args.device)
            neg_ABAE_attention = batch_dict['neg_ABAE_attention'].to(args.device)
            vector = batch_dict['vector'].to(args.device)
            glove_embedding = batch_dict['glove_embedding'].to(args.device)

            wml_attention, x1, out = WML_model(vector, glove_embedding)
            PL_loss = WML_model.PL_loss(
                wml_attention=wml_attention,
                ABAE_attention=ABAE_attention,
                neg_ABAE_attention=neg_ABAE_attention,
                t_PL=args.t_PL,
            )
            WCL_loss = WCL_criterion(x1, overall)
            # σ1 balances the two objectives.
            loss = args.σ1 * PL_loss + (1 - args.σ1) * WCL_loss

            # Incremental mean: avg_k = avg_{k-1} + (x_k - avg_{k-1}) / k.
            loss_t = loss.item()
            running_loss += (loss_t - running_loss) / (item_number + 1)
            running_loss_pl += (PL_loss.item() - running_loss_pl) / (item_number + 1)
            running_loss_wcl += (WCL_loss.item() - running_loss_wcl) / (item_number + 1)

            # BUG FIX: gradients must be cleared each step. The original code
            # never called zero_grad(), so .backward() accumulated gradients
            # across every batch of the entire run, corrupting the updates.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_bar.set_postfix(loss=running_loss, epoch=epoch_index,
                                  PL_loss=running_loss_pl, WCL_loss=running_loss_wcl)
            train_bar.update()

        train_state['train_loss'].append(running_loss)
        train_state = update_train_state(args=args, model=WML_model, train_state=train_state)
        scheduler.step(train_state['train_loss'][-1])

        # update_train_state flags early stopping when the loss stops improving.
        if train_state['stop_early']:
            epoch_bar.close()
            train_bar.close()
            break

        train_bar.reset()  # restart the inner bar for the next epoch (documented tqdm API)

        epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'])
        epoch_bar.update()

    # Plot the per-epoch mean training loss.
    plt.figure(figsize=(10, 7))
    sns.lineplot(
        x=[epoch + 1 for epoch in range(len(train_state['train_loss']))],
        y=train_state['train_loss'],
        color='coral',
        label='loss',
    )

    plt.xticks([epoch for epoch in range(len(train_state['train_loss']) + 1)])
    plt.show()