#!/usr/bin/env python  
# -*- coding: utf-8 -*-
""" 
@author:hello_life 
@license: Apache Licence 
@file: train_utils.py 
@time: 2022/04/23
@software: PyCharm 
description:
"""
import sys,os
sys.path.insert(0,os.path.dirname(os.getcwd()))

from tqdm import tqdm
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score
from transformers import AdamW
from transformers import get_scheduler
from accelerate import Accelerator

from parameters.albert_config import Config

config=Config()

def train_loop(model,train_dataloder,test_dataloader):
    """Train `model` on `train_dataloder`, running `test_loop` after every epoch.

    Args:
        model: a classification model returning logits of shape (batch, n_classes).
        train_dataloder: iterable of (x, y) training batches.
        test_dataloader: iterable of (x, y) evaluation batches.

    Side effects: periodically saves a checkpoint to `config.save_model` and
    prints training/evaluation metrics. Returns None.
    """
    # Optimizer: standard transformer recipe — no weight decay on biases
    # and LayerNorm weights, 0.01 decay on everything else.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {"params": [p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)],
         "weight_decay": 0.01},
        {"params": [p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)],
         "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=config.lr)

    # Loss function
    loss_fn = nn.CrossEntropyLoss()

    # Linear learning-rate decay over the whole training run, no warmup.
    num_training_steps = config.epoches * len(train_dataloder)
    lr_scheduler = get_scheduler(
        "linear",
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps,
    )

    # Accelerator handles device placement / mixed precision / DDP.
    # FIX: the scheduler must be prepared as well so its step count stays in
    # sync when accelerate shards the dataloader across processes.
    accelerator = Accelerator()
    train_dataloder, test_dataloader, model, optimizer, lr_scheduler = accelerator.prepare(
        train_dataloder, test_dataloader, model, optimizer, lr_scheduler
    )

    # Training loop
    for epoch in range(config.epoches):
        model.train()
        # FIX: give tqdm the total so the progress bar shows percentage/ETA.
        for step, (x, y) in tqdm(enumerate(train_dataloder), total=len(train_dataloder)):
            outputs = model(x)

            loss = loss_fn(outputs, y)
            # accelerator.backward replaces loss.backward() (handles scaling/DDP).
            accelerator.backward(loss)

            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            if step % 10 == 0:
                # FIX: unwrap before saving so state-dict keys carry no
                # "module." prefix under DDP, and write from the main process
                # only to avoid concurrent writes to the same file.
                if accelerator.is_main_process:
                    torch.save(accelerator.unwrap_model(model).state_dict(), config.save_model)
                # Report batch accuracy and loss.
                acc = accuracy_score(y.cpu().numpy(), outputs.argmax(1).cpu().numpy())
                loss_value = loss.item()
                print(f"epoch:{epoch}/{config.epoches},step:{step}/{len(train_dataloder)},acc:{acc},loss:{loss_value}")
        test_loop(model, loss_fn, test_dataloader)

def test_loop(model,loss_fn,dataloader):
    """Evaluate `model` over `dataloader` and print mean loss and accuracy.

    Args:
        model: classification model returning logits.
        loss_fn: criterion taking (logits, targets).
        dataloader: iterable of (x, y) batches.

    Prints the metrics; returns None.
    """
    model.eval()
    running_loss = 0.0
    preds = []
    labels = []
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        for x, y in dataloader:
            logits = model(x)
            running_loss += loss_fn(logits, y).item()
            preds.extend(logits.argmax(1).cpu().numpy())
            labels.extend(y.cpu().numpy())
    # Mean loss over batches; accuracy over all collected samples.
    loss_value = running_loss / len(dataloader)
    acc = accuracy_score(labels, preds)
    print(f"Test: loss:{loss_value},acc:{acc}")

def evaluate_loop(model,dataloader):
    """Load the saved checkpoint into `model` and print predicted class ids.

    Args:
        model: classification model matching the checkpoint at `config.save_model`.
        dataloader: iterable of (x, y) batches; labels are ignored.

    Prints `argmax` predictions per batch; returns None.
    """
    model.load_state_dict(torch.load(config.save_model))
    # FIX: was `model.eaval()` — AttributeError at runtime.
    model.eval()
    # FIX: was `with torch.no_grad:` — the context manager must be called.
    with torch.no_grad():
        for x, y in dataloader:
            output = model(x)
            print(output.argmax(1))
