#!/usr/bin/env python  
#-*- coding:utf-8 _*-  
""" 
@author:hello_life 
@license: Apache Licence 
@file: main.py 
@time: 2022/04/04
@software: PyCharm 
description: this file is used to train the model
"""
import os
import sys
import logging

import pickle

import torch
import torch.nn as nn
import pandas as pd
from tqdm import tqdm

# Configure root logging to a file.
# NOTE(review): basicConfig opens the file eagerly, so the "./logs" directory
# must already exist or this raises FileNotFoundError at import time — confirm
# the directory is created elsewhere before this module is imported.
logging.basicConfig(
    level=logging.INFO,
    filename="./logs/logs",
    # stream=sys.stdout,  # alternative: log to stdout instead of a file
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)

def train_loop(dataloader,model,loss_fn,optimizer,save_path,save_dir,resume,num_epoch):
    """
    Run the training loop, periodically logging windowed accuracy and
    checkpointing the best-performing model.

    :param dataloader: training data loader (yields (X, y) batches)
    :param model: the model to train
    :param loss_fn: loss function
    :param optimizer: optimizer
    :param save_path: file path where the best checkpoint is written
    :param save_dir: directory containing save_path (created if missing)
    :param resume: path of a checkpoint to resume from; "" starts fresh
    :param num_epoch: total number of epochs to train
    :return: None
    """
    start_epoch, start_step = 0, 0
    size = len(dataloader.dataset)
    best, correct, prev = 0, 0, 0

    # Optionally resume model/optimizer state from a previous checkpoint.
    if resume != "":
        logging.info(f"loading from {resume}")
        checkpoint = torch.load(resume)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']
        start_step = checkpoint['step']

    for epoch_index in range(start_epoch, num_epoch):

        num_batchs = len(dataloader)
        # Ensure layers such as dropout/batch-norm are in training mode
        # (an evaluation pass may have switched the model to eval mode).
        model.train()

        for batch, (X, y) in enumerate(dataloader):
            step = num_batchs * epoch_index + batch + 1
            # Compute prediction and loss
            pred = model(X)
            loss = loss_fn(pred, y)

            # Backpropagation
            optimizer.zero_grad()
            loss.backward()

            # Clip gradients to stabilise training
            nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()

            # Accumulate the number of correct predictions in the current window
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
            if batch % 100 == 0:
                # NOTE(review): this normalisation assumes every batch in the
                # window holds len(X) samples; a smaller final batch skews it.
                loss_val, current, correct = loss.item(), batch*len(X), correct/((batch-prev+1)*len(X))
                # Save the checkpoint with the best windowed accuracy so far
                if correct > best:
                    best = correct
                    # BUGFIX: os.mkdir fails on nested paths and races with
                    # other processes; makedirs(..., exist_ok=True) is safe.
                    os.makedirs(save_dir, exist_ok=True)
                    torch.save({
                        'epoch': epoch_index,
                        'step': step,
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        # BUGFIX: store the plain float rather than the live
                        # loss tensor (which still carries autograd state).
                        'loss': loss.item(),
                    }, save_path)
                    logging.info(f"checkpoint has been saved in {save_path}")
                logging.info(f"loss:{loss_val:>7f},correct:{correct:>7f} [{current:>5d}/{size:>5d}]")
                correct, prev = 0, batch+1

        test_loop(dataloader, model, loss_fn)

def test_loop(dataloader,model,loss_fn):
    """
    该方法用于检测集合
    :param dataloader:
    :param model:
    :param loss_fn:
    :return:
    """
    size=len(dataloader.dataset)
    num_batches=len(dataloader)
    test_loss,correct=0,0

    with torch.no_grad():
        for X,y in dataloader:
            pred=model(X)
            test_loss+=loss_fn(pred,y).item()
            correct+=(pred.argmax(1)==y).type(torch.float).sum().item()

    test_loss/=num_batches
    correct/=size
    logging.info(f"Test Error:\n Accuracy:{(100*correct):>0.1f}%,Avg loss:{test_loss:>8f}\n")

def predict_loop(model,dataloader,save_path):
    """
    Load a checkpoint and predict class labels for the first batch.

    :param model: model instance matching the checkpoint's architecture
    :param dataloader: prediction data loader (yields (X, y) batches)
    :param save_path: path of the checkpoint to load
    :return: tensor of predicted class indices for the first batch,
        or None when the dataloader is empty
    """
    checkpoint = torch.load(save_path)
    model.load_state_dict(checkpoint['model_state_dict'])

    predict = None
    model.eval()
    with torch.no_grad():
        for X, y in dataloader:
            predict = model(X).argmax(1)
            # Only the first batch is predicted — the original
            # `if batch % 100 == 0: break` always fired at batch 0.
            break
    # BUGFIX: the original printed `predict` unconditionally, raising
    # NameError when the dataloader was empty.
    if predict is not None:
        print(predict)
    return predict




if __name__ == '__main__':
    # Paths to the raw dataset and the pre-built vocabulary pickle.
    raw_csv_path = "../Data/IMDB Dataset.csv"
    vocab_pickle_path = "../Data/vocab.pkl"
    # getVocab(raw_csv_path, vocab_pickle_path, True)  # rebuild the vocabulary if needed
    with open(vocab_pickle_path, "rb") as vocab_file:
        vocab = pickle.load(vocab_file)
        print(vocab)