﻿import torch
from torch.nn import functional as F
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
import time


from model import *
from args import get_args
from data_load import load_dataset


def train_model(model, device, train_loader, optimizer, criterion, epochs, writer):
    """Run one training epoch and log epoch-average loss/accuracy to TensorBoard.

    Args:
        model: network to train (set to train mode here).
        device: torch device string/object the batches are moved to.
        train_loader: iterable of (data, target) batches; must support len().
        optimizer: optimizer stepping the model's parameters.
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        epochs: current epoch index, used as the TensorBoard global step.
        writer: SummaryWriter-like object with an add_scalar(tag, value, step) method.
    """
    model.train()
    correct = 0
    total = 0
    total_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

        # Running accuracy over all samples seen so far this epoch.
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        total += target.size(0)

        if batch_idx % 100 == 0:
            # Average over the batches processed so far (the original divided
            # by len(train_loader) mid-epoch, understating the running loss).
            running_loss = total_loss / (batch_idx + 1)
            accuracy = 100. * correct / total
            print(f"Epoch{epochs} Average_loss: {running_loss:.4f} ,accuracy:{accuracy:.2f}%")

    # Log exactly once per epoch at step `epochs`; the original called
    # add_scalar on every batch with the same step, overwriting the point.
    average_loss = total_loss / len(train_loader)
    accuracy = 100. * correct / total
    writer.add_scalar('training loss', average_loss, epochs)
    writer.add_scalar('training accuracy', accuracy, epochs)


def test_model(model,device,test_loader,criterion,epochs,writer):
    model.eval()
    correct =0
    total = 0
    total_loss =0
    with torch.no_grad():
        for data,target in test_loader:
            data,target =data.to(device),target.to(device)
            output = model(data)
            loss = criterion(output,target)
            total_loss += loss.item()

            #计算准确率
            pred =output.argmax(dim=1,keepdim=True)
            correct +=pred.eq(target.view_as(pred)).sum().item()
            total +=target.size(0)


        accuracy = 100.*correct/total
        Avarage_loss = total_loss/len(test_loader)
        print(f"Epoch{epochs} test_Avatage_loss: {Avarage_loss:.4f} ,test_accuracy:{accuracy:.2f}%")
        
        writer.add_scalar('test loss', Avarage_loss, epochs)
        writer.add_scalar('test accuracy', accuracy, epochs)


def main():
    """Parse CLI arguments, build the model and optimizer, then train/evaluate
    for the requested number of epochs while logging to TensorBoard."""
    # Argument parsing.
    args = get_args()
    args = args.parse_args()
    print(args.work_dir)

    # Hyperparameters.
    device = args.device
    batch_size = args.batch_size
    num_epochs = args.num_epochs
    torch.manual_seed(args.seed)

    # NOTE: the original did `optim = args.optim` here, which rebound the
    # imported `torch.optim` module to a *string* and made every later
    # `optim.Adam(...)` call crash with AttributeError. The alias is removed;
    # `args.optim` is used directly below.

    print(f'num_epochs:{num_epochs} ')
    print('device:', device)
    criterion = nn.CrossEntropyLoss()

    # Model selection. The factories come from model.py (star import).
    model_factories = {
        'MLP_3': get_MLP_3,
        'MLP_3MAX': get_MLP_3MAX,
        'MLP_6': get_MLP_6,
        'MLP_9': get_MLP_9,
        'MLP_3_BN': get_MLP_3_BN,
        'MLP_3_BN_XavierInit': get_MLP_3_BN_XavierInit,
        'MLP_3_BN_KaimingInit': get_MLP_3_BN_KaimingInit,
    }
    if args.model not in model_factories:
        # The original fell through silently and crashed later with NameError.
        raise ValueError(f"unknown model: {args.model}")
    model = model_factories[args.model]().to(device)

    # Adam kernel selection (see torch.optim.Adam docs):
    #   foreach=True  - batched multi-tensor implementation; the default when
    #                   foreach=None and all tensors live on CUDA.
    #   fused=True    - fused CUDA kernel; takes precedence over foreach.
    #   foreach=False, fused=False - plain per-tensor for-loop implementation.
    # AMSGrad (not used here) keeps the max of past squared gradients instead
    # of the running average; differentiable=True is mainly for meta-learning,
    # where the optimization step itself must be differentiated.
    if args.optim == 'adam':
        print('adam')
        adam_kwargs = dict(lr=args.lr, weight_decay=args.weight_decay)
        if args.foreach == 'foreach':
            print('foreach')
            adam_kwargs['foreach'] = True
        elif args.foreach == 'fused':
            print('fused')
            adam_kwargs['fused'] = True
        elif args.foreach == 'for':
            print('for')
            adam_kwargs['foreach'] = False
            adam_kwargs['fused'] = False
        optimizer = optim.Adam(model.parameters(), **adam_kwargs)
    elif args.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(), lr=args.lr,
                              momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        # The original fell through and crashed later with NameError.
        raise ValueError(f"unsupported optimizer: {args.optim}")

    train_loader, test_loader = load_dataset(batch_size)

    # Timestamped run directory so repeated runs don't overwrite each other.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    work_dir = f"{args.work_dir}_{timestamp}"
    writer = SummaryWriter(work_dir)

    time_start = time.time()
    for epochs in range(num_epochs):
        train_model(model, device, train_loader, optimizer, criterion, epochs, writer)
        test_model(model, device, test_loader, criterion, epochs, writer)

    # Wall-clock time: useful to compare the foreach / fused / for-loop kernels.
    time_stop = time.time()
    print(f"Total time: {time_stop - time_start:.2f} seconds")
    # add_scalar needs a numeric value; the original passed a formatted string,
    # which SummaryWriter rejects.
    writer.add_scalar('train time', time_stop - time_start)
    writer.close()
    
    

   


# Script entry point: only run training when executed directly, not on import.
if __name__=='__main__':
    main()
                          