﻿import torch
from torch.nn import functional as F
from torch import nn, optim
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime


from model import *
from args import get_args
from data_load import load_dataset
import time


def train_model(model, device, train_loader, optimizer, criterion, epochs, writer):
    """Run one training epoch and log epoch-level metrics to TensorBoard.

    Args:
        model: network to train (assumed already moved to `device`).
        device: device string/object that each batch is moved to.
        train_loader: iterable of (data, target) batches.
        optimizer: optimizer stepping `model`'s parameters.
        criterion: loss function (e.g. nn.CrossEntropyLoss).
        epochs: current epoch index; used as the TensorBoard global step.
        writer: SummaryWriter-like object with an `add_scalar` method.
    """
    model.train()
    correct = 0
    total = 0
    total_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

        # Running accuracy over the batches seen so far.
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        total += target.size(0)

        if batch_idx % 100 == 0:
            # Average over batches *processed so far* (batch_idx + 1), not
            # len(train_loader): the original divided a partial sum by the
            # full batch count and under-reported the mid-epoch loss.
            average_loss = total_loss / (batch_idx + 1)
            accuracy = 100. * correct / total
            print(f"Epoch{epochs} Average_loss: {average_loss:.4f} ,accuracy:{accuracy:.2f}%")

    # Log once per epoch. The original called add_scalar on every batch with
    # the same global step, overwriting this epoch's point repeatedly.
    if total:
        writer.add_scalar('training loss', total_loss / len(train_loader), epochs)
        writer.add_scalar('training accuracy', 100. * correct / total, epochs)


def test_model(model,device,test_loader,criterion,epochs,writer):
    model.eval()
    correct =0
    total = 0
    total_loss =0
    with torch.no_grad():
        for data,target in test_loader:
            data,target =data.to(device),target.to(device)
            output = model(data)
            loss = criterion(output,target)
            total_loss += loss.item()

            #计算准确率
            pred =output.argmax(dim=1,keepdim=True)
            correct +=pred.eq(target.view_as(pred)).sum().item()
            total +=target.size(0)


        accuracy = 100.*correct/total
        Avarage_loss = total_loss/len(test_loader)
        print(f"Epoch{epochs} test_Avatage_loss: {Avarage_loss:.4f} ,test_accuracy:{accuracy:.2f}%")
        
        writer.add_scalar('test loss', Avarage_loss, epochs)
        writer.add_scalar('test accuracy', accuracy, epochs)


def main():
    """Parse CLI args, build the model and optimizer, train/evaluate for
    `num_epochs`, and record metrics plus total wall-clock time to TensorBoard."""
    # Argument parsing.
    parser = get_args()
    args = parser.parse_args()
    print(args.work_dir)

    # Hyperparameters.
    device = args.device
    batch_size = args.batch_size
    num_epochs = args.num_epochs
    torch.manual_seed(args.seed)

    print(f'num_epochs:{num_epochs} ')
    print('device:', device)
    criterion = nn.CrossEntropyLoss()

    # Select the network by name via a factory table.
    model_factories = {
        'AlexNet': get_AlexNet,
        'MLP_6': get_MLP_6,
        'LeNet6': get_LeNet6,
    }
    if args.model not in model_factories:
        raise ValueError(f'model is not defined:{args.model}')
    model = model_factories[args.model]().to(device)

    # NOTE(review): args.L2 is compared against the literal string 'True' —
    # presumably it is a string-typed CLI flag; confirm in get_args().
    # Adam's weight_decay defaults to 0, so passing 0 when L2 is disabled
    # is equivalent to omitting the argument.
    l2_decay = args.weight_decay if args.L2 == 'True' else 0
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=l2_decay)

    train_loader, test_loader = load_dataset(batch_size)

    # One timestamped run directory per invocation keeps TensorBoard runs apart.
    run_stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    writer = SummaryWriter(f"{args.work_dir}_{run_stamp}")

    started = time.time()
    for epoch_idx in range(num_epochs):
        train_model(model, device, train_loader, optimizer, criterion, epoch_idx, writer)
        test_model(model, device, test_loader, criterion, epoch_idx, writer)
    elapsed = time.time() - started

    # Message is "training time" (Chinese) followed by elapsed seconds.
    print(f"训练时间：{elapsed}")
    writer.add_scalar('train_time', elapsed)
    writer.close()


if __name__ == '__main__':
    main()
                          