﻿import torch
from torch.utils.data import Dataset    # Dataset读取单个样本的抽象类，包括读取样本的标签、样本的索引、样本的特征
from torch.utils.data import DataLoader   # 数据加载器，组合数据集和采样器，并在数据集上提供单进程或多进程迭代器
from torchvision import datasets,transforms    # torchvision是PyTorch中专门用于图像处理的库,datasets是其中的数据集模块,transforms是其中的数据预处理模块
from torchvision.transforms import ToTensor

from torch.nn import functional as F
from torch import nn, optim

import matplotlib.pyplot as plt


from torch.utils.tensorboard import SummaryWriter


import argparse #参数解释器


class MLP_3(nn.Module):
    """Three-layer fully-connected classifier for 32x32 RGB images (10 classes)."""

    def __init__(self):
        super(MLP_3, self).__init__()
        # 32*32*3 flattened pixels -> 1024 -> 256 -> 10 logits.
        self.fc1 = nn.Linear(32 * 32 * 3, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Flatten the input batch and return raw class logits."""
        flat = x.view(-1, 32 * 32 * 3)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)


def load_dataset(batch_size):
    """Build CIFAR-10 train/test DataLoaders.

    Args:
        batch_size: number of samples per batch for both loaders.

    Returns:
        (train_dataloader, test_dataloader) tuple.
    """
    # ToTensor maps pixels to [0, 1]; normalizing each RGB channel with
    # mean 0.5 / std 0.5 then maps them into [-1, 1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # Both splits are downloaded to ../data on first use.
    training_data = datasets.CIFAR10(
        root="../data",
        train=True,
        download=True,
        transform=transform,
    )
    test_data = datasets.CIFAR10(
        root="../data",
        train=False,
        download=True,
        transform=transform,
    )

    # Shuffle only the training set; keep the test iteration order fixed.
    train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
    test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
    return train_dataloader, test_dataloader

def train_model(model,device,train_loader,optimizer,criterion,epochs,writer):
    model.train()
    correct =0
    total = 0
    total_loss =0
    for batch_idx, (data, target) in enumerate(train_loader):
        data,target =data.to(device),target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output,target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

        #计算准确率
        pred =output.argmax(dim=1,keepdim=True)
        correct +=pred.eq(target.view_as(pred)).sum().item()
        total +=target.size(0)


        accuracy = 100.*correct/total
        Avatage_loss = total_loss/len(train_loader)
        if batch_idx % 100 == 0:
            print(f"Epoch{epochs} Avatage_loss: {Avatage_loss:.4f} ,accuravy:{accuracy:.2f}%")
        
        writer.add_scalar('training loss', Avatage_loss, epochs)
        writer.add_scalar('training accuracy', accuracy, epochs)


def test_model(model,device,test_loader,criterion,epochs,writer):
    model.eval()
    correct =0
    total = 0
    total_loss =0
    with torch.no_grad():
        for data,target in test_loader:
            data,target =data.to(device),target.to(device)
            output = model(data)
            loss = criterion(output,target)
            total_loss += loss.item()

            #计算准确率
            pred =output.argmax(dim=1,keepdim=True)
            correct +=pred.eq(target.view_as(pred)).sum().item()
            total +=target.size(0)


        accuracy = 100.*correct/total
        Avatage_loss = total_loss/len(test_loader)
        print(f"Epoch{epochs} test_Avatage_loss: {Avatage_loss:.4f} ,test_accuravy:{accuracy:.2f}%")
        
        writer.add_scalar('test loss', Avatage_loss, epochs)
        writer.add_scalar('test accuracy', accuracy, epochs)

def main():
    """Parse CLI arguments, train MLP_3 on CIFAR-10, and log to TensorBoard."""
    # FIX: bind the parser to its own name instead of reusing `args`
    # (the old code rebound `args` from parser to namespace), and keep the
    # default --help flag: add_help=False made every help string below
    # unreachable from the command line.
    parser = argparse.ArgumentParser(description='Train a 3-layer MLP on CIFAR-10')
    parser.add_argument('--batch_size', type=int, default=128, help='input batch size for training (default: 128)')
    parser.add_argument('--num_epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    parser.add_argument('--device', type=str, default='cuda', help='device to train (default: cuda)')
    parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 0.001)')
    parser.add_argument('--work_dir', type=str, default='runs/CIFAR10_experiment_1', help='training directory (default: runs/CIFAR10_experiment_1)')
    parser.add_argument('--seed', type=int, default=123, help='random seed (default: 123)')
    args = parser.parse_args()

    # FIX: fall back to CPU when CUDA is requested but unavailable,
    # instead of crashing later in model.to(device).
    device = args.device
    if device == 'cuda' and not torch.cuda.is_available():
        print('CUDA not available, falling back to CPU')
        device = 'cpu'
    batch_size = args.batch_size
    num_epochs = args.num_epochs
    torch.manual_seed(args.seed)  # reproducible weight init / shuffling

    print(f'num_epochs:{num_epochs} ')
    print('device:', device)
    criterion = nn.CrossEntropyLoss()
    model = MLP_3().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    train_loader, test_loader = load_dataset(batch_size)

    writer = SummaryWriter(args.work_dir)
    for epochs in range(num_epochs):
        train_model(model, device, train_loader, optimizer, criterion, epochs, writer)
        test_model(model, device, test_loader, criterion, epochs, writer)

    writer.close()
    

   


# Run training only when executed as a script, not when imported as a module.
if __name__=='__main__':
    main()
                          