import argparse

from data import data_loader
from model.LeNet import LeNet
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm

from utils.Metric import Metric

# Command-line configuration for the training run.
cli = argparse.ArgumentParser()
cli.add_argument('--epochs', type=int, default=10, help='number of epochs')
cli.add_argument('--batch_size', type=int, default=64, help='batch size')
cli.add_argument('--lr', type=float, default=0.001, help='learning rate')
cli.add_argument('--num_classes', type=int, default=10, help='number of classes')
cli.add_argument('--root', type=str, default='./data', help='path to dataset')
cli_args = cli.parse_args()
print(cli_args)

# Unpack into module-level names used by the rest of the script.
num_classes = cli_args.num_classes
epochs = cli_args.epochs
batch_size = cli_args.batch_size
lr = cli_args.lr
root = cli_args.root

# Build the three dataset splits and the model/optimizer/logging objects.
# NOTE(review): data_loader is project-local; assumed to return
# (train, val, test) DataLoaders for `root` — confirm against data.py.
train_loader, val_loader, test_loader = data_loader(root,batch_size)
model = LeNet(num_classes)
# Prefer GPU when available; all batches are moved to the same device below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# TensorBoard writer; logs to the default ./runs directory.
writer = SummaryWriter()

# Main loop: one pass of train / validation / test per epoch, with per-epoch
# loss and accuracy logged to TensorBoard under the epoch index.
for epoch in range(epochs):
    model.train()
    running_loss = 0.0
    correct_train = 0
    total_train = 0

    for images,labels in tqdm(train_loader,desc=f'Epoch {epoch + 1}/{epochs} [Train]'):
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # FIX: accumulate with `+=` (was `=`, so the epoch "average" below
        # was actually last-batch-loss / num_batches).
        running_loss += loss.item()

        # Track training predictions vs. ground-truth labels
        _, predicted = torch.max(outputs, 1)
        total_train += labels.size(0)
        correct_train += predicted.eq(labels).sum().item()
    train_accuracy = Metric(correct_train,total_train).accuracy()
    train_loss = running_loss / len(train_loader)
    # FIX: log against `epoch` (was the constant `epochs`, which collapsed
    # every epoch's point onto a single TensorBoard step).
    writer.add_scalar('train_loss',train_loss,epoch)
    writer.add_scalar('train_accuracy',train_accuracy,epoch)
    print(f'[Epoch {epoch+1}/{epochs}]: Train Loss {train_loss:.4f}')

    # Validation pass — no gradients, model in eval mode.
    model.eval()
    correct_val = 0
    total_val = 0
    val_running_loss = 0.0
    with torch.no_grad():
        for images,labels in tqdm(val_loader,desc=f'Epoch {epoch + 1}/{epochs} [Validation]'):
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            # FIX: accumulate (was `=`)
            val_running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total_val += labels.size(0)
            correct_val += predicted.eq(labels).sum().item()

    val_accuracy = Metric(correct_val,total_val).accuracy()
    # FIX: average the validation accumulator (was dividing the TRAINING
    # accumulator `running_loss` by the validation loader length).
    val_loss = val_running_loss / len(val_loader)
    writer.add_scalar('val_loss',val_loss,epoch)
    writer.add_scalar('val_accuracy',val_accuracy,epoch)
    print(f'Validation Loss {val_loss:.4f} Validation Accuracy {val_accuracy:.4f}')

    # Test pass (model is still in eval mode from the validation block).
    # NOTE(review): evaluating on the test split every epoch risks implicit
    # test-set tuning — consider a single final test pass instead.
    correct_test = 0
    total_test = 0
    test_running_loss = 0.0
    with torch.no_grad():
        for images,labels in tqdm(test_loader,desc=f'Epoch {epoch + 1}/{epochs} [Test]'):
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)
            # FIX: accumulate (was `=`)
            test_running_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total_test += labels.size(0)
            correct_test += predicted.eq(labels).sum().item()

    test_accuracy = Metric(correct_test,total_test).accuracy()
    # FIX: average the test accumulator (was using `running_loss`).
    test_loss = test_running_loss / len(test_loader)
    writer.add_scalar('test_loss',test_loss,epoch)
    writer.add_scalar('test_accuracy',test_accuracy,epoch)

    print(f'Test Loss {test_loss:.4f} Test Accuracy {test_accuracy:.4f}')

writer.close()






