from torchvision.datasets import ImageNet,CIFAR100
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
from torch.utils.data.dataset import Dataset
import torch.optim as optim
import numpy as np

from darknet import Darknet

transform = transforms.Compose([
    transforms.Resize(size=(32,32)), # the fully-connected head needs a fixed input size
    transforms.ToTensor(),
])
# Use torchvision's built-in CIFAR-100 dataset (downloaded on first run).
root = './dataset/'
train_data = CIFAR100(
    root, train=True, transform=transform, target_transform=None, download=True)
# NOTE(review): test_data is downloaded but never used below — no evaluation loop yet.
test_data = CIFAR100(
    root, train=False, transform=transform, target_transform=None, download=True)

# Wrap the Dataset in an iterable DataLoader (shuffled mini-batches of 128).
train_loader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True, sampler=None,)

# 3 input channels (RGB), classifier head with 100 outputs for CIFAR-100.
model = Darknet(in_chanel=3, use_fc=True, fc_out=100)

# Prefer GPU when available; fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = torch.device("cpu")
print("正在使用{}".format(device))
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr = 2e-4)
# NOTE(review): NLLLoss expects log-probabilities — confirm Darknet's final
# layer applies log_softmax; otherwise CrossEntropyLoss would be required.
loss_fn = nn.NLLLoss()

def get_parameter_number(net):
    """Count the parameters of *net*.

    Returns a dict with 'Total' (number of elements across all parameters)
    and 'Trainable' (elements of parameters with requires_grad=True).
    """
    total = 0
    trainable = 0
    for param in net.parameters():
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
    return {'Total': total, 'Trainable': trainable}
# Report model size before training (total vs. trainable parameter counts).
print(get_parameter_number(model))


# Training loop: 3 epochs over the CIFAR-100 training set.
for epoch in range(3):
    # Running statistics for this epoch.
    total_loss = 0.0
    total_correct = 0          # correctly classified samples so far
    total_data_length = len(train_data)
    cur_data_length = 0        # samples processed so far this epoch

    # Ensure dropout/batch-norm layers are in training mode.
    model.train()

    for i, data in enumerate(train_loader):
        # One mini-batch of (images, class indices).
        inputs, labels = data

        # Tensors come off the loader on the CPU; move them to the chosen device.
        inputs, labels = inputs.to(device), labels.to(device)

        # Clear gradients from the previous step — PyTorch accumulates .grad
        # across backward() calls unless it is explicitly zeroed.
        optimizer.zero_grad()

        # Forward pass.
        outputs = model(inputs)

        # NLLLoss takes raw class indices (no one-hot encoding needed).
        # NOTE(review): NLLLoss expects log-probabilities — confirm Darknet's
        # final layer applies log_softmax.
        loss = loss_fn(outputs, labels.view(-1).long())

        # Backward pass from the loss.
        loss.backward()

        # Parameter update according to the optimizer's rule (Adam).
        optimizer.step()

        # Accumulate metrics. Counting correct SAMPLES (rather than averaging
        # per-batch accuracies) keeps the running accuracy exact even though
        # the final batch is smaller than the others.
        with torch.no_grad():
            total_correct += (torch.argmax(outputs, 1) == labels).sum().item()
        total_loss += loss.item()

        cur_data_length += inputs.size(0)
        print("\rEpoch: {:d} batch: {:d} loss: {:.4f} acc: {:.4f} | {:.2%}"
              .format(epoch + 1, i + 1, total_loss / (i + 1), total_correct / cur_data_length, cur_data_length / total_data_length),
              end='')
    print("")
