#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

import torch.optim
import torchvision
from torch import nn
from torch.utils.data import DataLoader

from model import *

# CIFAR-10 train/test splits, downloaded on first run and converted to
# tensors on load. A single ToTensor instance is shared: it is stateless.
_to_tensor = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10(
    root='./dataset', train=True, transform=_to_tensor, download=True)
test_data = torchvision.datasets.CIFAR10(
    root='./dataset', train=False, transform=_to_tensor, download=True)

# Dataset sizes, reported for a quick sanity check.
train_data_size = len(train_data)
test_data_size = len(test_data)

print("训练数据集的长度为: {}".format(train_data_size))
print("测试数据集的长度为: {}".format(test_data_size))

# Mini-batch loaders: shuffle the training set each epoch, keep test order fixed.
train_data_loader = DataLoader(train_data, batch_size=64, shuffle=True)
test_data_loader = DataLoader(test_data, batch_size=64, shuffle=False)

# Build the network. `Modle` comes from `model.py` via the wildcard import;
# NOTE(review): the class name looks like a typo for "Model" — fix upstream.
model = Modle()
# Cross-entropy loss for the 10-class CIFAR-10 classification task.
loss_function = nn.CrossEntropyLoss()
# Adam optimizer over all model parameters.
# NOTE(review): 1e-2 is high for Adam (default is 1e-3) — confirm intended.
learning_rate = 1e-2
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Training bookkeeping.
# Total number of optimizer steps taken so far.
total_train_step = 0
# Total number of test/evaluation passes recorded so far.
total_test_step = 0
# Number of epochs to train for.
epoch = 20

#开始训练
for i in range(0,epoch):
    print('第{}轮训练开始'.format(i+1))
    #训练步骤开始
    for data in train_data_loader:
        imgs,target = data
        output = model(imgs)
        loss=loss_function(output,target)
        #优化器优化模型
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step += 1

        print('训练次数: {}, Loss: {}'.format(total_train_step, loss.item()))
        if loss.item() < 0.1:
            print('训练结束')
            break

# Persist the trained weights (state_dict only, not the pickled module).
path = './model/model.pth'
# Bug fix: torch.save raises FileNotFoundError if ./model/ does not exist yet.
os.makedirs(os.path.dirname(path), exist_ok=True)
print('保存模型:' + path)
torch.save(model.state_dict(), path)
