# 1.帮助命令 dir() help() print() print(type())
import torch
# print(torch.cuda.is_available())

# 2.PyCharm和Jupyter

# 3.Dataset数据集
'''
__init__
__getitem__
__len__
'''

# 4.TensorBoard看板
'''
pip install tensorboard
'''
# from torch.utils.tensorboard import SummaryWriter
# writer = SummaryWriter("logs")
# for i in range(100):
#   writer.add_scalar("y=x", i, i)
# writer.close()



# 5.transforms图片转换
'''
ToTensor
Normalize 加快梯度下降速度
Resize
Compose
RandomCrop随机裁剪
'''

# 6.torchvision
# import torchvision
# train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, download=True)
# test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, download=True)

# img, target = train_set[0]

# print(img, target, train_set.classes[target])
# img.show()

# 7.Dataloader
# import torchvision
# test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=torchvision.transforms.ToTensor())
# from torch.utils.data import DataLoader
# test_loader = DataLoader(dataset=test_set, batch_size=4, shuffle=True, num_workers=0, drop_last=False)


# 8.nn.Module

# 9.卷积操作

# 10.卷积层
'''
卷积作用:
提取数据特征
效果:图片颜色加深

Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0, dilation=1)
dilation间隔

工具:
reshape() 变换尺寸
# torch.reshape(input, (-1, 1, 2, 2))
'''

# 11.池化层
'''
池化作用:
压缩数据特征,减少数据量
效果:图片马赛克

MaxPool2d(kernel_size=3, stride=1, padding=0, dilation=1, return_indices=True, ceil_mode=True)
取卷积核范围内最大值
ceil_mode 向上取整
'''

# 12.非线性激活
'''
作用:为神经网络引入非线性特征

ReLU(input, inplace=False)
作用:小于0的替换为0
inplace是否替换input原值

Sigmoid()
作用:Sigmoid公式计算
效果:灰度变换,压缩灰度范围
'''

# 13.线性层
'''
Linear
'''

# 14.Sequential
'''
序列
nn.Sequential(卷积,池化...)
'''

# 15.损失函数和反向传播
'''
L1Loss()
MSELoss() 均方误差 解决回归问题(分类问题常用CrossEntropyLoss)
'''

# 16.优化器
'''
optim.zero_grad() 梯度归零
optim.step() 优化参数
'''

# 17.模型保存和加载
'''
torch.save(model, 'model.pth')
model_load1 = torch.load('model.pth')

torch.save(model.state_dict(), 'model_state.pth')
model_load2 = model.load_state_dict(torch.load('model_state.pth'))
'''

# 18.完整模型训练套路
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Prepare the CIFAR10 datasets (downloaded to ./dataset on first run).
_to_tensor = torchvision.transforms.ToTensor()
train_data = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=_to_tensor, download=True)
test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=_to_tensor, download=True)

# Report dataset sizes.
train_data_size, test_data_size = len(train_data), len(test_data)
print('train_data_size: {}'.format(train_data_size))
print('test_data_size: {}'.format(test_data_size))

# Wrap the datasets in DataLoaders (mini-batches of 64, no shuffle).
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataLoader = DataLoader(test_data, batch_size=64)

# 搭建神经网络
class Model(nn.Module):
  """CIFAR10 classifier: three conv+maxpool stages, then two linear layers.

  Input:  (N, 3, 32, 32) image batch
  Output: (N, 10) raw class scores
  """

  def __init__(self):
    super(Model, self).__init__()
    # Build the layer stack as a list first, then hand it to nn.Sequential;
    # layer order (and thus state_dict keys) matches the reference network.
    layers = [
      nn.Conv2d(3, 32, 5, 1, 2),   # -> (N, 32, 32, 32)
      nn.MaxPool2d(2),             # -> (N, 32, 16, 16)
      nn.Conv2d(32, 32, 5, 1, 2),  # -> (N, 32, 16, 16)
      nn.MaxPool2d(2),             # -> (N, 32, 8, 8)
      nn.Conv2d(32, 64, 5, 1, 2),  # -> (N, 64, 8, 8)
      nn.MaxPool2d(2),             # -> (N, 64, 4, 4)
      nn.Flatten(),                # -> (N, 1024)
      nn.Linear(1024, 64),
      nn.Linear(64, 10),
    ]
    self.model = nn.Sequential(*layers)

  def forward(self, x):
    """Run a batch through the network and return per-class scores."""
    return self.model(x)
  
# Instantiate the network
model = Model()

# Loss: cross-entropy, suited to the 10-class classification task
loss_fn = nn.CrossEntropyLoss() 

# Optimizer: plain SGD over all model parameters
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Global step counters (train steps / test evaluations) and epoch count
total_train_step, total_test_step = 0, 0
epoch = 10

writer = SummaryWriter("./logs/log_train")

for i in range(epoch):
  print("---第{}轮训练开始---".format(i+1))

  # --- training phase ---
  model.train() #注1
  for data in train_dataloader:
    imgs, targets = data
    outputs = model(imgs)
    loss = loss_fn(outputs, targets)
    # standard optimization step: zero grads, backprop, update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    total_train_step = total_train_step + 1
    if total_train_step % 100 == 0:
      # log training loss every 100 steps
      print("训练次数：{}, loss: {}".format(total_train_step, loss.item()))
      writer.add_scalar("train_loss", loss.item(), total_train_step)
  
  # --- evaluation phase: whole test set, gradients disabled ---
  model.eval() #注2
  total_test_loss = 0
  total_accuracy = 0
  with torch.no_grad():
    for data in test_dataLoader:
      imgs, targets = data
      outputs = model(imgs)
      loss = loss_fn(outputs, targets)
      total_test_loss = total_test_loss + loss.item()
      # number of correct predictions in this batch
      accuracy = (outputs.argmax(1) == targets).sum()
      total_accuracy = total_accuracy + accuracy
  total_test_step = total_test_step + 1
  print("整体测试集上的loss: {}".format(total_test_loss))
  print("整体测试集上的正确率：{}".format(total_accuracy/test_data_size))
  writer.add_scalar("test_loss", total_test_loss, total_test_step)
  writer.add_scalar("test_accuracy", total_accuracy/test_data_size, total_test_step)

  # Save the full model each epoch (NOTE(review): ./saveModels must already exist)
  torch.save(model, "./saveModels/model_{}.pth".format(i))
  # state_dict-only alternative: torch.save(model.state_dict(), './saveModels/model_{}.pth'.format(i))
  print('模型已保存')

writer.close()
'''
注1,注2：特定情况下有效————网络模型层有Dropout,BatchNorm等
'''



# GPU训练
'''
# google colab
方式一：
模型，损失函数，训练测试过程中的数据调用cuda()
if torch.cuda.is_available():
  model = model.cuda()
  loss_fn = loss_fn.cuda()
  imgs =imgs.cuda() 
  targets = targets.cuda()

方式二：(常用)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
loss_fn.to(device)
imgs = imgs.to(device) 
targets = targets.to(device)
'''


# 完整的模型验证
'''
test.py
引入图片
引入训练好的模型
'''

