# 1. Prepare the data (pipeline: tensor ---> Dataset ---> DataLoader)
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import cv2
from matplotlib import pyplot as plt
import numpy as np
#from PIL import Image
class MyDataset(Dataset):
    """Image-classification dataset driven by a CSV annotation file.

    Each non-empty line of the annotation file is expected to look like
    ``<image path>,<integer label>``.  Images are loaded lazily in
    ``__getitem__``, resized to 32x32, collapsed to a single grayscale
    channel and returned as float tensors of shape (1, 32, 32).
    """
    def __init__(self, anno_info=None):
        # Path to the CSV annotation file, e.g. "./train.csv".
        self.anno_info = anno_info
        # List of [image_path, label] pairs parsed from the file.
        self.images = self._read()

    def _read(self):
        """Parse the annotation file into a list of [path, label] pairs."""
        images = []
        with open(file=self.anno_info, mode="r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                # BUG FIX: skip blank lines (e.g. a trailing newline at EOF);
                # the original appended [''] which would crash the two-value
                # unpack in __getitem__.
                if not line:
                    continue
                images.append(line.split(","))
        return images

    def __getitem__(self, idx):
        """Return (image, label): a float tensor (1, 32, 32) and a long scalar."""
        img_path, label = self.images[idx]
        # Normalize Windows-style separators so paths work cross-platform.
        img_path = img_path.replace("\\", "/")
        image = plt.imread(fname=img_path)
        image = cv2.resize(src=image, dsize=(32, 32))
        # Average the color channels to grayscale.
        # NOTE(review): assumes the file is read as H x W x C; a 2-D
        # grayscale source would be corrupted here -- confirm inputs.
        image = image.mean(axis=-1)
        # Add the channel axis: (H, W) -> (C=1, H, W).
        image = image[np.newaxis, :, :]
        return torch.tensor(data=image).float(), torch.tensor(data=int(label)).long()

    def __len__(self):
        return len(self.images)

# Build Dataset/DataLoader pairs for the train and test splits.
# NOTE(review): paths are relative to the working directory -- the CSV
# files must sit next to this script when it runs.
train_dataset = MyDataset(anno_info="./train.csv")
test_dataset = MyDataset(anno_info="./test.csv")
# drop_last avoids a smaller final batch during training; the test loader
# keeps every sample and the default shuffle=False for stable evaluation.
train_dataloader = DataLoader(dataset=train_dataset,batch_size=32,shuffle=True,drop_last=True)
test_dataloader = DataLoader(dataset=test_dataset,batch_size=32)

# 2. Prepare the model.
from 手写lenet import Lenet  # project-local module ("hand-written LeNet") defining the network
# Instantiate the model.
lenet = Lenet()

# 3. Model training.
loss_fn = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = torch.optim.Adam(params=lenet.parameters(),lr=1e-3)

# Why watch both train AND test metrics while training?
# Underfitting: keep training, use a more complex model / richer features.
# Overfitting: simplify the model, gather more data, diversify data (augmentation).
# Evaluation helpers below. For regression one would track mse instead.
def get_loss(dataloader, model=lenet, loss_fn=loss_fn):
    """Return the mean per-batch loss of *model* over *dataloader*.

    Runs in eval mode under ``torch.no_grad()`` so evaluation never
    touches training state or builds autograd graphs.
    """
    model.eval()
    losses = []
    with torch.no_grad():
        for x, y in dataloader:
            y_pred = model(x)
            loss = loss_fn(y_pred, y)
            # .item() detaches to a plain float; the original kept live
            # tensors and forced an awkward np.array(list-of-tensors) at
            # the end.
            losses.append(loss.item())
    # Guard against an empty dataloader instead of dividing by zero / NaN.
    return sum(losses) / len(losses) if losses else 0.0

def get_acc(dataloader, model=lenet, loss_fn=loss_fn):
    """Return the mean per-batch accuracy of *model* over *dataloader*.

    *loss_fn* is accepted only to mirror get_loss's signature; accuracy
    does not use it.
    """
    model.eval()
    batch_accuracies = []
    with torch.no_grad():
        for features, labels in dataloader:
            logits = model(features)
            predicted = logits.argmax(dim=-1)
            hit_rate = (predicted == labels).float().mean()
            batch_accuracies.append(hit_rate.item())
    return np.array(batch_accuracies).mean()


# Training function
def train(model=lenet, dataloader=train_dataloader, optimizer=optimizer, loss_fn=loss_fn, epochs=1):
    """Run the standard training loop for *epochs* passes over *dataloader*.

    Prints train/test accuracy before training and after every epoch.
    The progress prints deliberately report on the module-level train/test
    dataloaders, matching the original behavior.
    """
    print(f"当前是初始状态,训练集acc为：{get_acc(dataloader=train_dataloader)},测试集acc为：{get_acc(dataloader=test_dataloader)}")

    for epoch in range(epochs):
        model.train()
        # BUG FIX: iterate the *dataloader* argument; the original looped
        # over the global train_dataloader, silently ignoring any loader
        # passed in by the caller.
        for x, y in dataloader:
            # 1. forward pass
            y_pred = model(x)
            # 2. compute the loss
            loss = loss_fn(y_pred, y)
            # 3. clear stale gradients
            optimizer.zero_grad()
            # 4. backward pass (compute gradients)
            loss.backward()
            # 5. parameter update (weights minus Adam-scaled gradient)
            optimizer.step()
        print(f"当前是第{epoch}轮,训练集acc为：{get_acc(dataloader=train_dataloader)},测试集acc为：{get_acc(dataloader=test_dataloader)}")
train()  # run one epoch with the module-level model, loaders and optimizer
# 5. Prediction.
def predict(model=lenet, img_path="./datasets/MNIST/test/9/1.jpeg"):
    """Predict the class of a single image file; return the label tensor.

    Preprocessing mirrors MyDataset.__getitem__ (resize first, then
    grayscale) so inference sees the same pixel distribution as training.
    """
    model.eval()
    img = plt.imread(fname=img_path)
    # CONSISTENCY FIX: resize before averaging channels, matching the
    # dataset pipeline (the original averaged first, which interpolates
    # slightly different values than training saw).
    img = cv2.resize(src=img, dsize=(32, 32))
    # NOTE(review): assumes an H x W x C color image -- a 2-D grayscale
    # file would be corrupted by this channel mean; confirm inputs.
    img = img.mean(axis=-1)
    # Add batch and channel axes: (H, W) -> (B=1, C=1, H, W).
    img = img[np.newaxis, np.newaxis, :, :]
    img = torch.tensor(data=img).float()
    with torch.no_grad():
        y_predict = model(img)
        print("开始预测！！！！")
        y_predict = y_predict.argmax(dim=1)
        print("预测结果是：", y_predict)
    # Returning the prediction makes the function usable programmatically;
    # the original implicitly returned None, so this is backward-compatible.
    return y_predict

predict()  # demo prediction on the default sample image path