import os
import random

from  PIL import  Image
from torch.utils.data import DataLoader,Dataset
from torchvision import  transforms

# Split each class folder into train/test at an 8:2 ratio.
train_ratio = 0.8
test_ratio = 1 - train_ratio

rootdata = r'D:\niggger\niigger\pythonProject\dataset\train'

train_list, test_list = [], []

# Every discovered file path (populated by the walk below).
data_list = []

# Label of the class folder currently being walked. Starts at -1 because
# os.walk yields rootdata itself first (it holds only class sub-folders,
# no image files), so the first real class folder receives label 0.
class_flag = -1

# os.walk(top) yields (root, dirs, files) for top and every sub-folder:
#   root  - the folder currently being visited
#   dirs  - names of the sub-folders inside it
#   files - names of the files inside it
# Walk rootdata and assign each class folder's files to the train or test
# split. os.path.join(root, name) builds the absolute path of each file
# (each split line is "path<TAB>label\n").
for root, dirs, files in os.walk(rootdata):

    # Build the full path of every file in this folder once, instead of
    # recomputing os.path.join three times per file with index loops.
    paths = [os.path.join(root, name) for name in files]
    data_list.extend(paths)

    # First train_ratio of the files go to the training split.
    split = int(len(files) * train_ratio)

    for path in paths[:split]:
        train_list.append(path + '\t' + str(class_flag) + '\n')

    # The remainder goes to the test split.
    for path in paths[split:]:
        test_list.append(path + '\t' + str(class_flag) + '\n')

    class_flag += 1

# Randomize line order so classes are interleaved in both splits.
random.shuffle(train_list)
random.shuffle(test_list)


# Persist the splits as "path<TAB>label" annotation files.
with open('../dataset/train.txt', 'w', encoding='UTF-8') as f:
    f.writelines(train_list)

with open('../dataset/test.txt', 'w', encoding='UTF-8') as f:
    f.writelines(test_list)


print(test_list)


# Standalone normalization transform (unused; kept for reference):
# transform_BN=transforms.Normalize((0.485,0.456,0.406),(0.226,0.224,0.225))


class LoadData(Dataset):
    """Dataset reading "image_path<TAB>label" lines from an annotation file.

    Args:
        txt_path: path to the annotation txt produced by the split step.
        train_flag: if True use the training transform pipeline, otherwise
            the test pipeline (both are identical here, kept separate so
            augmentation can be added to the train side later).
    """

    def __init__(self, txt_path, train_flag=True):
        # List of [path, label_string] pairs.
        self.imgs_info = self.get_imags(txt_path)
        self.train_flag = train_flag

        # Resize to 32x32; CenterCrop(32) is then a no-op but is kept for
        # parity with the original pipeline. Mean/std are the standard
        # ImageNet statistics.
        self.transform_train = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        self.transform_test = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.CenterCrop(32),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def get_imags(self, txt_path):
        """Parse the annotation file into [[path, label_str], ...].

        (Name keeps the original spelling so external callers still work.)
        """
        with open(txt_path, 'r', encoding='UTF-8') as f:
            imgs_info = [line.strip().split('\t') for line in f]

        return imgs_info

    def __getitem__(self, index):
        """Return (transformed image tensor, int label) for *index*."""
        img_path, label = self.imgs_info[index]

        # Context manager closes the underlying file handle; the original
        # Image.open without a close leaked descriptors (PIL loads lazily).
        # convert("RGB") forces the pixel data to be read before closing.
        with Image.open(img_path) as raw:
            img = raw.convert("RGB")

        if self.train_flag:
            img = self.transform_train(img)
        else:
            img = self.transform_test(img)

        return img, int(label)

    def __len__(self):
        return len(self.imgs_info)

# Annotation files written earlier in this script.
train_txt_path = '../dataset/train.txt'
test_txt_path = '../dataset/test.txt'

train_dataset = LoadData(txt_path=train_txt_path, train_flag=True)
test_dataset = LoadData(txt_path=test_txt_path, train_flag=False)

# Wrap the datasets in DataLoaders; only the training loader shuffles.
batch_size = 64  # adjust to available memory as needed
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# ---- Network model (originally a separate model.py, inlined here) ----

from torch import nn
import torch


# 搭建神经网络
class Tudui(nn.Module):
    """Small CIFAR-style CNN: three conv/pool stages then two linear layers."""

    def __init__(self):
        super(Tudui, self).__init__()
        # Built as a list and unpacked so the Sequential keeps the same
        # numeric sub-module indices (state_dict keys are unchanged).
        layers = [
            nn.Conv2d(3, 32, 5, 1, padding=2),   # 32x32 -> 32x32
            nn.MaxPool2d(2),                     # -> 16x16
            nn.Conv2d(32, 32, 5, 1, padding=2),
            nn.MaxPool2d(2),                     # -> 8x8
            nn.Conv2d(32, 64, 5, 1, padding=2),
            nn.MaxPool2d(2),                     # -> 4x4
            nn.Flatten(),                        # 64 * 4 * 4 = 1024 features
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class scores."""
        return self.model(x)

tudui = Tudui()

if __name__ == '__main__':
    # Architecture smoke test with a dummy batch. Reuses the module-level
    # model: the original re-instantiated Tudui here, silently rebinding
    # the module-level name mid-script. `dummy_input` also avoids
    # shadowing the builtin input().
    dummy_input = torch.ones((64, 3, 32, 32))
    output = tudui(dummy_input)
    print(output.shape)

# Loss: cross-entropy over the 10 class logits.
loss_fn = nn.CrossEntropyLoss()

# Optimizer: plain SGD over all model parameters.
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# Training bookkeeping.
total_train_step = 0  # cumulative number of optimizer steps

total_test_step = 0   # cumulative number of completed evaluation passes

epoch = 10            # number of training epochs

for i in range(epoch):
    print("-------------第 {} 轮训练开始------------".format(i + 1))
    # Training phase: enable train-mode behavior. A no-op for this model,
    # but required once Dropout/BatchNorm layers are added — the original
    # never switched modes around evaluation.
    tudui.train()
    for data in train_loader:
        imgs, targets = data
        output = tudui(imgs)
        loss = loss_fn(output, targets)

        # Standard optimization step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            print("训练次数：{}， Loss：{}".format(total_train_step, loss.item()))

    # Evaluation phase: eval mode plus no_grad to disable gradient tracking.
    tudui.eval()
    total_test_loss = 0
    with torch.no_grad():
        for data in test_loader:
            imgs, targets = data
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
    print("整体测试集上的Loss：{}".format(total_test_loss))

    total_test_step = total_test_step + 1

    # Per-epoch checkpoint: pickles the whole model object (brittle across
    # code changes; kept for compatibility with existing loading code).
    torch.save(tudui, "tudui_{}.pth".format(i))
    print("模型已保存")

# Final checkpoint: state_dict only — the recommended, portable format.
torch.save(tudui.state_dict(), "tudui_final.pth")