import os
import numpy as np
import paddle
from paddle.io import Dataset
from paddle.vision.datasets import DatasetFolder, ImageFolder
from paddle.vision.transforms import Compose, Resize, Transpose, ColorJitter, Normalize,RandomHorizontalFlip,RandomVerticalFlip,RandomRotation

class Garbages_Dataset(Dataset):
    """Garbage-classification dataset wrapping an on-disk image folder.

    Step 1: subclass ``paddle.io.Dataset``.

    In ``"Train"`` mode samples come from a labelled ``DatasetFolder`` and
    ``__getitem__`` returns ``(image, label)``; in ``"Test"`` mode samples
    come from an unlabelled ``ImageFolder`` and only the image is returned.
    """

    # NOTE(review): train and test point at the SAME directory, so "Test"
    # is not a held-out split — confirm this is intended.
    _TRAIN_IMAGE_DIR = '/home/aistudio/data/data89094/Garbages'
    _TEST_IMAGE_DIR = '/home/aistudio/data/data89094/Garbages'

    def __init__(self, mode="Train"):
        """Step 2: build the transform pipeline and load the chosen split.

        Args:
            mode (str): ``"Train"`` for labelled data, ``"Test"`` for
                unlabelled data.

        Raises:
            ValueError: if ``mode`` is neither ``"Train"`` nor ``"Test"``.
                (The original code silently left ``self.data`` unset in
                that case, causing a confusing AttributeError later.)
        """
        super(Garbages_Dataset, self).__init__()
        self.mode = mode
        # Resize -> random horizontal flip -> scale pixels to [-1, 1]
        # (still HWC at that point) -> transpose to CHW for the network.
        transform = Compose([
            Resize(size=(256, 256)),
            RandomHorizontalFlip(0.5),
            Normalize(mean=[127.5, 127.5, 127.5],
                      std=[127.5, 127.5, 127.5],
                      data_format='HWC'),
            Transpose(),
        ])
        # Only scan the directory actually needed for this split — the
        # original eagerly built both folders on every instantiation.
        if self.mode == "Train":
            self.data = DatasetFolder(self._TRAIN_IMAGE_DIR, transform=transform)
        elif self.mode == "Test":
            self.data = ImageFolder(self._TEST_IMAGE_DIR, transform=transform)
        else:
            raise ValueError(
                'mode must be "Train" or "Test", got {!r}'.format(mode))

    def __getitem__(self, index):
        """Step 3: return the sample at ``index``.

        Returns:
            ``(image, label)`` in "Train" mode — image as a float32
            ndarray, label as a length-1 int64 ndarray; the image alone
            in "Test" mode.
        """
        data = np.array(self.data[index][0]).astype('float32')
        if self.mode == "Train":
            label = np.array([self.data[index][1]]).astype('int64')
            return data, label
        # "Test" mode: no label available in an ImageFolder.
        return data

    def __len__(self):
        """Step 4: number of samples in the selected split."""
        return len(self.data)

# Module-level dataset instances consumed by the training run below.
# NOTE(review): both splits currently read the same directory (see the
# paths inside Garbages_Dataset), so "Test" is not a held-out set — verify.
train_dataset = Garbages_Dataset(mode="Train")
test_dataset = Garbages_Dataset(mode="Test")


# Build an ImageNet-pretrained ResNet-152 and wrap it in Paddle's
# high-level Model API.
# NOTE(review): the pretrained head keeps its 1000-class output layer;
# if the garbage dataset has fewer classes, pass num_classes=<n> here
# (pretrained backbone weights still load) — confirm the class count.
resnet152 = paddle.vision.models.resnet152(pretrained=True)
model = paddle.Model(resnet152)

# Training hyper-parameters: optimizer, loss and accuracy metric.
batch_size = 32
EPOCHS = 2
# Derive steps-per-epoch from the actual dataset size instead of the
# hard-coded 4000 samples, so the cosine annealing schedule's T_max
# spans exactly one full training run regardless of dataset size.
step_each_epoch = len(train_dataset) // batch_size
model.prepare(
    optimizer=paddle.optimizer.Adam(
        parameters=model.parameters(),
        learning_rate=paddle.optimizer.lr.CosineAnnealingDecay(
            learning_rate=0.000125,
            T_max=step_each_epoch * EPOCHS,
        ),
    ),
    loss=paddle.nn.CrossEntropyLoss(),
    metrics=paddle.metric.Accuracy(topk=(1, 5)),
)

# Launch training: dataset, epoch count, per-step batch size, and
# progress-bar verbosity.
model.fit(train_dataset,
          epochs=EPOCHS,
          batch_size=batch_size,
          verbose=1)

# Export an inference-only model (training=False drops optimizer state).
model.save("Model/Garbage", training=False)



