import os
import paddle
import paddle.nn as nn
import paddle.optimizer as optim
from paddle.static import InputSpec
from PIL import Image

# Select the computation device: GPU if this Paddle build was compiled
# with CUDA support, CPU otherwise.
# Fix: paddle.device(...) and paddle.cuda.is_available() do not exist —
# those are torch-style calls. The Paddle APIs are
# paddle.device.set_device(...) and paddle.device.is_compiled_with_cuda().
device = paddle.device.set_device(
    "gpu" if paddle.device.is_compiled_with_cuda() else "cpu"
)
print(device)

# 定义简单的CNN网络
# Simple two-block CNN classifier.
class CNN(nn.Layer):
    """Two conv/pool blocks followed by a two-layer classifier head.

    Expects NCHW input of shape (N, 1, 64, 40); after two 2x max-pools
    the feature map is (N, 64, 16, 10), which matches fc1's input size
    of 64 * 16 * 10.

    Args:
        num_classes: number of output logits (default 10).
    """

    def __init__(self, num_classes=10):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2D(1, 32, kernel_size=(3, 3), padding=1)
        self.relu1 = nn.ReLU()
        # Fix: nn.Pool2D was removed in Paddle 2.x. MaxPool2D with
        # kernel_size=2, stride=2 is the equivalent of the old
        # Pool2D(pool_size=2, pool_type='max', pool_stride=2).
        self.pool1 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2D(32, 64, kernel_size=(3, 3), padding=1)
        self.relu2 = nn.ReLU()
        self.pool2 = nn.MaxPool2D(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(64 * 16 * 10, 128)
        self.relu3 = nn.ReLU()
        self.fc2 = nn.Linear(128, num_classes)

    def forward(self, x):
        """Return raw class logits (no softmax) for a batch of images."""
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu3(x)
        x = self.fc2(x)
        return x

# Set up the data loader and the model.
# Banner marking the start of dataset construction.
print("     --------------构建数据集--------------     ")
# Dataset root used below: 'D:\code\machineLearning\class\dataset'


# 数据加载
def load_data():
    """Load the local training set from disk.

    Scans the class folders ``0`` .. ``9`` under the dataset root; every
    file inside folder ``i`` becomes one sample labelled ``i``.

    Returns:
        list of ``[PIL.Image, paddle.Tensor]`` pairs, where the tensor
        is a one-hot label of length 10.

    NOTE(review): images are returned as PIL objects, not tensors —
    downstream code must convert them (and normalize) before feeding
    the model.
    """
    dataset_root = 'D:\\code\\machineLearning\\class\\dataset'
    dataset = []
    for label_idx in range(10):
        folder_path = os.path.join(dataset_root, str(label_idx))
        print(folder_path)

        for file_name in os.listdir(folder_path):
            img_path = os.path.join(folder_path, file_name)
            # Fix: Image.open is lazy and keeps the file handle open;
            # copy the image inside a context manager so the handle is
            # released immediately (avoids fd exhaustion on large sets).
            with Image.open(img_path) as img:
                image = img.copy()

            # One-hot encode the folder index,
            # e.g. 0 -> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0].
            # Fix: paddle.zeros expects a shape *list*, not a bare int.
            labels = paddle.zeros([10])
            labels[label_idx] = 1
            dataset.append([image, labels])
    return dataset

# 定义训练函数
def train(model, train_loader, num_epochs=10, learning_rate=0.001):
    # 定义优化器和损失函数
    optimizer = optim.Adam(learning_rate=learning_rate, parameters=model.parameters())
    loss_fn = nn.CrossEntropyLoss()

    # 开始训练循环
    for epoch in range(num_epochs):
        for batch_data in train_loader():
            x_data, y_data = batch_data
            x_data = paddle.to_tensor(x_data)
            y_data = paddle.to_tensor(y_data)

            # 前向传播
            logits = model(x_data)

            # 计算损失
            loss = loss_fn(logits, y_data)

            # 反向传播和优化
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.numpy()}")

if __name__ == "__main__":
    # Build the network and wrap it in the high-level Model API.
    model = CNN()

    # Declare the expected input: NCHW, batch-size agnostic.
    input_spec = InputSpec(shape=[-1, 1, 64, 40], dtype='float32')
    # Fix: paddle.Model's keyword argument is `inputs`, not `input_spec`.
    model = paddle.Model(model, inputs=input_spec)

    # Load the training data.
    train_loader = load_data()

    # Configure the training environment.
    # Fix: the optimizer must be bound to the model's parameters via
    # `parameters=`, otherwise it has nothing to update.
    model.prepare(optimizer=optim.Adam(learning_rate=0.001,
                                       parameters=model.parameters()),
                  loss=nn.CrossEntropyLoss(),
                  metrics=paddle.metric.Accuracy())

    # Start training.
    # NOTE(review): load_data() yields [PIL.Image, one-hot tensor] pairs;
    # Model.fit expects a Dataset/DataLoader of tensors — images must be
    # converted to float32 arrays for fit() to actually run. Confirm the
    # intended preprocessing with the data owner.
    model.fit(train_loader, epochs=10, batch_size=64, verbose=1)



