import os
import pandas as pd
from PIL import Image
import numpy as np
from mindspore import dataset as ds, dtype as mstype
from mindspore.dataset.transforms import Compose
from mindspore.dataset.vision.transforms import Resize, Normalize, HWC2CHW
from mindspore import nn, Model
from mindspore.nn import SoftmaxCrossEntropyWithLogits, Adam, Accuracy

# Dataset class definition
# Dataset of (image, reference-image) pairs with an integer deflection label,
# driven by a CSV manifest. Designed to feed ds.GeneratorDataset with the
# columns ['image', 'label'].
class CameraDeflectionDataset:
    def __init__(self, csv_file, img_dir):
        """csv_file: CSV whose first three columns are
        [image_id, base_image_id, label]; img_dir: folder of '<id>.jpg' files."""
        self.data_info = pd.read_csv(csv_file)
        self.img_dir = img_dir
        # Eager per-image pipeline: resize -> normalize (ImageNet statistics
        # scaled to the 0-255 pixel range) -> channel-first (CHW) layout.
        self.transform = Compose([
            Resize((224, 224)),  # adjust size according to the model's requirements
            Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
                      std=[0.229 * 255, 0.224 * 255, 0.225 * 255]),
            HWC2CHW()
        ])

    def __getitem__(self, index):
        image_name = os.path.join(str(self.img_dir), str(self.data_info.iloc[index, 0])) + ".jpg"
        base_image_name = os.path.join(str(self.img_dir), str(self.data_info.iloc[index, 1])) + ".jpg"
        label = self.data_info.iloc[index, 2]

        image = Image.open(image_name).convert('RGB')
        base_image = Image.open(base_image_name).convert('RGB')

        if self.transform is not None:
            image = self.transform(image)
            base_image = self.transform(base_image)

        # GeneratorDataset maps each returned item to one column, and each
        # column must be a single ndarray. The original returned a Python
        # tuple of two arrays for the 'image' column; stack them instead so
        # the column has shape (2, 3, 224, 224), matching the view() done in
        # SimpleCNN.construct. The label is cast to int32 as expected by the
        # sparse SoftmaxCrossEntropyWithLogits loss.
        pair = np.stack((np.asarray(image), np.asarray(base_image))).astype(np.float32)
        return pair, np.array(int(label), dtype=np.int32)

    def __len__(self):
        # One sample per row of the CSV manifest.
        return len(self.data_info)

# Load the training dataset
# Build the training pipeline: wrap the generator source, shuffle, and batch.
train_dataset = CameraDeflectionDataset(csv_file='label.csv', img_dir='image_save')
ds_loader = (
    ds.GeneratorDataset(train_dataset, column_names=['image', 'label'], shuffle=True)
    .batch(batch_size=32)  # adjust the batch size to the available hardware
)

# Simple convolutional neural network definition
# Siamese-style CNN: a single shared convolutional trunk encodes each of the
# two input images; the classifier operates on the difference of the two
# embeddings.
class SimpleCNN(nn.Cell):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=0)
        # Create the activation Cell once here: instantiating nn.ReLU()
        # inside construct() (as the original did) is not supported by
        # MindSpore graph mode, where construct is compiled.
        self.relu = nn.ReLU()
        self.gap = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        self.fc = nn.Dense(32, 2)  # binary classification head

    def _encode(self, x):
        """Shared trunk: (conv -> relu -> pool) twice, then GAP to (N, 32)."""
        x = self.pool(self.relu(self.conv1(x)))
        x = self.pool(self.relu(self.conv2(x)))
        return self.gap(x).view(x.shape[0], -1)

    def construct(self, x):
        batch_size = x.shape[0]
        # The input carries the (image, base_image) pair stacked on axis 1.
        x = x.view(batch_size, 2, 3, 224, 224)
        x1 = x[:, 0, :, :, :]  # first image of the pair
        x2 = x[:, 1, :, :, :]  # reference (base) image

        # Encode both images with the same weights and classify the
        # difference of their embeddings (one way to combine the pair).
        return self.fc(self._encode(x1) - self._encode(x2))

# Model setup: instantiate the network, then wire up loss, optimizer and
# metrics before launching training.
net = SimpleCNN()
loss = SoftmaxCrossEntropyWithLogits(sparse=True)
optimizer = Adam(net.trainable_params())

# High-level Model wrapper drives the training loop and metric tracking.
eval_metrics = {"accuracy": Accuracy()}
model = Model(net, loss_fn=loss, optimizer=optimizer, metrics=eval_metrics)

print("开始训练...")
model.train(epoch=100, train_dataset=ds_loader, callbacks=None)
# Load the test set (note the corrected file name) and run inference.
test_dataset = CameraDeflectionDataset(csv_file='label_test.csv', img_dir='image_save')
ds_test_loader = ds.GeneratorDataset(test_dataset, column_names=['image', 'label'], shuffle=False)
ds_test_loader = ds_test_loader.batch(batch_size=1)  # batch of 1: one sample at a time

# Collect the predicted class for every test sample. shuffle=False above
# keeps the iteration order aligned with the rows of label_test.csv, so the
# predictions can be appended as a column below.
predictions = []
for data in ds_test_loader.create_dict_iterator():
    logits = model.predict(data['image'])
    # argmax over the class axis; [0] unwraps the batch of one, int() turns
    # the numpy scalar into a plain Python int for the CSV column.
    predictions.append(int(np.argmax(logits.asnumpy(), axis=1)[0]))

# Append the predictions as a new column and write out a fresh CSV.
df = pd.read_csv('label_test.csv')
df['Prediction'] = predictions
df.to_csv('label_test_with_predictions.csv', index=False)

print("预测完成，并已将结果写入到'label_test_with_predictions.csv'")