import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from skimage import color
from skimage.io import imread
from torchvision import transforms

# Per-class CIE LAB color priors (L, a, b).
# Row i is the reference/target color for the material class whose
# index is i in `prefix_to_index` below — the two tables must stay aligned.
LAB_priors = [
    [93.69,-0.55,-3.34],
    [92.22,-0.63,-3.39],
    [94.31,0.24,1.09],
    [98.58,0.09,-0.25],
    [97.66,0.12,-0.22],
    [92.57,-0.57,1.3],
    [93.98,-1.37,-2.61]
]
# Maps a filename prefix (material/color name) to its row index in LAB_priors.
# NOTE: keys are literal prefixes parsed from image filenames — do not rename.
prefix_to_index = {
    "8002色用":0,
    "8002a色用":1,
    "9003色用":2,
    "白象牙1003":3,
    "枫171a抽":4,
    "雅登":5,
    "中抽":6
}

class CustomDataset(Dataset):
    """Pairs each PNG in *data_dir* with the LAB prior of its material class.

    Filenames are expected to look like ``"<prefix>-<n>.png"``; the prefix
    selects a row of *lab_priors* via *prefix_to_index*.
    """

    def __init__(self, data_dir, lab_priors, prefix_to_index):
        self.data_dir = data_dir
        self.lab_priors = lab_priors
        self.prefix_to_index = prefix_to_index
        # Sort for a deterministic index -> file mapping across runs.
        self.rgb_files = sorted(f for f in os.listdir(data_dir) if f.endswith('.png'))

    def __len__(self):
        return len(self.rgb_files)

    def __getitem__(self, idx):
        """Return (image, lab) where image is a (3, H, W) float tensor in [0, 1]
        and lab is the (3,) LAB prior for the file's class."""
        rgb_file = self.rgb_files[idx]
        # "prefix-nn.png" -> "prefix"; unknown (or dash-less) names fall back to class 0.
        prefix = '-'.join(rgb_file.split('-')[:-1])
        index = self.prefix_to_index.get(prefix, 0)
        # Shape (3,), so a DataLoader batch is (B, 3) and matches the model output.
        # (The old (1, 3) shape silently broadcast to (B, B, 3) inside MSELoss.)
        lab_values = torch.tensor(self.lab_priors[index], dtype=torch.float32)

        rgb_path = os.path.join(self.data_dir, rgb_file)
        rgb_image = imread(rgb_path)
        image = torch.as_tensor(rgb_image, dtype=torch.float32)
        if image.dim() == 3:
            # HWC -> CHW for Conv2d; drop a possible alpha channel.
            image = image[..., :3].permute(2, 0, 1)
        # Assumes 8-bit PNGs (skimage returns uint8) — scale to [0, 1].
        image = image / 255.0

        return image, lab_values



# Multi-head feature-mixing module.
# NOTE(review): despite the name there is no softmax(QK^T)V here — each "head"
# is a plain Linear layer; kept as-is, only the output projection size is fixed.
class MultiHeadSelfAttention(nn.Module):
    def __init__(self, in_channels, num_heads):
        """Project the input through *num_heads* parallel Linear heads and
        fuse their concatenation back to *in_channels* features."""
        super(MultiHeadSelfAttention, self).__init__()
        self.num_heads = num_heads
        self.in_channels = in_channels
        self.attention_heads = nn.ModuleList([nn.Linear(in_channels, in_channels) for _ in range(num_heads)])
        # Fix: the heads are concatenated on the feature axis, so the output
        # projection must accept in_channels * num_heads features
        # (Linear(in_channels, in_channels) crashed at runtime).
        self.out_linear = nn.Linear(in_channels * num_heads, in_channels)

    def forward(self, x):
        """x: (batch, seq, in_channels) -> (batch, seq, in_channels)."""
        head_outputs = [head(x) for head in self.attention_heads]
        combined_heads = torch.cat(head_outputs, dim=-1)  # (B, S, in_channels * num_heads)
        return self.out_linear(combined_heads)

# Pyramid pooling module (PSPNet-style): pool at several scales, project,
# upsample back, concatenate with the input, and fuse with a 1x1 conv.
class PyramidPooling(nn.Module):
    def __init__(self, in_channels, pool_sizes):
        super(PyramidPooling, self).__init__()
        self.pools = nn.ModuleList([nn.AdaptiveAvgPool2d(size) for size in pool_sizes])
        self.convs = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1) for _ in pool_sizes])
        self.final_conv = nn.Conv2d(in_channels * (len(pool_sizes) + 1), in_channels, 1)

    def forward(self, x):
        """x: (B, C, H, W) -> (B, C, H, W)."""
        pool_outs = [x]
        for pool, conv in zip(self.pools, self.convs):
            pool_out = conv(pool(x))
            # Fix: pooled maps are 1x1/2x2/... while x is HxW, so they must be
            # upsampled to x's size before torch.cat along the channel axis
            # (the original cat of mismatched spatial sizes crashed at runtime).
            pool_out = nn.functional.interpolate(
                pool_out, size=x.shape[2:], mode='bilinear', align_corners=False
            )
            pool_outs.append(pool_out)
        output = torch.cat(pool_outs, dim=1)
        return self.final_conv(output)

# RGB image -> single LAB triple regression model.
class RGBtoLABModel(nn.Module):
    def __init__(self):
        super(RGBtoLABModel, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, padding=1)
        # Features are adaptively pooled to 8x8 in forward(), so one "token"
        # of 32 * 8 * 8 = 2048 features feeds the attention block.
        self.multihead_self_attention = MultiHeadSelfAttention(32 * 8 * 8, 8)
        self.pyramid_pooling = PyramidPooling(32, [1, 2, 4])
        # Fix: pyramid output is (B, 32, 8, 8) -> 2048 flattened features;
        # the original Linear(32 * 32, 3) never matched that size.
        self.fc1 = nn.Linear(32 * 8 * 8, 3)

    def forward(self, x):
        """x: (B, 3, H, W) with H, W >= 8 -> (B, 3) LAB prediction."""
        x = torch.relu(self.conv1(x))
        x = torch.relu(self.conv2(x))
        # Fix: pool to a fixed 8x8 so the reshapes below hold for any input
        # size (the original view(B, -1, 2048) -> view(B, 32, 8, 8) pair only
        # agreed when the feature map was already exactly 8x8).
        x = nn.functional.adaptive_avg_pool2d(x, (8, 8))
        x = x.view(x.size(0), 1, 32 * 8 * 8)   # one 2048-d token per image
        x = self.multihead_self_attention(x)
        x = x.view(x.size(0), 32, 8, 8)
        x = self.pyramid_pooling(x)
        x = torch.flatten(x, 1)
        return self.fc1(x)

# Load an RGB image from disk and convert it to the CIE LAB color space.
def rgb_to_lab(image_path):
    """Read the image at *image_path* and return it as a LAB array."""
    return color.rgb2lab(imread(image_path))

# Training loss: mean squared error between predicted and target LAB triples.
def loss_function(predicted_lab, true_lab):
    """Return the mean squared error between *predicted_lab* and *true_lab*."""
    return nn.functional.mse_loss(predicted_lab, true_lab)

# Entry point: train the RGB -> LAB model on the cropped-image dataset.
if __name__ == "__main__":
    data_dir = "/home/gf/work/lyx/mstpp/rgb2lab/data/cropped_images"
    dataset = CustomDataset(data_dir, LAB_priors, prefix_to_index)
    dataloader = DataLoader(dataset, batch_size=2, shuffle=True)

    model = RGBtoLABModel()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    num_epochs = 50

    for epoch in range(num_epochs):
        epoch_loss = 0.0
        num_batches = 0
        for rgb_images, lab_images in dataloader:
            optimizer.zero_grad()
            # Accept raw HWC (e.g. uint8 from skimage) batches as well as
            # already-converted CHW float batches from the dataset.
            if rgb_images.dim() == 4 and rgb_images.size(1) != 3 and rgb_images.size(-1) == 3:
                rgb_images = rgb_images.permute(0, 3, 1, 2).contiguous()
            rgb_images = rgb_images.float()
            # Fix: the target, not the image, needs collapsing — flatten a
            # possible (B, 1, 3) prior to (B, 3) so it matches model output
            # (the old squeeze(1) on rgb_images was a no-op and the (B, 1, 3)
            # target broadcast incorrectly inside MSELoss).
            lab_images = lab_images.view(lab_images.size(0), -1).float()
            outputs = model(rgb_images)
            loss = criterion(outputs, lab_images)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            num_batches += 1
        # Fix: report the epoch's mean loss, not just the last batch's
        # (which also raised NameError when the dataloader was empty).
        avg_loss = epoch_loss / max(num_batches, 1)
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {avg_loss}')