import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import copy
import os
from skimage.io import imread
from PIL import Image

# LAB color-difference prior values: one [L, a, b] triple per material/finish,
# indexed by the integers in `prefix_to_index` below.
LAB_priors = [
    [93.69,-0.55,-3.34],
    [92.22,-0.63,-3.39],
    [94.31,0.24,1.09],
    [98.58,0.09,-0.25],
    [97.66,0.12,-0.22],
    [92.57,-0.57,1.3],
    [93.98,-1.37,-2.61]
]
# Mapping from image-filename prefix (material name) to an integer index
# into LAB_priors. Keys are matched literally against filename prefixes,
# so they must stay byte-identical to the on-disk names.
prefix_to_index = {
    "8002色用":0,
    "8002a色用":1,
    "9003色用":2,
    "白象牙1003":3,
    "枫171a抽":4,
    "雅登":5,
    "中抽":6
}
class CustomDataset(Dataset):
    """Dataset of cropped PNG images paired with per-prefix LAB color priors.

    Each filename is expected to look like "<prefix>-<number>.png"; the
    prefix selects a row of `lab_priors` via `prefix_to_index`.
    """

    def __init__(self, data_dir, lab_priors, prefix_to_index, transform=None):
        """
        Args:
            data_dir: root directory; images are read from
                <data_dir>/cropped_images.
            lab_priors: list of [L, a, b] prior triples.
            prefix_to_index: mapping from filename prefix to an index into
                lab_priors.
            transform: optional torchvision transform; defaults to ToTensor +
                ImageNet-style normalization.
                NOTE(review): no Resize is applied here, yet the downstream
                model's fc layer assumes 32x32 inputs — presumably the
                cropped images are already 32x32; confirm.
        """
        self.data_dir = os.path.join(data_dir, 'cropped_images')
        self.lab_priors = lab_priors
        self.prefix_to_index = prefix_to_index
        self.transform = transform if transform else transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])
        # Sort for a deterministic sample order: os.listdir() returns files
        # in arbitrary, platform-dependent order.
        self.image_files = sorted(
            f for f in os.listdir(self.data_dir) if f.endswith('.png')
        )

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        """Return (image_tensor, lab_tensor) for the idx-th image.

        Raises:
            ValueError: if the filename prefix has no entry in
                prefix_to_index.
        """
        img_name = self.image_files[idx]
        img_path = os.path.join(self.data_dir, img_name)
        image = Image.open(img_path).convert('RGB')

        # Filename format is assumed to be "<prefix>-<number>.png".
        prefix = img_name.split('-')[0]
        if prefix in self.prefix_to_index:
            index = self.prefix_to_index[prefix]
            lab_values = torch.tensor(self.lab_priors[index], dtype=torch.float32)
        else:
            raise ValueError(f"未找到前缀 '{prefix}' 的色差先验信息。")

        if self.transform:
            image = self.transform(image)

        return image, lab_values


class MultiHeadSelfAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (Vaswani et al., 2017).

    Splits an `embed_size` embedding into `heads` independent heads, applies
    scaled dot-product attention per head, and recombines the heads through
    a final linear projection.
    """

    def __init__(self, embed_size, heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads

        assert (
            self.head_dim * heads == embed_size
        ), "Embed size needs to be divisible by heads"

        # Per-head linear maps for values, keys and queries (bias-free).
        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(heads * self.head_dim, embed_size)

    def forward(self, values, keys, query, mask):
        """Compute attention.

        Args:
            values, keys, query: tensors of shape [N, seq_len, embed_size].
            mask: optional tensor broadcastable to [N, heads, q_len, k_len];
                positions where mask == 0 are excluded from attention.

        Returns:
            Tensor of shape [N, query_len, embed_size].
        """
        N = query.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]

        # Split embeddings into per-head chunks: [N, len, heads, head_dim].
        values = values.reshape(N, value_len, self.heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.heads, self.head_dim)
        queries = query.reshape(N, query_len, self.heads, self.head_dim)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Dot-product attention scores: [N, heads, query_len, key_len].
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        if mask is not None:
            # Large negative value -> ~0 weight after softmax.
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        # BUGFIX: scale by sqrt(d_k) — the per-head dimension — rather than
        # sqrt(embed_size), as in "Attention Is All You Need".
        attention = torch.softmax(energy / (self.head_dim ** 0.5), dim=3)

        # Weighted sum over keys, then merge the heads back together.
        out = torch.einsum("nhql,nlhd->nqhd", [attention, values]).reshape(
            N, query_len, self.heads * self.head_dim
        )

        return self.fc_out(out)

class ConvBlock(nn.Module):
    """A 2-D convolution immediately followed by an in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, padding):
        super(ConvBlock, self).__init__()
        # Attribute names are kept so state_dict keys stay stable.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        features = self.conv(x)
        return self.activation(features)


class RGBtoLABModel(nn.Module):
    """CNN + self-attention regressor mapping a 3x32x32 RGB image to (L, a, b).

    Three conv/pool stages reduce a 32x32 input to a 64-channel 4x4 feature
    map (64 * 4 * 4 = 1024 features), which is fed through a multi-head
    self-attention layer and finally projected to 3 outputs.
    """

    def __init__(self):
        super(RGBtoLABModel, self).__init__()
        # Convolution blocks; each ConvBlock already applies ReLU internally.
        self.conv1 = ConvBlock(3, 16, kernel_size=3, padding=1)
        self.conv2 = ConvBlock(16, 32, kernel_size=3, padding=1)
        self.conv3 = ConvBlock(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)  # halves H and W at each stage

        # Multi-head self-attention over the flattened feature map.
        # NOTE: with a 32x32 input the feature map is 4x4 (not 8x8, as the
        # original comment claimed), giving embed size 64 * 4 * 4 = 1024.
        self.attention = MultiHeadSelfAttention(64 * 4 * 4, 8)

        # Final projection from the flattened features to L, a, b.
        self.fc = nn.Linear(64 * 4 * 4, 3)

    def forward(self, x):
        # FIX: ConvBlock already ends in ReLU, so the extra F.relu() the
        # original wrapped around each block was a redundant no-op (ReLU is
        # idempotent); removing it leaves the output unchanged.
        x = self.pool(self.conv1(x))
        x = self.pool(self.conv2(x))
        x = self.pool(self.conv3(x))

        # Flatten to a [batch, 1, 1024] single-token "sequence" so the
        # attention layer's [N, seq_len, embed_size] contract is met.
        x = x.view(x.size(0), -1, 1)
        x = x.permute(0, 2, 1)
        x = self.attention(x, x, x, None)  # self-attention: Q = K = V

        # Flatten again and map to the three LAB outputs.
        x = x.view(x.size(0), -1)
        return self.fc(x)

# Loss function used for training.
def loss_function(predicted_lab, true_lab):
    """Mean-squared-error loss between predicted and target LAB triples."""
    return F.mse_loss(predicted_lab, true_lab)

# Main program: train the model and save the best checkpoint.
if __name__ == "__main__":
    model = RGBtoLABModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    criterion = loss_function

    data_dir = './data'
    dataset = CustomDataset(data_dir, LAB_priors, prefix_to_index)
    dataloader = DataLoader(dataset, batch_size=10, shuffle=True)

    best_loss = float('inf')
    best_model_params = copy.deepcopy(model.state_dict())

    # Training loop.
    num_epochs = 150
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        for rgb_images, lab_targets in dataloader:
            optimizer.zero_grad()
            outputs = model(rgb_images)
            loss = criterion(outputs, lab_targets)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

        # Average loss over all batches this epoch.
        epoch_loss = running_loss / len(dataloader)
        # BUGFIX: report the epoch average, not the last batch's loss.
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss}')

        # Track the parameters from the lowest-loss epoch.
        if epoch_loss < best_loss:
            best_loss = epoch_loss
            best_model_params = copy.deepcopy(model.state_dict())
            print(f"New best loss: {best_loss}, saving model...")

    # BUGFIX: the save now runs inside the __main__ guard — previously these
    # lines sat at module level, so importing this file would execute them
    # and crash on names defined only under the guard. Also ensure the
    # output directory exists before saving.
    os.makedirs('./model', exist_ok=True)
    torch.save(best_model_params, './model/model_best.pth')
    print(f"Model with the best loss {best_loss} saved as model_best.pth")