import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from PIL import Image
import numpy as np

# Select CUDA when a GPU is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Multi-head self-attention over the spatial positions of a feature map.
class MultiHeadSelfAttention(nn.Module):
    """Multi-head self-attention applied to a 2D feature map.

    The (B, C, H, W) input is projected to queries/keys/values with a
    single 1x1 convolution; attention is computed across the H*W spatial
    positions with the channel dimension split evenly into ``num_heads``
    heads. Output shape equals input shape.
    """

    def __init__(self, in_channels, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.num_heads = num_heads
        self.head_dim = in_channels // num_heads
        assert (
            self.head_dim * num_heads == in_channels
        ), "输入通道数必须能被头数整除"

        # One 1x1 conv emits Q, K and V in a single pass (3 * C channels).
        self.qkv_proj = nn.Conv2d(in_channels, 3 * in_channels, kernel_size=1)
        self.out_proj = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def forward(self, x):
        """Return an attention-mixed tensor with the same shape as ``x``."""
        batch_size, channels, height, width = x.size()
        qkv = self.qkv_proj(x)
        q, k, v = qkv.chunk(3, dim=1)

        # (B, C, H, W) -> (B, heads, H*W, head_dim)
        def split_heads(t):
            return t.view(
                batch_size, self.num_heads, self.head_dim, height * width
            ).transpose(-2, -1)

        q, k, v = split_heads(q), split_heads(k), split_heads(v)

        # Scaled dot-product attention over the spatial positions.
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim**0.5)
        attn_probs = torch.softmax(attn_scores, dim=-1)
        attn_output = torch.matmul(attn_probs, v)

        # Merge heads back into the channel dim: (B, C, H, W).
        attn_output = attn_output.transpose(-2, -1).contiguous().view(
            batch_size, channels, height, width
        )
        return self.out_proj(attn_output)

# Fusion network: two stacked RGB images in, one RGB image out.
class ImageFusionNet(nn.Module):
    """Fuse two RGB images (concatenated to 6 channels) into one RGB image.

    ``conv1`` lifts the 6-channel input to 32 feature maps, a multi-head
    self-attention layer mixes spatial information, and ``conv2`` projects
    back down to 3 output channels. Spatial size is preserved throughout
    (3x3 convs with padding=1).
    """

    def __init__(self, in_channels=6, num_heads=4):
        super(ImageFusionNet, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.attention = MultiHeadSelfAttention(32, num_heads)
        self.conv2 = nn.Conv2d(32, 3, kernel_size=3, padding=1)

    def forward(self, x):
        """Map a (B, 6, H, W) tensor to a fused (B, 3, H, W) tensor."""
        x = self.relu(self.conv1(x))
        x = self.attention(x)
        return self.conv2(x)

# Load an image file and convert it to a batched tensor on `device`.
def load_image(image_path):
    """Load an image file as a (1, 3, H, W) float tensor on ``device``.

    The image is forced to RGB so that downstream code, which assumes
    exactly 3 channels per image, also works for grayscale or RGBA files.
    """
    print(f"开始加载图像: {image_path}")
    # Force 3 channels: a grayscale or RGBA input would otherwise break
    # the 6-channel concatenation expected by the fusion network.
    image = Image.open(image_path).convert("RGB")
    transform = transforms.ToTensor()
    tensor_image = transform(image).unsqueeze(0)  # add batch dimension
    tensor_image = tensor_image.to(device)  # move to GPU when available
    print(f"图像加载完成，张量形状: {tensor_image.shape}")
    return tensor_image

# Neural-network-based image fusion.
def horizontally_fuse_images(net, image1, image2):
    """Run ``net`` on the channel-wise concatenation of two image tensors.

    Both inputs must share batch and spatial dimensions; their channels
    are stacked along dim 1 before being passed through ``net``.
    """
    print(f"开始图像融合，image1 形状: {image1.shape}，image2 形状: {image2.shape}")
    stacked = torch.cat((image1, image2), dim=1)
    print(f"拼接后的输入张量形状: {stacked.shape}")
    fused = net(stacked)
    print(f"图像融合完成，输出张量形状: {fused.shape}")
    return fused

# Main entry point: load two images, briefly train the fusion network,
# and write the fused result to disk.
def main():
    image_path1 = 'src31.jpg'  # replace with your image path
    image_path2 = 'src32.jpg'  # replace with your image path
    image1 = load_image(image_path1)
    image2 = load_image(image_path2)

    # Initialize the network and move it to the GPU when available.
    net = ImageFusionNet().to(device)
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    # Track the best (lowest) loss seen so far.
    min_loss = float('inf')

    # Short training loop: the target is the two inputs side by side.
    for epoch in range(10):
        print(f"----  开始第 {epoch + 1} 个训练轮次")
        optimizer.zero_grad()
        fused_image = horizontally_fuse_images(net, image1, image2)
        target = torch.cat((image1, image2), dim=3)
        # The network preserves spatial size, so tile the fused image
        # along the width axis to match the side-by-side target.
        if fused_image.size(3) != target.size(3):
            repeat_factor = target.size(3) // fused_image.size(3)
            fused_image = torch.cat([fused_image] * repeat_factor, dim=3)
            print(f"调整融合图像尺寸后，形状: {fused_image.shape}")
        loss = criterion(fused_image, target)
        print(f"第 {epoch + 1} 个训练轮次的损失值: {loss.item()}")

        # Save the parameters whenever a new best loss is reached.
        if loss.item() < min_loss:
            min_loss = loss.item()
            torch.save(net.state_dict(), 'best_model.pth')
            print(f"第 {epoch + 1} 个训练轮次保存了最优模型，损失值: {min_loss}")

        loss.backward()
        optimizer.step()
        print(f"第 {epoch + 1} 个训练轮次结束")

    # Produce the final fused image with the trained network.
    print("开始生成最终融合图像")
    final_fused_image = horizontally_fuse_images(net, image1, image2)
    # Apply the same width adjustment to the final fusion result.
    target = torch.cat((image1, image2), dim=3)
    if final_fused_image.size(3) != target.size(3):
        repeat_factor = target.size(3) // final_fused_image.size(3)
        final_fused_image = torch.cat([final_fused_image] * repeat_factor, dim=3)
        print(f"调整最终融合图像尺寸后，形状: {final_fused_image.shape}")
    final_fused_image = final_fused_image.squeeze(0).cpu().detach().numpy()  # back to CPU
    final_fused_image = np.transpose(final_fused_image, (1, 2, 0))  # CHW -> HWC
    # Clamp to [0, 1] before scaling: the network output is unbounded, and
    # casting negative / >1 values straight to uint8 would wrap around.
    final_fused_image = np.clip(final_fused_image, 0.0, 1.0)
    final_fused_image = (final_fused_image * 255).astype(np.uint8)
    final_fused_image = Image.fromarray(final_fused_image)
    final_fused_image.save('horizontally_fused_output.jpg')
    print("最终融合图像保存完成")

if __name__ == "__main__":
    main()
