"""
基于PyTorch-Lightning的对称增强图像融合算法实现
创建时间：2025-04-10 23:06
"""
from datetime import datetime
import cv2
import torch
import logging
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import pytorch_lightning as pl
from torch.optim.lr_scheduler import CosineAnnealingLR
from pytorch_lightning import seed_everything
import  torch.nn.functional as F

# Initialize the logging system: one log file per calendar day, INFO level
# and above, with timestamped entries.
logging.basicConfig(
    filename=f'train_{datetime.now().strftime("%Y%m%d")}.log',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

class SymmetricFusionGenerator:
    """Loads an RGB image and produces a "symmetric fusion" of it.

    The fusion swaps the top/bottom halves and then the left/right halves,
    i.e. rolls the image by half its height and half its width.
    On any load failure, ``self.src`` is ``None`` and ``generate_fusion``
    returns ``None``.
    """

    def __init__(self, image_path):
        import os
        self.src = None
        if not os.path.exists(image_path):
            print(f"文件 {image_path} 不存在，请检查路径。")
            return
        try:
            bgr = cv2.imread(image_path)
            # cv2.imread signals failure (corrupt/unsupported file) by
            # returning None instead of raising — check explicitly.
            if bgr is None:
                raise ValueError("cv2.imread returned None")
            self.src = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        except Exception as e:
            print(f"读取文件 {image_path} 时出错: {e}")
            self.src = None

    def generate_fusion(self):
        """Return the half-swapped fusion image, or None if loading failed."""
        if self.src is None:
            return None
        h, w = self.src.shape[:2]
        # Swap top and bottom halves.
        upper = self.src[:h // 2, :, :]
        lower = self.src[h // 2:, :, :]
        flipped_vertical = np.vstack((lower, upper))
        # Then swap left and right halves.
        left = flipped_vertical[:, :w // 2, :]
        right = flipped_vertical[:, w // 2:, :]
        return np.hstack((right, left))

class SelfAttentionModule(nn.Module):
    def __init__(self, in_channels, num_heads=4):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = in_channels // num_heads
        assert self.head_dim * num_heads == in_channels, "输入通道数必须能被头数整除"
        self.qkv_proj = nn.Conv2d(in_channels, 3 * in_channels, kernel_size=1)
        self.out_proj = nn.Conv2d(in_channels, in_channels, kernel_size=1)

    def forward(self, x):
        batch_size, channels, height, width = x.size()
        qkv = self.qkv_proj(x)
        q, k, v = qkv.chunk(3, dim=1)
        q = q.view(batch_size, self.num_heads, self.head_dim, height * width).transpose(-2, -1)
        k = k.view(batch_size, self.num_heads, self.head_dim, height * width).transpose(-2, -1)
        v = v.view(batch_size, self.num_heads, self.head_dim, height * width).transpose(-2, -1)
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim**0.5)
        attn_probs = torch.softmax(attn_scores, dim=-1)
        attn_output = torch.matmul(attn_probs, v)
        attn_output = attn_output.transpose(-2, -1).contiguous().view(batch_size, channels, height, width)
        return self.out_proj(attn_output)

class FusionTransformer(pl.LightningModule):
    """Encoder/decoder network that maps a fused image toward a target image.

    Encoder: two downsampling stages, each Conv -> BN -> ReLU -> ResBlock ->
    MaxPool -> self-attention (3->64->128 channels, /4 spatial).
    Decoder: two stride-2 transposed-conv stages back up, then a 3-channel
    conv head with Tanh. ``forward`` bilinearly resizes the output if its
    spatial size does not match the input's.
    """

    def __init__(self):
        super().__init__()

        def down_stage(c_in, c_out):
            # One encoder stage: conv, norm, activation, residual refinement,
            # 2x downsample, then spatial self-attention.
            return [
                nn.Conv2d(c_in, c_out, 3, padding=1),
                nn.BatchNorm2d(c_out),
                nn.ReLU(),
                ResBlock(c_out),
                nn.MaxPool2d(2),
                SelfAttentionModule(c_out),
            ]

        self.encoder = nn.Sequential(*down_stage(3, 64), *down_stage(64, 128))

        def up_stage(c_in, c_out):
            # One decoder stage; output_padding=(1, 0) pads height only.
            return [
                nn.ConvTranspose2d(c_in, c_out, kernel_size=3, stride=2,
                                   padding=1, output_padding=(1, 0)),
                nn.BatchNorm2d(c_out),
                nn.ReLU(),
                ResBlock(c_out),
            ]

        self.decoder = nn.Sequential(
            *up_stage(128, 64),
            *up_stage(64, 32),
            nn.Conv2d(32, 3, kernel_size=3, padding=1),
            nn.Tanh(),
        )

    def forward(self, x):
        target_hw = x.size()[2:]
        out = self.decoder(self.encoder(x))
        if out.size()[2:] == target_hw:
            return out
        # Stride-2 up/down sampling cannot always reproduce odd input sizes
        # exactly, so snap the result back to the input resolution.
        return F.interpolate(out, size=target_hw, mode='bilinear', align_corners=True)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-4, weight_decay=1e-5)
        lr_schedule = {
            'scheduler': CosineAnnealingLR(optimizer, T_max=50, eta_min=1e-6),
            'interval': 'epoch',
            'frequency': 1,
        }
        return [optimizer], [lr_schedule]

class ResBlock(nn.Module):
    """Residual block: two 3x3 convolutions with a ReLU in between, whose
    output is added back onto the input (channels and spatial size preserved).
    """

    def __init__(self, channels):
        super().__init__()
        layers = [
            nn.Conv2d(channels, channels, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(channels, channels, 3, padding=1),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.conv(x)
        return residual + x

class CompositeLoss(nn.Module):
    """Weighted combination of MSE and (1 - SSIM).

    ``alpha`` weights the MSE term; ``1 - alpha`` weights the structural term.
    The SSIM here is a global (non-windowed) approximation computed over the
    whole tensor, with the standard C1/C2 stabilizers for a 255 dynamic range.
    """

    def __init__(self, alpha=0.8):
        super().__init__()
        self.alpha = alpha
        self.mse = nn.MSELoss()

    def ssim(self, img1, img2):
        """Global SSIM between two tensors (returns 1.0 for identical inputs)."""
        C1 = (0.01 * 255) ** 2
        C2 = (0.03 * 255) ** 2
        mu_x = img1.mean()
        mu_y = img2.mean()
        var_x = ((img1 - mu_x) ** 2).mean()
        var_y = ((img2 - mu_y) ** 2).mean()
        cov_xy = ((img1 - mu_x) * (img2 - mu_y)).mean()
        # SSIM = (2*mu_x*mu_y + C1)(2*cov + C2) / ((mu_x^2 + mu_y^2 + C1)(var_x + var_y + C2))
        # The denominator's second factor uses the variances, not the covariance;
        # the previous version reused cov_xy there, so ssim(x, x) != 1.
        return ((2 * mu_x * mu_y + C1) * (2 * cov_xy + C2)) / \
               ((mu_x ** 2 + mu_y ** 2 + C1) * (var_x + var_y + C2))

    def forward(self, pred, target):
        return self.alpha * self.mse(pred, target) + (1 - self.alpha) * (1 - self.ssim(pred, target))

if __name__ == "__main__":
    seed_everything(100)
    generator = SymmetricFusionGenerator("src31.jpg")
    fusion_img = generator.generate_fusion()
    if fusion_img is not None:
        cv2.imwrite("fusion_base.jpg", cv2.cvtColor(fusion_img, cv2.COLOR_RGB2BGR))
        fusion_img = torch.from_numpy(fusion_img).permute(2, 0, 1).unsqueeze(0).float()
        original_img = torch.from_numpy(generator.src).permute(2, 0, 1).unsqueeze(0).float()

        model = FusionTransformer()
        optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
        scheduler = CosineAnnealingLR(optimizer, T_max=20)
        loss_fn = CompositeLoss()

        train_loss_history = []
        best_loss = float('inf')
        best_model_state = None
        best_fusion_img = None

        total_steps = 1000
        for epoch in range(total_steps):
            optimizer.zero_grad()
            new_fusion_img = model(fusion_img)
            loss = loss_fn(new_fusion_img, original_img)
            loss.backward()
            optimizer.step()
            scheduler.step()

            train_loss_history.append(loss.item())
            print(f'Epoch {epoch + 1}/{total_steps}, Loss: {loss.item()}')

            if loss.item() < best_loss:
                best_loss = loss.item()
                best_model_state = model.state_dict()
                best_fusion_img = new_fusion_img.detach().cpu().squeeze(0).permute(1, 2, 0).numpy()

        torch.save(best_model_state, 'best_model.pth')
        if best_fusion_img is not None:
            best_fusion_img = (best_fusion_img * 255).astype(np.uint8)
            cv2.imwrite("best_fusion_img.jpg", cv2.cvtColor(best_fusion_img, cv2.COLOR_RGB2BGR))

        plt.plot(train_loss_history, label='Train Loss')
        plt.savefig('loss_curve.png')
    else:
        print("无法生成融合图像，请检查输入文件。")
        plt.savefig('loss_curve.png')

# NOTE: redundant — torch.nn.functional is already imported as F at the top
# of the file; this trailing import is kept only to avoid touching behavior.
import torch.nn.functional as F