import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
import numpy as np

class Generator(nn.Module):
    """Fully-convolutional generator for image stitching.

    Consumes two RGB images stacked along the channel axis (6 input
    channels) and emits a single RGB image (3 channels) squashed into
    [-1, 1] by a final Tanh. All convolutions are 3x3 / stride 1 /
    padding 1, so spatial dimensions are preserved.
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Conv2d(6, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2),
            # Back down to 3 channels (an RGB image).
            nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 6, H, W) tensor to a (N, 3, H, W) tensor in [-1, 1]."""
        return self.model(x)

class Discriminator(nn.Module):
    """Fully-convolutional patch discriminator.

    Takes an RGB image and returns a per-patch realness map: two
    stride-2 convolutions downsample by 4x in each spatial dimension,
    and a final Sigmoid maps each output cell into [0, 1].
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2),
            nn.Conv2d(128, 1, kernel_size=3, stride=1, padding=1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 3, H, W) image to a (N, 1, H/4, W/4) realness map."""
        return self.model(x)

def load_image(image_path, transform):
    """Load an image file as a (1, C, H, W) tensor.

    Args:
        image_path: path of the image on disk.
        transform: torchvision transform applied to the decoded PIL image.

    Returns:
        The transformed image with a leading batch dimension added.
    """
    rgb = Image.open(image_path).convert('RGB')
    return transform(rgb).unsqueeze(0)

def stitch_images(generator, src1, src2, device):
    """Blend two images side by side, using the generator over the seam.

    Args:
        generator: callable mapping a (N, 6, H, W) tensor to (N, 3, H, W).
        src1, src2: (N, 3, H, W) image tensors (assumed same shape).
        device: torch.device the generator runs on.

    Returns:
        (N, 3, H, 2W) tensor: the side-by-side concatenation with the
        generator's output linearly blended over a central band.
    """
    # Move the sources to `device` up front so every operand of the final
    # lerp lives on the same device. (The original left `stitched` and
    # `mask` on the caller's device while moving only the generator input,
    # which fails when CPU tensors are passed with a CUDA device.)
    src1 = src1.to(device)
    src2 = src2.to(device)

    # Naive side-by-side concatenation along the width axis.
    stitched = torch.cat([src1, src2], dim=3)
    _, _, h, w = stitched.shape

    # Mask selecting a central band (1/4 of the total width) where the
    # generated content replaces the hard seam.
    mask = torch.zeros_like(stitched)
    transition_width = w // 4
    mask[:, :, :, w // 2 - transition_width // 2 : w // 2 + transition_width // 2] = 1

    # The generator sees both sources stacked on the channel axis.
    gen_input = torch.cat([src1, src2], dim=1)

    # Inference only: no gradients needed for the final composition.
    with torch.no_grad():
        gen_output = generator(gen_input)
        # The generator preserves the per-image width, which is half the
        # panorama width; upsample so the blend shapes match.
        if gen_output.shape[-1] != w:
            gen_output = torch.nn.functional.interpolate(
                gen_output, size=(h, w), mode='bilinear'
            )

    # Linear blend: mask==1 takes the generated pixels, mask==0 keeps the
    # plain concatenation.
    return torch.lerp(stitched, gen_output, mask)

def train_gan_stitcher(src1_path, src2_path, output_path, epochs=50, lr=0.0002):
    """Train a small GAN on a single image pair and save the stitched result.

    Args:
        src1_path: path of the left source image.
        src2_path: path of the right source image.
        output_path: where the final stitched image is written.
        epochs: number of adversarial training iterations.
        lr: Adam learning rate for both networks.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Resize both inputs to a fixed size so their shapes always match, and
    # normalize to [-1, 1] (matching the generator's Tanh output range).
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    src1 = load_image(src1_path, transform).to(device)
    src2 = load_image(src2_path, transform).to(device)

    generator = Generator().to(device)
    discriminator = Discriminator().to(device)

    criterion = nn.BCELoss()
    optimizer_G = optim.Adam(generator.parameters(), lr=lr)
    optimizer_D = optim.Adam(discriminator.parameters(), lr=lr)

    # Loop invariants, built once instead of every epoch:
    # the "real" sample (plain side-by-side concatenation, 256x512) and
    # the generator input (channel-stacked sources, 6x256x256).
    # NOTE(review): real (width 512) and fake (width 256) samples differ in
    # size, so the discriminator sees differently shaped maps for each —
    # confirm this asymmetry is intended.
    real_stitched = torch.cat([src1, src2], dim=3)
    gen_input = torch.cat([src1, src2], dim=1)

    for epoch in range(epochs):
        # ---- Discriminator step ----
        optimizer_D.zero_grad()

        # Run each forward pass once and size the per-pixel label map from
        # its output (the discriminator is fully convolutional). The
        # original ran every forward pass twice just to build the labels.
        real_out = discriminator(real_stitched)
        real_loss = criterion(real_out, torch.ones_like(real_out))

        fake_stitched = generator(gen_input)
        # detach(): the discriminator step must not update the generator.
        fake_out = discriminator(fake_stitched.detach())
        fake_loss = criterion(fake_out, torch.zeros_like(fake_out))

        d_loss = real_loss + fake_loss
        d_loss.backward()
        optimizer_D.step()

        # ---- Generator step ----
        optimizer_G.zero_grad()
        fake_stitched = generator(gen_input)
        d_output = discriminator(fake_stitched)
        # Generator tries to make the discriminator output all-ones.
        g_loss = criterion(d_output, torch.ones_like(d_output))
        g_loss.backward()
        optimizer_G.step()

        # The original guarded this with `epoch % 1 == 0`, which is always
        # true; log every epoch unconditionally.
        print(f"Epoch {epoch} / {epochs}, D Loss: {d_loss.item()}, G Loss: {g_loss.item()}")

    # Produce the final panorama and undo the [-1, 1] normalization before
    # writing it to disk.
    result = stitch_images(generator, src1, src2, device)
    save_image(result * 0.5 + 0.5, output_path)

# Example usage: stitch two local JPEGs into one panorama.
if __name__ == "__main__":
    train_gan_stitcher(
        src1_path="src1.jpg",
        src2_path="src2.jpg",
        output_path="stitched_result.jpg",
    )
