#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Lightweight watermark-removal network design.
Goal: remove the SepMark watermark while preserving image quality.
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class DepthwiseSeparableConv(nn.Module):
    """Depthwise-separable convolution block: depthwise conv -> 1x1 pointwise
    conv -> BatchNorm -> ReLU.

    Factoring a standard convolution this way reduces the parameter count
    (roughly by a factor of kernel_size**2 for large channel counts).
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1):
        super(DepthwiseSeparableConv, self).__init__()
        # bias=False on both convs: any constant per-channel offset they could
        # add is cancelled by the BatchNorm mean subtraction that follows, so
        # the biases were dead parameters in the original version.
        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
                                   groups=in_channels, bias=False)
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Per-channel spatial filtering, then cross-channel mixing.
        x = self.depthwise(x)
        x = self.pointwise(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class ResidualBlock(nn.Module):
    """Lightweight residual block: two depthwise-separable convolutions
    bridged by an identity skip connection."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = DepthwiseSeparableConv(channels, channels)
        self.conv2 = DepthwiseSeparableConv(channels, channels)

    def forward(self, x):
        # y = F(x) + x, where F is the two-conv stack.
        return self.conv2(self.conv1(x)) + x


class AttentionModule(nn.Module):
    """Squeeze-and-excitation style channel attention.

    Global-average-pools the input to one value per channel, runs it through a
    small bottleneck MLP ending in a sigmoid, and rescales each input channel
    by the resulting gate in [0, 1].

    Args:
        channels: number of input/output channels.
        reduction: bottleneck shrink factor for the MLP (default 8, matching
            the original hard-coded ``channels // 8``).
    """

    def __init__(self, channels, reduction=8):
        super(AttentionModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # max(1, ...) keeps the bottleneck valid when channels < reduction;
        # the original `channels // 8` produced a zero-width Linear (and a
        # construction-time crash) for small channel counts.
        hidden = max(1, channels // reduction)
        self.fc = nn.Sequential(
            nn.Linear(channels, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, channels),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        # Squeeze: (b, c, h, w) -> (b, c); excite: (b, c) -> (b, c, 1, 1) gate.
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        return x * y.expand_as(x)


class LightweightWatermarkRemover(nn.Module):
    """
    Lightweight watermark-removal network (~2M parameters).

    Encoder (2x downsampling twice) -> bottleneck -> decoder back to full
    resolution, followed by a refinement stage that sees both the input and
    the coarse estimate. The final output is the input plus a residual
    correction produced by the refinement stage.
    """

    def __init__(self, input_channels=3, base_channels=32):
        super(LightweightWatermarkRemover, self).__init__()

        # --- Encoder ---
        self.encoder1 = nn.Sequential(
            nn.Conv2d(input_channels, base_channels, 7, 1, 3),
            nn.BatchNorm2d(base_channels),
            nn.ReLU(inplace=True)
        )

        self.encoder2 = nn.Sequential(
            DepthwiseSeparableConv(base_channels, base_channels * 2, 3, 2, 1),
            ResidualBlock(base_channels * 2)
        )

        self.encoder3 = nn.Sequential(
            DepthwiseSeparableConv(base_channels * 2, base_channels * 4, 3, 2, 1),
            ResidualBlock(base_channels * 4),
            AttentionModule(base_channels * 4)
        )

        # --- Bottleneck at 1/4 resolution ---
        self.bottleneck = nn.Sequential(
            ResidualBlock(base_channels * 4),
            ResidualBlock(base_channels * 4),
            AttentionModule(base_channels * 4)
        )

        # --- Decoder ---
        self.decoder3 = nn.Sequential(
            nn.ConvTranspose2d(base_channels * 4, base_channels * 2, 4, 2, 1),
            nn.BatchNorm2d(base_channels * 2),
            nn.ReLU(inplace=True),
            ResidualBlock(base_channels * 2)
        )

        self.decoder2 = nn.Sequential(
            nn.ConvTranspose2d(base_channels * 2, base_channels, 4, 2, 1),
            nn.BatchNorm2d(base_channels),
            nn.ReLU(inplace=True),
            ResidualBlock(base_channels)
        )

        self.decoder1 = nn.Sequential(
            nn.Conv2d(base_channels, input_channels, 7, 1, 3),
            nn.Tanh()
        )

        # --- Refinement: improves quality using input + coarse estimate ---
        self.refine = nn.Sequential(
            nn.Conv2d(input_channels * 2, base_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            ResidualBlock(base_channels),
            nn.Conv2d(base_channels, input_channels, 3, 1, 1),
            nn.Tanh()
        )

    def forward(self, x):
        # Encode down to 1/4 resolution.
        feats = self.encoder3(self.encoder2(self.encoder1(x)))
        # Process at the coarsest scale.
        feats = self.bottleneck(feats)
        # Decode back to a coarse full-resolution estimate in [-1, 1].
        coarse = self.decoder1(self.decoder2(self.decoder3(feats)))
        # Refinement is conditioned on both the input and the coarse estimate.
        correction = self.refine(torch.cat([x, coarse], dim=1))
        # Residual formulation: predict a correction on top of the input.
        return x + correction


class UltraLightWatermarkRemover(nn.Module):
    """
    Ultra-lightweight watermark-removal network (~0.5M parameters).

    Single-scale residual network: the output is the input plus a small,
    Tanh-bounded correction scaled by 0.1.
    """

    def __init__(self, input_channels=3, base_channels=16):
        super(UltraLightWatermarkRemover, self).__init__()

        self.conv_in = nn.Conv2d(input_channels, base_channels, 3, 1, 1)

        # Core processing stack, kept at full resolution.
        self.core = nn.Sequential(
            ResidualBlock(base_channels),
            ResidualBlock(base_channels),
            AttentionModule(base_channels),
            ResidualBlock(base_channels),
            ResidualBlock(base_channels)
        )

        self.conv_out = nn.Sequential(
            nn.Conv2d(base_channels, input_channels, 3, 1, 1),
            nn.Tanh()
        )

    def forward(self, x):
        correction = self.conv_out(self.core(self.conv_in(x)))
        # Scale the Tanh-bounded correction so the network starts near identity.
        return x + correction * 0.1


def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total


if __name__ == "__main__":
    # Smoke-test both networks: report parameter counts and verify that a
    # forward pass preserves the input shape.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model1 = LightweightWatermarkRemover().to(device)
    print(f"LightweightWatermarkRemover 参数量: {count_parameters(model1):,}")

    model2 = UltraLightWatermarkRemover().to(device)
    print(f"UltraLightWatermarkRemover 参数量: {count_parameters(model2):,}")

    # Dummy image batch for the shape check.
    test_input = torch.randn(1, 3, 256, 256).to(device)
    with torch.no_grad():
        output1 = model1(test_input)
        output2 = model2(test_input)

    print(f"输入形状: {test_input.shape}")
    print(f"输出1形状: {output1.shape}")
    print(f"输出2形状: {output2.shape}")
    print("网络测试完成！")