"""
多尺度互信息引导的深度伪造检测网络（MI-Net）
主要模块：
1. 多区域子模型 - 模拟多区域感知特征提取
2. 局部分类器 - 构建互信息相关子损失
3. 通道压缩 - 融合多区域特征
4. 三分支输出 - 提供不同预测路径支持综合损失优化
"""

import torch
import torch.nn as nn
from models.resnet import resnet34, resnet18
from torch.nn import init
from models.efficientnet import EfficientNet
from models.mobilenet import MobileNetV1
import logging

class MI_Net(nn.Module):
    """Multi-region, mutual-information-guided deepfake detection network.

    Three output branches are produced per forward pass:
      1. ``p_y_given_z``          - main classifier on the compressed global feature
      2. ``p_y_given_f_all``      - baseline classifier on the concatenated features
      3. ``p_y_given_f1_fn_list`` - per-region local classifiers used to build the
                                    mutual-information sub-losses
    """

    def __init__(self, model='resnet34', num_regions=4, num_classes=2, freeze_fc=False, dropout=0.5) -> object:
        """
        Initialize MI-Net.

        Args:
            model: backbone type ('resnet34', 'resnet', 'mobilenet' or 'efficientnet')
            num_regions: number of region sub-models, default 4
            num_classes: number of classes, default 2 (real/fake)
            freeze_fc: whether to freeze the main fully-connected classifier
            dropout: dropout rate for the main classifier

        Raises:
            ValueError: if ``model`` is not a supported backbone name.
        """
        super(MI_Net, self).__init__()

        # Region sub-models - each one independently extracts local features.
        self.num_regions = num_regions
        logging.info(f'Now has {num_regions} region models')
        self.region_models = []
        for i in range(num_regions):
            if model == 'resnet34':
                # BUGFIX: the default value 'resnet34' previously matched no
                # branch, leaving `layer` unbound and crashing with
                # UnboundLocalError on the first iteration.
                layer = resnet34(pretrained=True)
            elif model == 'resnet':
                layer = resnet18(pretrained=True)
            elif model == 'mobilenet':
                layer = MobileNetV1()
            elif model == 'efficientnet':
                layer = EfficientNet(pretrained=True)
            else:
                # Fail fast instead of logging and crashing later on an
                # unbound local name.
                raise ValueError(
                    "please choose the type of backbone in Local Information Block.")
            layer_name = 'region_model{}'.format(i + 1)
            self.add_module(layer_name, layer)
            self.region_models.append(layer_name)

        # Local classifiers - classifier i consumes the other (num_regions - 1)
        # region features; their outputs feed the mutual-information loss.
        in_size = get_output_size(getattr(self, 'region_model1'))
        self.local_linears = []
        for i in range(num_regions):
            local_linear = nn.Linear(in_size * (num_regions - 1), num_classes)
            layer_name = 'local_linear{}'.format(i + 1)
            self.add_module(layer_name, local_linear)
            self.local_linears.append(layer_name)

        # Channel compression - fuses the concatenated multi-region features.
        self.bottleneck = ChannelCompress(in_ch=in_size * num_regions, out_ch=in_size)

        # Three-branch output layers.
        # 1. Baseline classifier - operates on the raw concatenated features.
        self.baseline_linear = nn.Linear(in_size * num_regions, num_classes)
        # 2. Main classifier - operates on the compressed global feature.
        self.linear = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(in_size, num_classes)
        )

        if freeze_fc:
            freeze(self.linear)

    def forward(self, x):
        """
        Forward pass producing the three branch outputs:
        1. p_y_given_z: main classifier output (compressed global feature)
        2. p_y_given_f_all: baseline classifier output (concatenated features)
        3. p_y_given_f1_fn_list: local classifier outputs (for MI computation)
        """
        # 1. Per-region feature extraction.
        features = []
        for layer_name in self.region_models:
            layer = getattr(self, layer_name)
            features.append(layer(x))

        # 2. Concatenate region features along the channel dimension.
        feature = torch.cat(features, dim=1)

        # 3. Baseline classifier output.
        p_y_given_f1_f2_f3_f4 = self.baseline_linear(feature)

        # 4. Feature fusion and main classifier output.
        global_feature_z = self.bottleneck(feature)  # Fusion layer
        p_y_given_z = self.linear(global_feature_z)

        # 5. Local classifier outputs - classifier i sees every region
        #    feature except the i-th one.
        p_y_given_f1_fn_list = []
        for i, layer_name in enumerate(self.local_linears):
            local_linear = getattr(self, layer_name)
            others = features[:i] + features[i + 1:]
            f1_fn_except_i = torch.cat(others, dim=1)
            p_y_given_f1_fn_list.append(local_linear(f1_fn_except_i))

        return {'p_y_given_z': p_y_given_z, 'p_y_given_f_all': p_y_given_f1_f2_f3_f4,
                'p_y_given_f1_fn_list': p_y_given_f1_fn_list}

    def initNetParams(self, net):
        """Initialize conv / batch-norm / linear parameters of *net* in place."""
        for m in net.modules():
            if isinstance(m, nn.Conv2d):
                # BUGFIX: the deprecated non-underscore init functions were
                # used (removed in current torch), and `if m.bias:` raises
                # for any multi-element bias tensor; test against None.
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=1e-3)
                if m.bias is not None:
                    init.constant_(m.bias, 0)


def freeze(layer):
    """Disable gradient updates for every parameter of *layer*.

    BUGFIX: the previous version iterated only ``layer.children()``, so a
    module with direct parameters and no children (e.g. a bare ``nn.Linear``)
    was silently left trainable. ``parameters()`` recurses through children
    as well, so behavior for container modules is unchanged.
    """
    for param in layer.parameters():
        param.requires_grad = False

class ChannelCompress(nn.Module):
    """
    Channel compression module.
    Purpose: fuse and compress the concatenated multi-region features so the
    final feature dimension does not grow too large.
    Structure: stacked fully-connected layers with batch-norm, ReLU and dropout.
    """

    def __init__(self, in_ch=2048, out_ch=256, dropout=0.5):
        """
        Args:
            in_ch: input channels (dimension of the concatenated region features)
            out_ch: output channels (compressed dimension)
            dropout: dropout rate
        """
        super(ChannelCompress, self).__init__()
        hidden = 1000
        # Two intermediate compression stages (in_ch -> 1000 -> 500),
        # then a final projection down to out_ch.
        self.model = nn.Sequential(
            nn.Linear(in_ch, hidden),
            nn.BatchNorm1d(hidden),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Linear(hidden, 500),
            nn.BatchNorm1d(500),
            nn.ReLU(),
            nn.Dropout(p=dropout),
            nn.Linear(500, out_ch),
        )

    def forward(self, x):
        return self.model(x)

def get_output_size(net):
    """Infer the flattened feature dimension of *net* via a dummy forward pass.

    Runs one 1x3x224x224 random tensor through the network and returns
    ``output.size(1)``.

    BUGFIX: the probe now runs under ``torch.no_grad()`` and with the network
    temporarily in eval mode, so it no longer builds an autograd graph or
    pollutes BatchNorm running statistics with random-noise data; the
    original training/eval mode is restored afterwards.
    """
    was_training = net.training
    net.eval()
    with torch.no_grad():
        output = net(torch.randn(1, 3, 224, 224))
    net.train(was_training)
    return output.size(1)