"""
GCNet模型 - 全局上下文网络
基于论文: GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .common import ConvBlock


class ContextBlock(nn.Module):
    """Global context (GC) block, the core module of GCNet.

    Pools the input into a single global context vector (via attention
    weighting or plain average pooling), transforms it through a channel
    bottleneck, and fuses it back into every spatial position by addition
    and/or a sigmoid-gated multiplication.

    Reference: "GCNet: Non-local Networks Meet Squeeze-Excitation Networks
    and Beyond".
    """
    def __init__(self, inplanes, ratio=16, pooling_type='att', fusion_types=('channel_add',)):
        super().__init__()
        assert pooling_type in ['avg', 'att']
        assert isinstance(fusion_types, (list, tuple))
        valid_fusion_types = ['channel_add', 'channel_mul']
        assert all(f in valid_fusion_types for f in fusion_types)
        assert len(fusion_types) > 0, 'at least one fusion should be used'

        self.inplanes = inplanes
        self.ratio = ratio
        self.planes = inplanes // ratio
        self.pooling_type = pooling_type
        self.fusion_types = fusion_types

        # Global context modeling: a 1x1 conv produces per-position attention
        # logits, otherwise a plain adaptive average pool is used.
        if pooling_type == 'att':
            self.conv_mask = nn.Conv2d(inplanes, 1, kernel_size=1)
            self.softmax = nn.Softmax(dim=2)
        else:
            self.avg_pool = nn.AdaptiveAvgPool2d(1)

        def _bottleneck():
            # Channel transform: squeeze to self.planes, normalize, expand back.
            return nn.Sequential(
                nn.Conv2d(self.inplanes, self.planes, kernel_size=1),
                nn.LayerNorm([self.planes, 1, 1]),
                nn.ReLU(inplace=True),
                nn.Conv2d(self.planes, self.inplanes, kernel_size=1),
            )

        self.channel_add_conv = _bottleneck() if 'channel_add' in fusion_types else None
        self.channel_mul_conv = _bottleneck() if 'channel_mul' in fusion_types else None

        self.reset_parameters()

    def reset_parameters(self):
        """Kaiming-init the mask conv; zero-init each bottleneck's last conv
        so the block initially acts as an identity mapping."""
        if self.pooling_type == 'att':
            nn.init.kaiming_normal_(self.conv_mask.weight, mode='fan_in')
            nn.init.constant_(self.conv_mask.bias, 0)

        for transform in (self.channel_add_conv, self.channel_mul_conv):
            if transform is not None:
                nn.init.constant_(transform[-1].weight, 0)
                nn.init.constant_(transform[-1].bias, 0)

    def spatial_pool(self, x):
        """Reduce x [N, C, H, W] to a global context tensor [N, C, 1, 1]."""
        n, c, h, w = x.size()

        if self.pooling_type != 'att':
            # Plain average pooling.
            return self.avg_pool(x)

        # Attention pooling: softmax over all H*W positions, then a
        # weighted sum of the per-position feature vectors.
        values = x.view(n, c, h * w).unsqueeze(1)      # [N, 1, C, H*W]
        weights = self.conv_mask(x).view(n, 1, h * w)
        weights = self.softmax(weights)                # [N, 1, H*W]
        weights = weights.unsqueeze(-1)                # [N, 1, H*W, 1]
        context = torch.matmul(values, weights)
        return context.view(n, c, 1, 1)

    def forward(self, x):
        """Fuse global context into x.

        Args:
            x: [B, C, H, W]

        Returns:
            out: [B, C, H, W] (same shape as the input)
        """
        context = self.spatial_pool(x)

        out = x
        if self.channel_mul_conv is not None:
            # SE-style sigmoid gate on the channels.
            out = out * torch.sigmoid(self.channel_mul_conv(context))
        if self.channel_add_conv is not None:
            # Broadcast-add the transformed context to every position.
            out = out + self.channel_add_conv(context)

        return out


class GCBlock(nn.Module):
    """Bottleneck residual block augmented with a global context module.

    Layout: 1x1 conv -> 3x3 conv (carries the stride) -> ContextBlock ->
    1x1 expansion to 4*planes channels, plus an identity/downsample
    shortcut and a final ReLU.
    """
    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 ratio=16, pooling_type='att', fusion_types=('channel_add',)):
        super().__init__()

        # Bottleneck convolutions built from the shared ConvBlock helper.
        self.conv1 = ConvBlock(inplanes, planes, kernel_size=1, stride=1, padding=0)
        self.conv2 = ConvBlock(planes, planes, kernel_size=3, stride=stride, padding=1)

        # Global context modeling between the 3x3 conv and the expansion.
        self.context_block = ContextBlock(
            planes, ratio=ratio,
            pooling_type=pooling_type,
            fusion_types=fusion_types,
        )

        # Expansion conv has no activation; ReLU is applied after the
        # residual addition instead.
        self.conv3 = ConvBlock(planes, planes * 4, kernel_size=1, stride=1, padding=0, activation=False)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Run the block.

        Args:
            x: [B, C_in, H, W]

        Returns:
            out: [B, 4*planes, H', W']
        """
        # Shortcut path: project when shape/channels change, identity otherwise.
        shortcut = self.downsample(x) if self.downsample is not None else x

        out = self.conv2(self.conv1(x))
        out = self.context_block(out)
        out = self.conv3(out)

        out += shortcut
        return self.relu(out)


class GCNetEncoder(nn.Module):
    """GCNet-based image encoder for feature extraction.

    A 7x7 conv stem followed by four GCBlock stages (ResNet-style channel
    progression) and global average pooling, producing a flat 2048-dim
    feature vector per image.
    """
    def __init__(self, in_channels=1, use_inference_mode=False):
        super().__init__()
        self.use_inference_mode = use_inference_mode

        # Stem: 7x7 stride-2 conv plus max-pool -> 4x spatial downsampling.
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # Four GC stages; input channels follow the 4x bottleneck expansion.
        self.layer1 = self._make_layer(64, 64, stride=1)
        self.layer2 = self._make_layer(256, 128, stride=2)
        self.layer3 = self._make_layer(512, 256, stride=2)
        self.layer4 = self._make_layer(1024, 512, stride=2)

        # Collapse the spatial dimensions to 1x1.
        self.global_pool = nn.AdaptiveAvgPool2d(1)

        self.output_dim = 2048

    def _make_layer(self, inplanes, planes, stride):
        """Build one GCBlock stage, adding a projection shortcut when the
        spatial size or channel count changes."""
        downsample = None
        if stride != 1 or inplanes != planes * 4:
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * 4, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * 4),
            )

        return GCBlock(
            inplanes, planes, stride, downsample,
            ratio=16,
            pooling_type='att',
            fusion_types=('channel_add',),
        )

    def forward(self, x):
        """Encode a batch of images.

        Args:
            x: [B, in_channels, H, W]

        Returns:
            features: [B, 2048]
        """
        feats = self.stem(x)  # [B, 64, H/4, W/4]
        # Successive stages: [B, 256, H/4, W/4] -> [B, 512, H/8, W/8]
        # -> [B, 1024, H/16, W/16] -> [B, 2048, H/32, W/32]
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feats = stage(feats)
        pooled = self.global_pool(feats)  # [B, 2048, 1, 1]
        return pooled.view(pooled.size(0), -1)

    def switch_to_deploy(self):
        """Switch the encoder to inference mode."""
        self.use_inference_mode = True
        self.eval()
        print("GCNet encoder switched to inference mode")


class GCNetVolumePredictor(nn.Module):
    """Volume prediction model built on a GCNet encoder.

    Fuses three inputs — a stack of grayscale images, per-image pixel
    scales, and hand-crafted (traditional) features — into a single
    scalar prediction per sample. Suited to tasks that benefit from
    global context modeling.

    Args:
        num_images: number of images per sample (default 20).
        num_features: number of traditional features (default 6).
        use_inference_mode: whether to start in inference mode.
    """
    def __init__(self, num_images=20, num_features=6, use_inference_mode=False):
        super().__init__()
        self.num_images = num_images
        self.num_features = num_features
        self.use_inference_mode = use_inference_mode

        # Shared GCNet encoder applied to every image in the stack.
        self.encoder = GCNetEncoder(in_channels=1, use_inference_mode=use_inference_mode)

        # Projects the 2048-dim encoder output down to 128 dims.
        self.image_processor = nn.Sequential(
            nn.Linear(2048, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.3),
            nn.Linear(512, 128),
            nn.ReLU(inplace=True),
        )

        # Embeds the per-image pixel scales.
        self.scale_processor = nn.Sequential(
            nn.Linear(num_images, 32),
            nn.ReLU(inplace=True),
            nn.Linear(32, 16),
            nn.ReLU(inplace=True),
        )

        # Embeds the hand-crafted features.
        self.feature_processor = nn.Sequential(
            nn.Linear(num_features, 16),
            nn.ReLU(inplace=True),
            nn.Linear(16, 8),
            nn.ReLU(inplace=True),
        )

        # Regression head over the concatenated embedding (128 + 16 + 8 = 152).
        self.predictor = nn.Sequential(
            nn.Linear(152, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(128, 64),
            nn.ReLU(inplace=True),
            nn.Linear(64, 1),
        )

    def forward(self, images, pixel_scales, traditional_features):
        """Predict one scalar per sample.

        Args:
            images: [B, num_images, 1, H, W]
            pixel_scales: [B, num_images]
            traditional_features: [B, num_features]

        Returns:
            output: [B, 1]
        """
        # Encode each image with the shared encoder. Kept as a per-image
        # loop (not one reshaped batch) so BatchNorm statistics match the
        # original training behavior.
        encoded = [self.encoder(images[:, idx]) for idx in range(self.num_images)]

        # Mean-pool the per-image embeddings into one vector per sample.
        pooled = torch.stack(encoded, dim=1).mean(dim=1)

        # Process each modality separately, then fuse.
        img_feat = self.image_processor(pooled)
        scale_feat = self.scale_processor(pixel_scales)
        trad_feat = self.feature_processor(traditional_features)

        combined = torch.cat([img_feat, scale_feat, trad_feat], dim=1)
        return self.predictor(combined)

    def switch_to_deploy(self):
        """Switch the model (including its encoder) to inference mode."""
        self.use_inference_mode = True
        self.encoder.switch_to_deploy()
        self.eval()
        print("GCNet model switched to inference mode")


if __name__ == "__main__":
    # 测试模型
    print("Testing GCNet Model...")
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = GCNetVolumePredictor(num_images=20, num_features=6).to(device)
    
    # 统计参数量
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    print(f"\nModel Statistics:")
    print(f"  Total parameters: {total_params:,}")
    print(f"  Trainable parameters: {trainable_params:,}")
    print(f"  Model size: {total_params * 4 / 1024 / 1024:.2f} MB (float32)")
    
    # 测试前向传播
    batch_size = 2
    test_images = torch.randn(batch_size, 20, 1, 96, 288).to(device)
    test_scales = torch.randn(batch_size, 20).to(device)
    test_features = torch.randn(batch_size, 6).to(device)
    
    print(f"\nInput shapes:")
    print(f"  Images: {test_images.shape}")
    print(f"  Scales: {test_scales.shape}")
    print(f"  Features: {test_features.shape}")
    
    with torch.no_grad():
        output = model(test_images, test_scales, test_features)
    
    print(f"\nOutput shape: {output.shape}")
    print(f"Output values: {output.squeeze().cpu().numpy()}")
    
    print("\n✓ GCNet model test passed!")