# -*- coding: utf-8 -*-
"""
神经网络模型结构脚本

定义用于预测太阳反射率和红外发射率的神经网络模型结构。
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
import math


class MaterialPropertiesNet(nn.Module):
    """
    Material-property prediction network (plain MLP).

    Predicts a material's solar reflectance and infrared emissivity as two
    sigmoid-bounded outputs in [0, 1].

    Args:
        input_dim (int): dimensionality of the input feature vector.
        hidden_dims (list[int] | None): hidden layer sizes; defaults to [64, 32].
        dropout_rate (float): dropout probability; 0 disables dropout.
        use_batch_norm (bool): insert BatchNorm1d after each linear layer.
        activation (str): one of 'relu', 'leaky_relu', 'elu', 'selu';
            unknown names fall back to ReLU.
    """
    def __init__(self, input_dim, hidden_dims=None, dropout_rate=0.2,
                 use_batch_norm=True, activation='leaky_relu'):
        super(MaterialPropertiesNet, self).__init__()

        # Avoid the shared mutable-default-argument pitfall.
        if hidden_dims is None:
            hidden_dims = [64, 32]

        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.dropout_rate = dropout_rate
        self.use_batch_norm = use_batch_norm
        self.activation = activation

        # Factory table for the supported activations; unknown names fall
        # back to ReLU, matching the original if/elif chain's else branch.
        activation_factories = {
            'relu': nn.ReLU,
            'leaky_relu': lambda: nn.LeakyReLU(0.1),
            'elu': nn.ELU,
            'selu': nn.SELU,
        }

        # Build the hidden stack: Linear -> [BatchNorm] -> activation -> [Dropout].
        layers = []
        prev_dim = input_dim
        for dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, dim))
            if use_batch_norm:
                layers.append(nn.BatchNorm1d(dim))
            layers.append(activation_factories.get(activation, nn.ReLU)())
            if dropout_rate > 0:
                layers.append(nn.Dropout(dropout_rate))
            prev_dim = dim

        self.feature_extractor = nn.Sequential(*layers)

        # Output head: two values (solar reflectance, infrared emissivity).
        self.output_layer = nn.Linear(prev_dim, 2)

        # Sigmoid keeps both predictions in the physical range [0, 1].
        self.sigmoid = nn.Sigmoid()

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-normal init for linear weights; unit scale / zero shift for BN."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features, shape (batch, input_dim).

        Returns:
            torch.Tensor: shape (batch, 2) — predicted solar reflectance and
            infrared emissivity, each in [0, 1].
        """
        # NOTE: the original interleaved .to(device) calls were removed —
        # each layer already returns its output on the input's device,
        # so they were no-ops.
        features = self.feature_extractor(x)
        output = self.output_layer(features)
        return self.sigmoid(output)


class ResidualBlock(nn.Module):
    """
    Residual block.

    Two fully-connected layers with a skip connection, used to build deeper
    networks while mitigating vanishing gradients.

    Args:
        in_features (int): input feature dimensionality.
        out_features (int): output feature dimensionality.
        use_batch_norm (bool): apply BatchNorm1d after each linear layer.
        dropout_rate (float): dropout probability; 0 disables dropout.
    """
    def __init__(self, in_features, out_features, use_batch_norm=True, dropout_rate=0.2):
        super(ResidualBlock, self).__init__()

        self.in_features = in_features
        self.out_features = out_features
        self.use_batch_norm = use_batch_norm
        self.dropout_rate = dropout_rate

        # First layer: Linear -> [BN] -> LeakyReLU -> [Dropout].
        self.fc1 = nn.Linear(in_features, out_features)
        self.bn1 = nn.BatchNorm1d(out_features) if use_batch_norm else nn.Identity()
        self.relu1 = nn.LeakyReLU(0.1)
        self.dropout1 = nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity()

        # Second layer: no activation before the residual add.
        self.fc2 = nn.Linear(out_features, out_features)
        self.bn2 = nn.BatchNorm1d(out_features) if use_batch_norm else nn.Identity()

        # Project the input when dimensions differ so the skip connection
        # can be added element-wise.
        self.shortcut = nn.Identity()
        if in_features != out_features:
            self.shortcut = nn.Linear(in_features, out_features)

    def forward(self, x):
        """
        Apply fc1 -> bn -> LeakyReLU -> dropout -> fc2 -> bn, add the
        (possibly projected) input, and finish with a LeakyReLU.

        Args:
            x (torch.Tensor): input, shape (batch, in_features).

        Returns:
            torch.Tensor: output, shape (batch, out_features).
        """
        # NOTE: the original scattered .to(device) calls were removed —
        # every layer already produces its output on the input's device.
        identity = self.shortcut(x)

        out = self.fc1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.dropout1(out)

        out = self.fc2(out)
        out = self.bn2(out)

        # Residual connection followed by the final non-linearity.
        out = out + identity
        return F.leaky_relu(out, 0.1)


class ResidualMaterialNet(nn.Module):
    """
    Residual material-property prediction network.

    Uses residual blocks to build a deeper network that predicts a
    material's solar reflectance and infrared emissivity (two sigmoid
    outputs in [0, 1]).

    Args:
        input_dim (int): dimensionality of the input features.
        hidden_dims (list[int] | None): hidden layer sizes; defaults to [64, 128, 64].
        num_res_blocks (int): number of residual blocks.
        dropout_rate (float): dropout probability; 0 disables dropout.
        use_batch_norm (bool): apply BatchNorm1d after linear layers.
    """
    def __init__(self, input_dim, hidden_dims=None, num_res_blocks=2,
                 dropout_rate=0.2, use_batch_norm=True):
        super(ResidualMaterialNet, self).__init__()

        # Avoid the shared mutable-default-argument pitfall.
        if hidden_dims is None:
            hidden_dims = [64, 128, 64]

        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.num_res_blocks = num_res_blocks
        self.dropout_rate = dropout_rate
        self.use_batch_norm = use_batch_norm

        # Stem: Linear -> [BN] -> LeakyReLU -> [Dropout].
        self.input_layer = nn.Linear(input_dim, hidden_dims[0])
        self.input_bn = nn.BatchNorm1d(hidden_dims[0]) if use_batch_norm else nn.Identity()
        self.input_relu = nn.LeakyReLU(0.1)
        self.input_dropout = nn.Dropout(dropout_rate) if dropout_rate > 0 else nn.Identity()

        # Residual blocks. Dimensions are clamped to the last entry of
        # hidden_dims once the block index runs past the list.
        self.res_blocks = nn.ModuleList()
        for i in range(num_res_blocks):
            in_features = hidden_dims[min(i, len(hidden_dims) - 1)]
            out_features = hidden_dims[min(i + 1, len(hidden_dims) - 1)]
            self.res_blocks.append(
                ResidualBlock(in_features, out_features, use_batch_norm, dropout_rate)
            )

        # Output head: two values (solar reflectance, infrared emissivity).
        self.output_layer = nn.Linear(hidden_dims[min(num_res_blocks, len(hidden_dims) - 1)], 2)
        self.sigmoid = nn.Sigmoid()

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming-normal init for linear weights; unit scale / zero shift for BN."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features, shape (batch, input_dim).

        Returns:
            torch.Tensor: shape (batch, 2) — predicted solar reflectance and
            infrared emissivity, each in [0, 1].
        """
        # NOTE: the original interleaved .to(device) calls were removed —
        # layer outputs already live on the input's device.
        x = self.input_layer(x)
        x = self.input_bn(x)
        x = self.input_relu(x)
        x = self.input_dropout(x)

        for res_block in self.res_blocks:
            x = res_block(x)

        x = self.output_layer(x)
        return self.sigmoid(x)


class SelfAttention(nn.Module):
    """
    Multi-head self-attention over a single feature vector.

    The (batch, embed_dim) input is treated as a length-1 sequence, so the
    attention reduces to a learned gating/projection of the features.

    Args:
        embed_dim (int): feature dimensionality; must be divisible by num_heads.
        num_heads (int): number of attention heads.
        dropout (float): dropout probability on the attention weights.
    """
    def __init__(self, embed_dim, num_heads=4, dropout=0.1):
        super(SelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == embed_dim, "embed_dim必须能被num_heads整除"

        self.query = nn.Linear(embed_dim, embed_dim)
        self.key = nn.Linear(embed_dim, embed_dim)
        self.value = nn.Linear(embed_dim, embed_dim)

        self.proj = nn.Linear(embed_dim, embed_dim)
        self.dropout = nn.Dropout(dropout)
        # Plain float scaling factor: tensors divide by Python floats
        # directly, so no per-forward tensor allocation is needed.
        self.scale = math.sqrt(self.head_dim)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features, shape [batch_size, embed_dim].

        Returns:
            torch.Tensor: attention-weighted features, shape [batch_size, embed_dim].
        """
        batch_size = x.size(0)

        # Add a sequence dimension: [batch_size, 1, embed_dim].
        x_seq = x.unsqueeze(1)

        # Project to queries/keys/values and split into heads:
        # [batch_size, num_heads, 1, head_dim].
        q = self.query(x_seq).view(batch_size, 1, self.num_heads, self.head_dim).transpose(1, 2)
        k = self.key(x_seq).view(batch_size, 1, self.num_heads, self.head_dim).transpose(1, 2)
        v = self.value(x_seq).view(batch_size, 1, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention. Dividing by the float self.scale
        # replaces the original per-call torch.tensor(...) allocation.
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale  # [B, H, 1, 1]
        attn_probs = F.softmax(attn_scores, dim=-1)
        attn_probs = self.dropout(attn_probs)

        # Weighted sum of values, merge heads, project back to embed_dim.
        out = torch.matmul(attn_probs, v)  # [batch_size, num_heads, 1, head_dim]
        out = out.transpose(1, 2).contiguous().view(batch_size, 1, self.embed_dim)
        return self.proj(out).squeeze(1)  # [batch_size, embed_dim]


class DualBranchAttentionNet(nn.Module):
    """
    Dual-branch attention network.

    A shared feature extractor followed by self-attention feeds two separate
    branches that predict solar reflectance and infrared emissivity
    respectively; both outputs are sigmoid-bounded to [0, 1].

    Args:
        input_dim (int): dimensionality of the input features.
        hidden_dims (list[int] | None): shared extractor sizes; defaults to [64, 128].
        branch_dims (list[int] | None): per-branch sizes; defaults to [64, 32].
        dropout_rate (float): dropout probability; 0 disables dropout.
        use_batch_norm (bool): apply BatchNorm1d after linear layers.
        num_heads (int): number of attention heads.
    """
    def __init__(self, input_dim, hidden_dims=None, branch_dims=None,
                 dropout_rate=0.2, use_batch_norm=True, num_heads=4):
        super(DualBranchAttentionNet, self).__init__()

        # Avoid the shared mutable-default-argument pitfall.
        if hidden_dims is None:
            hidden_dims = [64, 128]
        if branch_dims is None:
            branch_dims = [64, 32]

        self.input_dim = input_dim
        self.hidden_dims = hidden_dims
        self.branch_dims = branch_dims
        self.dropout_rate = dropout_rate
        self.use_batch_norm = use_batch_norm

        # Shared feature extractor.
        self.shared_layers = self._build_layers(input_dim, hidden_dims, dropout_rate, use_batch_norm)

        # Self-attention applied to the shared features.
        self.attention = SelfAttention(hidden_dims[-1], num_heads=num_heads, dropout=dropout_rate)

        # Solar-reflectance branch.
        self.solar_branch = self._build_layers(hidden_dims[-1], branch_dims, dropout_rate, use_batch_norm)
        self.solar_output = nn.Linear(branch_dims[-1], 1)

        # Infrared-emissivity branch.
        self.infrared_branch = self._build_layers(hidden_dims[-1], branch_dims, dropout_rate, use_batch_norm)
        self.infrared_output = nn.Linear(branch_dims[-1], 1)

        # Sigmoid keeps both predictions in [0, 1].
        self.sigmoid = nn.Sigmoid()

        self._initialize_weights()

    def _build_layers(self, input_dim, hidden_dims, dropout_rate, use_batch_norm):
        """
        Build a Linear -> [BN] -> LeakyReLU -> [Dropout] stack.

        Args:
            input_dim (int): input dimensionality of the stack.
            hidden_dims (list[int]): layer sizes.
            dropout_rate (float): dropout probability; 0 disables dropout.
            use_batch_norm (bool): apply BatchNorm1d after each linear layer.

        Returns:
            nn.Sequential: the assembled stack.
        """
        layers = []
        prev_dim = input_dim
        for dim in hidden_dims:
            layers.append(nn.Linear(prev_dim, dim))
            if use_batch_norm:
                layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.LeakyReLU(0.1))
            if dropout_rate > 0:
                layers.append(nn.Dropout(dropout_rate))
            prev_dim = dim
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """Kaiming-normal init for linear weights; unit scale / zero shift for BN."""
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features, shape (batch, input_dim).

        Returns:
            torch.Tensor: shape (batch, 2) — predicted solar reflectance and
            infrared emissivity, each in [0, 1].
        """
        # NOTE: the original interleaved .to(device) calls were removed —
        # layer outputs already live on the input's device.
        shared_features = self.shared_layers(x)
        attended_features = self.attention(shared_features)

        # Each branch produces one scalar per sample.
        solar_output = self.solar_output(self.solar_branch(attended_features))
        infrared_output = self.infrared_output(self.infrared_branch(attended_features))

        # Concatenate the branch outputs and squash to [0, 1].
        output = torch.cat([solar_output, infrared_output], dim=1)
        return self.sigmoid(output)


class EnsembleMaterialNet(nn.Module):
    """
    Ensemble of base prediction models.

    Averages the predictions of several independently-initialized base
    models to improve accuracy and stability.

    Args:
        input_dim (int): dimensionality of the input features.
        num_models (int): number of base models in the ensemble.
        base_model_type (str): base model type — 'mlp', 'residual' or 'dual_branch'.
        hidden_dims (list[int] | None): hidden layer sizes for the base
            models; defaults to [64, 32].
        dropout_rate (float): dropout probability; 0 disables dropout.
        use_batch_norm (bool): apply BatchNorm1d in the base models.

    Raises:
        ValueError: if base_model_type is not one of the supported types.
    """
    def __init__(self, input_dim, num_models=3, base_model_type='mlp',
                 hidden_dims=None, dropout_rate=0.2, use_batch_norm=True):
        super(EnsembleMaterialNet, self).__init__()

        # Avoid the shared mutable-default-argument pitfall.
        if hidden_dims is None:
            hidden_dims = [64, 32]

        self.input_dim = input_dim
        self.num_models = num_models
        self.base_model_type = base_model_type

        # Instantiate the base models; each gets its own random init.
        self.models = nn.ModuleList()
        for _ in range(num_models):
            if base_model_type == 'mlp':
                model = MaterialPropertiesNet(
                    input_dim, hidden_dims, dropout_rate, use_batch_norm
                )
            elif base_model_type == 'residual':
                model = ResidualMaterialNet(
                    input_dim, hidden_dims, num_res_blocks=2,
                    dropout_rate=dropout_rate, use_batch_norm=use_batch_norm
                )
            elif base_model_type == 'dual_branch':
                model = DualBranchAttentionNet(
                    input_dim, hidden_dims, branch_dims=[64, 32],
                    dropout_rate=dropout_rate, use_batch_norm=use_batch_norm
                )
            else:
                raise ValueError(f"不支持的模型类型: {base_model_type}")

            self.models.append(model)

    def forward(self, x):
        """
        Forward pass.

        Args:
            x (torch.Tensor): input features, shape (batch, input_dim).

        Returns:
            torch.Tensor: shape (batch, 2) — the mean of the base models'
            predictions.
        """
        # Run every base model and average the stacked predictions.
        outputs = [model(x) for model in self.models]
        return torch.mean(torch.stack(outputs), dim=0)


def create_model(model_type, input_dim, **kwargs):
    """
    Instantiate a model by type name.

    Args:
        model_type (str): one of 'mlp', 'residual', 'ensemble', 'dual_branch'.
        input_dim (int): dimensionality of the input features.
        **kwargs: forwarded to the model constructor.

    Returns:
        nn.Module: the constructed model.

    Raises:
        ValueError: if model_type is not a supported type.
    """
    # Dispatch table instead of an if/elif chain.
    registry = {
        'mlp': MaterialPropertiesNet,
        'residual': ResidualMaterialNet,
        'ensemble': EnsembleMaterialNet,
        'dual_branch': DualBranchAttentionNet,
    }
    if model_type not in registry:
        raise ValueError(f"不支持的模型类型: {model_type}")
    return registry[model_type](input_dim, **kwargs)


def save_model(model, model_path):
    """
    Save a model's state dict to disk.

    Args:
        model (nn.Module): the model to save.
        model_path (str): destination path; parent directories are created
            as needed.
    """
    # os.path.dirname returns "" for a bare filename, and os.makedirs("")
    # raises FileNotFoundError — only create the directory when there is one.
    parent_dir = os.path.dirname(model_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    # Persist only the parameters (state dict), not the full module object.
    torch.save(model.state_dict(), model_path)
    print(f"模型已保存到: {model_path}")


def load_model(model_type, input_dim, model_path, **kwargs):
    """
    Create a model and load its parameters from disk.

    Args:
        model_type (str): model type, forwarded to create_model.
        input_dim (int): dimensionality of the input features.
        model_path (str): path to the saved state dict.
        **kwargs: forwarded to the model constructor.

    Returns:
        nn.Module: the loaded model, set to eval mode.
    """
    # Build a fresh model instance with the same architecture.
    model = create_model(model_type, input_dim, **kwargs)

    # map_location='cpu' lets checkpoints saved on GPU load on CPU-only
    # machines; load_state_dict then copies the tensors onto the model's
    # own parameter devices.
    state_dict = torch.load(model_path, map_location='cpu')
    model.load_state_dict(state_dict)

    # Inference mode: freezes dropout and batch-norm statistics.
    model.eval()

    print(f"模型已从 {model_path} 加载")
    return model


if __name__ == "__main__":
    # Smoke test: build every architecture, then push one tiny random
    # batch through each and report the output shapes.
    input_dim = 10  # example feature dimensionality

    # Instantiate all four architectures up front.
    mlp_model = MaterialPropertiesNet(input_dim)
    print(f"MLP模型结构:\n{mlp_model}")

    res_model = ResidualMaterialNet(input_dim)
    print(f"\n残差模型结构:\n{res_model}")

    dual_branch_model = DualBranchAttentionNet(input_dim)
    print(f"\n双分支注意力网络模型结构:\n{dual_branch_model}")

    ensemble_model = EnsembleMaterialNet(input_dim)
    print(f"\n集成模型结构:\n{ensemble_model}")

    # One random batch of 5 samples, reused by every model.
    x = torch.randn(5, input_dim)

    with torch.no_grad():
        mlp_output = mlp_model(x)
        res_output = res_model(x)
        dual_branch_output = dual_branch_model(x)
        ensemble_output = ensemble_model(x)

    for label, result in (
        (f"\nMLP输出形状: ", mlp_output),
        ("残差网络输出形状: ", res_output),
        ("双分支注意力网络输出形状: ", dual_branch_output),
        ("集成模型输出形状: ", ensemble_output),
    ):
        print(f"{label}{result.shape}")