#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
联邦学习共享模型定义

功能：
1. 统一模型架构
2. 参数序列化
3. 模型初始化
"""

import torch
import torch.nn as nn
from typing import Dict, Any

class FederatedModel(nn.Module):
    """Shared base model for federated learning.

    A simple 3-layer MLP plus helpers that serialize/restore both the
    architecture hyper-parameters and the weights, so clients and the
    server can exchange models as plain dicts.
    """

    def __init__(self, input_size: int, output_size: int, hidden_size: int = 128):
        """Initialize the model.

        Args:
            input_size: Number of input features.
            output_size: Output dimension.
            hidden_size: Hidden-layer width.
        """
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Forward pass: two ReLU-activated hidden layers, linear output head."""
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        return self.fc3(x)

    def get_parameters(self) -> Dict[str, Any]:
        """Return a self-contained snapshot of the model.

        The weight tensors are detached and cloned so that later in-place
        training updates on this model cannot silently mutate a snapshot
        that has already been exported (e.g. one queued for aggregation).
        The previous implementation returned the live ``state_dict()``,
        whose tensors share storage with the model's parameters.
        """
        return {
            # detach+clone: decouple the snapshot from the live parameters
            'state_dict': {k: v.detach().clone()
                           for k, v in self.state_dict().items()},
            'input_size': self.fc1.in_features,
            'output_size': self.fc3.out_features,
            'hidden_size': self.fc2.out_features,
        }

    def set_parameters(self, parameters: Dict[str, Any]):
        """Load weights from a snapshot produced by ``get_parameters``."""
        self.load_state_dict(parameters['state_dict'])

    @classmethod
    def from_parameters(cls, parameters: Dict[str, Any]):
        """Create a model instance from a snapshot (architecture + weights)."""
        model = cls(
            input_size=parameters['input_size'],
            output_size=parameters['output_size'],
            # older snapshots may omit hidden_size; fall back to the default
            hidden_size=parameters.get('hidden_size', 128),
        )
        model.set_parameters(parameters)
        return model

class IndustrialFederatedModel(FederatedModel):
    """Federated model specialized for industrial low-carbon optimization.

    Extends the base MLP with a per-feature attention gate that
    re-weights the inputs before the shared forward pass.
    """

    def __init__(self, input_size: int = 10, output_size: int = 5,
                 hidden_size: int = 256):
        """Initialize the industrial model.

        Args:
            input_size: Number of industrial parameters (default 10).
            output_size: Number of optimization-suggestion classes (default 5).
            hidden_size: Hidden-layer width (default 256). Accepting this
                keyword fixes the inherited ``from_parameters`` constructor,
                which forwards ``hidden_size`` from a snapshot and previously
                raised ``TypeError`` for this subclass.
        """
        super().__init__(
            input_size=input_size,
            output_size=output_size,
            hidden_size=hidden_size,
        )

        # Industrial-specific layer: sigmoid gate in (0, 1) per input feature
        self.attention = nn.Sequential(
            nn.Linear(input_size, 64),
            nn.ReLU(),
            nn.Linear(64, input_size),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Forward pass with input-feature attention gating."""
        attn_weights = self.attention(x)
        x = x * attn_weights
        return super().forward(x)