# models/factor_policy_net.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Dict, Tuple
import numpy as np

class FactorPolicyNetwork(nn.Module):
    """Per-factor actor-critic policy network.

    A shared state encoder feeds four independent policy heads
    (movement / combat / support / special) plus a scalar value head.
    The combat/support/special head sizes are customized per factor
    type; the movement head is a fixed 8 directions for every factor.

    Args:
        factor_type: key selecting the per-factor action dimensions
            (unknown keys fall back to defaults).
        input_dim: size of the flat state vector.
        hidden_dim: width of the first encoder layer; the encoder
            output is ``hidden_dim // 2``.
    """

    def __init__(self, factor_type: str, input_dim: int = 128, hidden_dim: int = 256):
        super().__init__()
        self.factor_type = factor_type
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

        # Shared state encoder: input_dim -> hidden_dim -> hidden_dim // 2.
        self.state_encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.LayerNorm(hidden_dim),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.1),
        )

        # Per-factor output dimensions — single source of truth for every
        # head (the original hard-coded the movement head's 8 separately).
        self.action_dims = self._get_action_dimensions()

        # One linear policy head per action category.
        self.policy_heads = nn.ModuleDict({
            head: nn.Linear(hidden_dim // 2, dim)
            for head, dim in self.action_dims.items()
        })

        # Critic: state-value estimate.
        self.value_net = nn.Sequential(
            nn.Linear(hidden_dim // 2, hidden_dim // 4),
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, 1),
        )

        # Initialize all linear layers.
        self.apply(self._init_weights)

    def _get_action_dimensions(self) -> Dict[str, int]:
        """Return the action dimension for each head of this factor type.

        Unknown factor types get the fallback sizes (combat=5, support=4,
        special=3); movement is always 8 directions.
        """
        action_dims = {
            # Combat action dimensions.
            'combat': {
                'MYDEI': 6,    # Mydei: counterattack specialist
                'HYACINE': 4,  # Hyacine: healing specialist
                'TRIBBIE': 5,  # Tribbie: movement specialist
                'AGLAEA': 6,   # Aglaea: summon specialist
                'ANAXA': 7,    # Anaxa: multi-hit attacks
                'CASTORICE': 5, # Castorice: HP-burn burst
                'CIPHER': 6,   # Cipher: high-speed pursuit
                'HYSILENS': 5, # Hysilens: damage over time
                'CERYDRA': 6,  # Cerydra: skill support
                'TERRAE': 4,   # Dan Heng Terrae: shield summon
                'EVERNIGHT': 5, # Evernight: summon support
                'PHAINON': 6   # Phainon: transform / action advance
            },
            # Support action dimensions.
            'support': {
                'MYDEI': 3, 'HYACINE': 8, 'TRIBBIE': 4, 'AGLAEA': 5,
                'ANAXA': 3, 'CASTORICE': 2, 'CIPHER': 3, 'HYSILENS': 4,
                'CERYDRA': 7, 'TERRAE': 6, 'EVERNIGHT': 6, 'PHAINON': 4
            },
            # Special action dimensions.
            'special': {
                'MYDEI': 3, 'HYACINE': 4, 'TRIBBIE': 3, 'AGLAEA': 4,
                'ANAXA': 3, 'CASTORICE': 4, 'CIPHER': 3, 'HYSILENS': 4,
                'CERYDRA': 4, 'TERRAE': 4, 'EVERNIGHT': 4, 'PHAINON': 5
            }
        }

        return {
            'movement': 8,  # 8 movement directions, same for every factor
            'combat': action_dims['combat'].get(self.factor_type, 5),
            'support': action_dims['support'].get(self.factor_type, 4),
            'special': action_dims['special'].get(self.factor_type, 3),
        }

    def _init_weights(self, module):
        """Orthogonal init (small gain) for linear layers, zero biases."""
        if isinstance(module, nn.Linear):
            nn.init.orthogonal_(module.weight, gain=0.01)
            nn.init.constant_(module.bias, 0.0)

    def forward(self, state: torch.Tensor) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
        """Encode ``state`` and return (per-head policy logits, value).

        Args:
            state: tensor of shape ``(batch, input_dim)``.

        Returns:
            A dict mapping head name -> logits of shape
            ``(batch, action_dims[head])``, and a value tensor of shape
            ``(batch, 1)``.
        """
        encoded = self.state_encoder(state)

        # Raw (unnormalized) policy logits, one tensor per head.
        policy_logits = {
            head_name: head_layer(encoded)
            for head_name, head_layer in self.policy_heads.items()
        }

        # Critic output.
        value = self.value_net(encoded)

        return policy_logits, value

    def get_action_probabilities(self, state: torch.Tensor) -> Dict[str, torch.Tensor]:
        """Return softmax-normalized action probabilities per head."""
        policy_logits, _ = self.forward(state)
        return {
            head_name: F.softmax(logits, dim=-1)
            for head_name, logits in policy_logits.items()
        }

class MultiHeadPPO:
    """Multi-head PPO trainer — one policy network and optimizer per factor type.

    ``factor_types`` is an iterable of enum-like objects exposing ``.name``
    (used to pick the per-factor action dimensions); experiences are routed
    to the matching policy via their ``'factor_type'`` field.
    """

    # The four action heads every FactorPolicyNetwork exposes.
    HEADS = ('movement', 'combat', 'support', 'special')

    def __init__(self, factor_types, lr=1e-4, gamma=0.99, clip_epsilon=0.2):
        self.factor_types = factor_types
        self.policies = {
            factor_type: FactorPolicyNetwork(factor_type.name)
            for factor_type in factor_types
        }
        self.optimizers = {
            factor_type: torch.optim.Adam(policy.parameters(), lr=lr)
            for factor_type, policy in self.policies.items()
        }

        self.gamma = gamma
        self.clip_epsilon = clip_epsilon
        self.value_loss_coef = 0.5
        self.entropy_coef = 0.01

    def compute_loss(self, experiences):
        """Compute the summed PPO loss over all factor-type policies.

        Each experience dict must carry ``'factor_type'``, ``'state'``,
        ``'actions'``/``'old_log_probs'`` (dicts keyed by head),
        ``'returns'`` and ``'advantages'``.

        Returns:
            ``(total_loss, policy_losses, value_losses)`` where
            ``total_loss`` is a scalar tensor — or the plain int ``0``
            when no experience matched any factor type (callers must
            check before calling ``backward()``).
        """
        total_loss = 0
        policy_losses = {}
        value_losses = {}

        for factor_type, policy in self.policies.items():
            # Route experiences belonging to this factor type.
            batch = [e for e in experiences if e['factor_type'] == factor_type]
            if not batch:
                continue

            # Assemble batched tensors.
            states = torch.stack([e['state'] for e in batch])
            actions = {head: torch.stack([e['actions'][head] for e in batch])
                       for head in self.HEADS}
            old_log_probs = {head: torch.stack([e['old_log_probs'][head] for e in batch])
                             for head in self.HEADS}
            returns = torch.stack([e['returns'] for e in batch])
            advantages = torch.stack([e['advantages'] for e in batch])

            policy_logits, values = policy(states)

            # The policy factorizes over heads, so the joint log-prob of an
            # action tuple is the SUM of per-head log-probs, and the PPO
            # importance ratio is exp(new_joint - old_joint).  (The original
            # averaged the per-head ratios, which is not a valid importance
            # weight for a factored policy.)
            log_ratio = 0
            for head in self.HEADS:
                log_probs = F.log_softmax(policy_logits[head], dim=-1)
                new_log_p = log_probs.gather(1, actions[head].unsqueeze(-1)).squeeze(-1)
                log_ratio = log_ratio + (new_log_p - old_log_probs[head])
            ratio = torch.exp(log_ratio)

            # Clipped surrogate objective.
            surr1 = ratio * advantages
            surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantages
            policy_loss = -torch.min(surr1, surr2).mean()

            # Critic loss.  squeeze(-1), not squeeze(): a bare squeeze()
            # would collapse a batch of size 1 to a 0-d tensor and silently
            # broadcast inside mse_loss.
            value_loss = F.mse_loss(values.squeeze(-1), returns)

            # Entropy bonus, averaged across heads, to encourage exploration.
            entropy = 0
            for head_logits in policy_logits.values():
                probs = F.softmax(head_logits, dim=-1)
                log_p = F.log_softmax(head_logits, dim=-1)
                entropy = entropy + (-probs * log_p).sum(dim=-1).mean()
            entropy = entropy / len(policy_logits)

            # Combined PPO objective for this factor type.
            loss = policy_loss + self.value_loss_coef * value_loss - self.entropy_coef * entropy

            policy_losses[factor_type] = policy_loss.item()
            value_losses[factor_type] = value_loss.item()
            total_loss += loss

        return total_loss, policy_losses, value_losses

    def update(self, experiences):
        """Run one optimization step over all per-factor policies.

        Returns a dict with ``'total_loss'`` (float) and the per-factor
        ``'policy_losses'`` / ``'value_losses'`` dicts.  A batch that
        matches no factor type is a no-op (the original crashed here:
        ``int.backward()`` on the untouched ``total_loss = 0``).
        """
        total_loss, policy_losses, value_losses = self.compute_loss(experiences)

        # No experience matched any policy -> no graph to backprop through.
        if not torch.is_tensor(total_loss):
            return {
                'total_loss': 0.0,
                'policy_losses': policy_losses,
                'value_losses': value_losses
            }

        # Clear stale gradients on every optimizer, then backprop once
        # through the summed loss (each policy's parameters only receive
        # gradients from its own term).
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()

        total_loss.backward()

        # Gradient clipping for stability.
        for policy in self.policies.values():
            torch.nn.utils.clip_grad_norm_(policy.parameters(), 0.5)

        for optimizer in self.optimizers.values():
            optimizer.step()

        return {
            'total_loss': total_loss.item(),
            'policy_losses': policy_losses,
            'value_losses': value_losses
        }