# models/lightweight_ppo.py - complete implementation
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np

class MultiHeadFactorNetwork(nn.Module):
    """Multi-head factor decision network supporting several action types.

    A shared MLP encoder feeds four per-task policy heads (movement,
    combat, support, special) plus a small MLP that estimates the scalar
    state value.
    """

    def __init__(self, input_dim=128, hidden_dim=256, factor_type=None):
        """Build the shared encoder, the policy heads and the value head.

        Args:
            input_dim: Size of the flat state vector fed to the network.
            hidden_dim: Width of the first encoder layer; later layers are
                derived from it (hidden_dim // 2, hidden_dim // 4).
            factor_type: Opaque tag identifying which factor this network
                controls; stored as-is, never interpreted here.
        """
        super().__init__()
        self.factor_type = factor_type

        half = hidden_dim // 2

        # Shared feature encoder: two linear layers with ReLU + dropout.
        self.shared_encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim, half),
            nn.ReLU(),
            nn.Dropout(0.1),
        )

        # One linear policy head per action family; the value is that
        # family's number of discrete actions.
        head_sizes = {
            'movement': 8,   # 8 movement directions
            'combat': 6,     # 6 combat actions
            'support': 5,    # 5 support actions
            'special': 4,    # 4 special abilities
        }
        self.policy_heads = nn.ModuleDict({
            head: nn.Linear(half, n_actions)
            for head, n_actions in head_sizes.items()
        })

        # State-value estimator on top of the shared features.
        self.value_head = nn.Sequential(
            nn.Linear(half, hidden_dim // 4),
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, 1),
        )

    def forward(self, x):
        """Encode `x` and return `({head_name: logits}, value)`.

        Args:
            x: Float tensor of shape (batch, input_dim).

        Returns:
            A pair of (dict mapping head name to per-action logits of shape
            (batch, n_actions), value tensor of shape (batch, 1)).
        """
        features = self.shared_encoder(x)
        logits = {head: layer(features) for head, layer in self.policy_heads.items()}
        return logits, self.value_head(features)

class MultiHeadPPO:
    """多头部PPO训练器"""
    
    def __init__(self, factor_types, lr=1e-4, gamma=0.99, clip_epsilon=0.2, 
                 value_coef=0.5, entropy_coef=0.01, max_grad_norm=0.5):
        self.policies = {
            factor_type: MultiHeadFactorNetwork(factor_type=factor_type)
            for factor_type in factor_types
        }
        
        self.optimizers = {
            factor_type: optim.Adam(policy.parameters(), lr=lr)
            for factor_type, policy in self.policies.items()
        }
        
        self.gamma = gamma
        self.clip_epsilon = clip_epsilon
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.max_grad_norm = max_grad_norm
        
        # 移动到GPU（如果可用）
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        for policy in self.policies.values():
            policy.to(self.device)
    
    def compute_actions(self, states, factor_types):
        """计算动作和概率"""
        actions = {}
        log_probs = {}
        values = {}
        entropies = {}
        
        for factor_type, state_batch in states.items():
            if factor_type not in self.policies:
                continue
                
            policy = self.policies[factor_type]
            state_tensor = torch.FloatTensor(state_batch).to(self.device)
            
            with torch.no_grad():
                policy_logits, value = policy(state_tensor)
                
                # 为每个动作头采样动作
                action_dict = {}
                log_prob_dict = {}
                entropy_dict = {}
                
                for head_name, logits in policy_logits.items():
                    probs = F.softmax(logits, dim=-1)
                    dist = Categorical(probs)
                    action = dist.sample()
                    
                    action_dict[head_name] = action.cpu()
                    log_prob_dict[head_name] = dist.log_prob(action).cpu()
                    entropy_dict[head_name] = dist.entropy().cpu()
                
                actions[factor_type] = action_dict
                log_probs[factor_type] = log_prob_dict
                values[factor_type] = value.cpu()
                entropies[factor_type] = entropy_dict
        
        return actions, log_probs, values, entropies
    
    def update(self, experiences, ppo_epochs=4, mini_batch_size=32):
        """PPO更新步骤"""
        total_loss = 0
        update_counts = {}
        
        for factor_type, policy in self.policies.items():
            # 过滤该因子类型的经验
            factor_experiences = [e for e in experiences if e['factor_type'] == factor_type]
            
            if not factor_experiences:
                continue
                
            # 转换为张量
            states = torch.stack([e['state'] for e in factor_experiences]).to(self.device)
            old_log_probs = {
                head: torch.stack([e['old_log_probs'][head] for e in factor_experiences])
                for head in ['movement', 'combat', 'support', 'special']
            }
            actions = {
                head: torch.stack([e['actions'][head] for e in factor_experiences])
                for head in ['movement', 'combat', 'support', 'special']
            }
            advantages = torch.stack([e['advantages'] for e in factor_experiences]).to(self.device)
            returns = torch.stack([e['returns'] for e in factor_experiences]).to(self.device)
            
            # 多轮PPO更新
            for epoch in range(ppo_epochs):
                # 小批量训练
                indices = torch.randperm(len(factor_experiences))
                for start in range(0, len(indices), mini_batch_size):
                    end = start + mini_batch_size
                    batch_indices = indices[start:end]
                    
                    batch_states = states[batch_indices]
                    batch_advantages = advantages[batch_indices]
                    batch_returns = returns[batch_indices]
                    
                    # 计算新策略
                    new_policy_logits, new_values = policy(batch_states)
                    
                    # 策略损失
                    policy_loss = 0
                    entropy_loss = 0
                    
                    for head_name in ['movement', 'combat', 'support', 'special']:
                        batch_old_log_probs = old_log_probs[head_name][batch_indices].to(self.device)
                        batch_actions = actions[head_name][batch_indices].to(self.device)
                        
                        new_probs = F.softmax(new_policy_logits[head_name], dim=-1)
                        new_dist = Categorical(new_probs)
                        new_log_probs = new_dist.log_prob(batch_actions)
                        
                        # PPO clip损失
                        ratio = torch.exp(new_log_probs - batch_old_log_probs)
                        surr1 = ratio * batch_advantages
                        surr2 = torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * batch_advantages
                        policy_loss += -torch.min(surr1, surr2).mean()
                        
                        # 熵奖励
                        entropy_loss += -new_dist.entropy().mean()
                    
                    # 价值损失
                    value_loss = F.mse_loss(new_values.squeeze(), batch_returns)
                    
                    # 总损失
                    total_batch_loss = (policy_loss + 
                                      self.value_coef * value_loss + 
                                      self.entropy_coef * entropy_loss)
                    
                    # 反向传播
                    self.optimizers[factor_type].zero_grad()
                    total_batch_loss.backward()
                    torch.nn.utils.clip_grad_norm_(policy.parameters(), self.max_grad_norm)
                    self.optimizers[factor_type].step()
                    
                    total_loss += total_batch_loss.item()
                    update_counts[factor_type] = update_counts.get(factor_type, 0) + 1
        
        avg_loss = total_loss / (sum(update_counts.values()) + 1e-8)
        return {'total_loss': avg_loss, 'update_counts': update_counts}
    
    def save_checkpoint(self, filepath):
        """保存检查点"""
        checkpoint = {
            'policies_state': {
                factor_type.name: policy.state_dict() 
                for factor_type, policy in self.policies.items()
            },
            'optimizers_state': {
                factor_type.name: optimizer.state_dict()
                for factor_type, optimizer in self.optimizers.items()
            },
            'training_config': {
                'gamma': self.gamma,
                'clip_epsilon': self.clip_epsilon
            }
        }
        torch.save(checkpoint, filepath)
    
    def load_checkpoint(self, filepath):
        """加载检查点"""
        checkpoint = torch.load(filepath, map_location=self.device)
        
        for factor_type, policy in self.policies.items():
            policy.load_state_dict(checkpoint['policies_state'][factor_type.name])
        
        for factor_type, optimizer in self.optimizers.items():
            optimizer.load_state_dict(checkpoint['optimizers_state'][factor_type.name])