import numpy as np
import torch

def policy_entropy(actor, obs_sample):
    """Return the mean entropy of the policy's action distribution.

    Args:
        actor: Callable mapping observations to action logits (last dim = actions).
        obs_sample: Batch of observations accepted by ``actor``.

    Returns:
        float: Mean entropy (in nats) over the batch.
    """
    with torch.no_grad():
        logits = actor(obs_sample)
        # log_softmax is numerically stable; avoids log(0) without an
        # arbitrary epsilon that biases the entropy estimate.
        log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
        probs = log_probs.exp()
        entropy = -(probs * log_probs).sum(dim=-1).mean()
    return entropy.item()

def action_variance(actor, obs_samples):
    """Return the mean variance of greedy action indices across observations.

    Args:
        actor: Callable mapping an observation tensor to action logits.
        obs_samples: Iterable of observations (array-likes) fed one at a time.

    Returns:
        float: Mean (over action dims) of the variance of argmax actions
        taken across ``obs_samples``.
    """
    actions = []
    # Hoist no_grad out of the loop; one context for all forward passes.
    with torch.no_grad():
        for obs in obs_samples:
            # as_tensor avoids a copy when obs is already a float tensor;
            # explicit float32 matches the legacy torch.Tensor(obs) behavior.
            obs_t = torch.as_tensor(obs, dtype=torch.float32)
            # .cpu().numpy() makes this safe for CUDA actors — np.var cannot
            # implicitly convert GPU tensors.
            actions.append(actor(obs_t).argmax(-1).cpu().numpy())
    return np.var(actions, axis=0).mean()