import numpy as np

def calc_entropy(episode):
    """Return the mean policy entropy over an episode.

    If action logits were available, one could softmax them; here we assume
    the action distribution is stored as episode["action_probs"], expected
    shape [T, n_agents, n_actions] (per the original author's note — confirm
    against the producer of this dict).

    Per timestep, entropy is computed per agent over the action axis, then
    averaged over agents; the final value is the mean over timesteps.

    Returns 0.0 when "action_probs" is absent or empty (avoids the
    nan / RuntimeWarning that np.mean([]) would produce).
    """
    timesteps = episode.get("action_probs")
    if not timesteps:
        return 0.0
    entropy_list = []
    for timestep_probs in timesteps:  # each item: [n_agents, n_actions]
        # Convert once per timestep; 1e-10 guards log(0) for zero-prob actions.
        p = np.asarray(timestep_probs)
        timestep_entropy = -np.sum(p * np.log(p + 1e-10), axis=-1)
        entropy_list.append(np.mean(timestep_entropy))
    return np.mean(entropy_list)

def calc_episode_reward(episode):
    """Return the total reward accumulated over the episode.

    Sums episode["reward"]; a missing or empty "reward" key yields 0.
    """
    total = 0
    for r in episode.get("reward", []):
        total += r
    return total
