"""
trajectories.npz 使用示例
演示如何在 Python 代码中加载和使用轨迹数据
"""
import csv

import numpy as np

# ============================================================
# Example 1: basic loading and access
# ============================================================
banner = "=" * 60
print(banner)
print("示例 1: 基本加载和访问")
print(banner)

# Open the NPZ archive; allow_pickle is needed because the trajectory
# entries are stored as Python objects.
archive = np.load('eval_results/cp_900_fixed/trajectories.npz', allow_pickle=True)

# Show every key stored in the archive.
print("\n可用的数据键:", list(archive.keys()))

# Pull out the three arrays used throughout the rest of the script.
episode_rewards = archive['episode_rewards']
episode_lengths = archive['episode_lengths']
trajectories = archive['trajectories']

print(f"\nEpisode 奖励数组形状: {episode_rewards.shape}")
print(f"Episode 长度数组形状: {episode_lengths.shape}")
print(f"轨迹数组形状: {trajectories.shape}")

# ============================================================
# Example 2: accessing one specific episode's data
# ============================================================
print("\n" + "=" * 60)
print("示例 2: 访问特定 Episode 的数据")
print("=" * 60)

episode_id = 0  # index of the first episode

print(f"\nEpisode {episode_id + 1} 的信息:")
print(f"  总奖励: {episode_rewards[episode_id]}")
print(f"  总步数: {int(episode_lengths[episode_id])}")

# Fetch the detailed per-episode trajectory record.
traj = trajectories[episode_id]
print(f"  轨迹类型: {type(traj)}")

if isinstance(traj, dict):
    print(f"  包含的数据: {list(traj.keys())}")

    # Observations, actions and rewards recorded at every step.
    observations = traj['observations']
    actions = traj['actions']
    rewards = traj['rewards']

    print(f"\n  详细数据:")
    for label, sequence in (
        ("观察数量", observations),
        ("动作数量", actions),
        ("奖励数量", rewards),
    ):
        print(f"    {label}: {len(sequence)}")

# ============================================================
# Example 3: analysing the action sequence
# ============================================================
print("\n" + "=" * 60)
print("示例 3: 分析动作序列")
print("=" * 60)

traj = trajectories[0]
actions = traj['actions']

# Show the joint action taken at each of the first ten steps.
print(f"\n前 10 步的动作:")
for step_no, joint_action in enumerate(actions[:10], start=1):
    print(f"  步骤 {step_no}: Agent0={joint_action[0]}, Agent1={joint_action[1]}")

# Per-agent action histograms computed with np.unique.
agent0_actions = [a[0] for a in actions]
agent1_actions = [a[1] for a in actions]

print(f"\nAgent0 动作分布:")
for action, count in zip(*np.unique(agent0_actions, return_counts=True)):
    print(f"  动作 {action}: {count} 次")

print(f"\nAgent1 动作分布:")
for action, count in zip(*np.unique(agent1_actions, return_counts=True)):
    print(f"  动作 {action}: {count} 次")

# ============================================================
# Example 4: analysing the reward distribution
# ============================================================
print("\n" + "=" * 60)
print("示例 4: 分析奖励分布")
print("=" * 60)

traj = trajectories[0]
rewards = traj['rewards']

# Collect (step index, reward) pairs for every positively rewarded step.
positive_steps = [(idx, r) for idx, r in enumerate(rewards) if r > 0]

print(f"\n奖励统计:")
print(f"  总奖励: {sum(rewards)}")
print(f"  平均每步奖励: {np.mean(rewards):.4f}")
print(f"  最大单步奖励: {max(rewards)}")
print(f"  获得奖励的步数: {len(positive_steps)}")

# List every step on which a reward was obtained.
if positive_steps:
    print(f"\n获得奖励的步骤:")
    for step, reward in positive_steps:
        print(f"  步骤 {step+1}: +{reward}")

# ============================================================
# Example 5: comparing different episodes
# ============================================================
print("\n" + "=" * 60)
print("示例 5: 比较不同 Episodes")
print("=" * 60)

print(f"\n所有 Episodes 的奖励:")
for episode_no, reward in enumerate(episode_rewards, start=1):
    print(f"  Episode {episode_no}: {reward:.2f}")

# Locate the best- and worst-scoring episodes.
best_idx = int(np.argmax(episode_rewards))
worst_idx = int(np.argmin(episode_rewards))

print(f"\n最佳 Episode: {best_idx + 1}, 奖励={episode_rewards[best_idx]:.2f}")
print(f"最差 Episode: {worst_idx + 1}, 奖励={episode_rewards[worst_idx]:.2f}")

# ============================================================
# Example 6: extracting observation data
# ============================================================
print("\n" + "=" * 60)
print("示例 6: 访问观察数据")
print("=" * 60)

traj = trajectories[0]
observations = traj['observations']

print(f"\n观察数据信息:")
print(f"  总观察数: {len(observations)}")
# len()-based check: observations may be an ndarray, whose truth value
# is ambiguous when it holds more than one element.
if len(observations) > 0:
    first_obs = observations[0]  # observation recorded at step 0
    print(f"  每步观察的结构: {type(first_obs)}")
    if isinstance(first_obs, (list, np.ndarray)):
        print(f"  智能体数量: {len(first_obs)}")
        for agent_idx in (0, 1):
            print(f"  Agent{agent_idx} 观察维度: {np.array(first_obs[agent_idx]).shape}")

        # Show only the leading dimensions to keep the output short.
        print(f"\n第一步 Agent0 的观察（前10维）:")
        print(f"  {np.array(first_obs[0])[:10]}")

# ============================================================
# Example 7: exporting data to CSV
# ============================================================
print("\n" + "="*60)
print("示例 7: 导出动作和奖励到 CSV")
print("="*60)

traj = trajectories[0]
actions = traj['actions']
rewards = traj['rewards']

csv_file = 'eval_results/cp_900_fixed/episode_1_data.csv'

# Write one row per step: both agents' actions, the instantaneous reward
# and the running cumulative reward.  (`csv` is imported at the top of
# the file — a mid-file import violates PEP 8.)
with open(csv_file, 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['Step', 'Agent0_Action', 'Agent1_Action', 'Reward', 'Cumulative_Reward'])

    cumulative = 0
    for i, (action, reward) in enumerate(zip(actions, rewards)):
        cumulative += reward
        writer.writerow([i+1, action[0], action[1], reward, cumulative])

print(f"\n✓ 数据已导出到: {csv_file}")

# ============================================================
# Example 8: computing summary statistics
# ============================================================
print("\n" + "="*60)
print("示例 8: 计算统计指标")
print("="*60)

# An episode counts as a success when its total reward is positive.
success_count = sum(1 for r in episode_rewards if r > 0)
total_count = len(episode_rewards)
# Guard against an empty archive: report 0% instead of raising
# ZeroDivisionError when no episodes were recorded.
success_rate = success_count / total_count * 100 if total_count else 0.0

print(f"\n成功统计:")
print(f"  总 Episodes: {total_count}")
print(f"  成功 Episodes: {success_count}")
print(f"  成功率: {success_rate:.1f}%")

print(f"\n奖励统计:")
print(f"  平均奖励: {np.mean(episode_rewards):.2f}")
print(f"  标准差: {np.std(episode_rewards):.2f}")
print(f"  中位数: {np.median(episode_rewards):.2f}")
print(f"  最大值: {np.max(episode_rewards):.2f}")
print(f"  最小值: {np.min(episode_rewards):.2f}")

print(f"\n长度统计:")
print(f"  平均长度: {np.mean(episode_lengths):.2f} 步")
print(f"  最短: {np.min(episode_lengths):.0f} 步")
print(f"  最长: {np.max(episode_lengths):.0f} 步")

print("\n" + "="*60)
print("所有示例运行完成！")
print("="*60)
