import gymnasium as gym
import torch
import numpy as np
import pickle
import os
from Hunt_Env.Hunt_env.envs.Hunt_world import HunterEnv
from Hunt_Env.Hunt_env.envs.decision.rule_based_decision import rule_based_decision

# Ensure the expert-data output directory exists before any collection runs.
expert_data_dir = 'expert_data'
os.makedirs(expert_data_dir, exist_ok=True)

def process_observation(obs):
    """
    Flatten a (possibly nested) observation dict into a single 1-D tensor.

    Each leaf value is converted to a float32 tensor and flattened; nested
    dicts are flattened recursively in insertion order.

    Args:
        obs: dict mapping keys to array-like leaf values or nested dicts.

    Returns:
        torch.Tensor: 1-D float32 tensor containing all leaf values
        concatenated. An empty dict yields an empty tensor instead of
        raising (torch.cat on an empty list is an error).
    """
    parts = []
    for value in obs.values():
        if isinstance(value, dict):
            parts.append(process_observation(value))
        else:
            # reshape(-1) flattens tensors of any rank, including 0-dim
            # scalars, so no separate unsqueeze step is needed.
            parts.append(torch.tensor(value, dtype=torch.float32).reshape(-1))
    if not parts:
        return torch.empty(0, dtype=torch.float32)
    return torch.cat(parts)

def collect_expert_data():
    """
    Collect expert trajectory data by rolling out rule-based policies.

    Runs `num_episodes` episodes of HunterEnv in which both agents act via
    rule_based_decision, records per-step (state, action, reward) data for
    agent_0, and pickles the episode list to
    expert_data/expert_trajectories.pkl.

    Returns:
        list[list[dict]]: one list of step dicts per episode, each with
        keys 'state' (np.ndarray), 'action' (np.ndarray) and 'reward'.
    """
    env = HunterEnv(render_mode='rgb_array')

    # Collection parameters.
    num_episodes = 1000  # total episodes to collect
    max_timesteps = 300  # hard cap on steps per episode

    expert_trajectories = []

    print(f"开始收集专家数据，共 {num_episodes} 个回合")

    try:
        for episode in range(1, num_episodes + 1):
            # Gymnasium-style reset() returns (obs, info); tolerate older
            # envs whose reset() returns the observation alone.
            reset_result = env.reset()
            state = reset_result[0] if isinstance(reset_result, tuple) else reset_result
            episode_data = []

            for t in range(max_timesteps):
                # Rule-based expert action for the hunter agent.
                expert_action = rule_based_decision(state['agent_0'], '猎手')

                # Flatten the hunter's observation for storage.
                processed_state = process_observation(state['agent_0'])

                action = {
                    'agent_0': expert_action,
                    'agent_1': rule_based_decision(state['agent_1'], '逃脱者')
                }

                # Gymnasium step() -> (obs, reward, terminated, truncated, info).
                # The original ignored `truncated`, which let time-limited
                # episodes run past their limit; stop on either flag.
                next_state, _, terminated, truncated, info = env.step(action)

                # Per-agent reward is reported through info, not the
                # scalar reward slot.
                reward = info['reward']['agent_0']

                episode_data.append({
                    'state': processed_state.numpy(),
                    'action': np.array(expert_action),
                    'reward': reward
                })

                state = next_state

                if terminated or truncated:
                    break

            expert_trajectories.append(episode_data)

            # Progress report every 100 episodes.
            if episode % 100 == 0:
                print(f"已收集 {episode}/{num_episodes} 个回合的数据")
    finally:
        # Release env resources even if an episode raises.
        env.close()

    # Persist the collected trajectories.
    data_path = os.path.join(expert_data_dir, 'expert_trajectories.pkl')
    with open(data_path, 'wb') as f:
        pickle.dump(expert_trajectories, f)

    print(f"专家数据收集完成！")
    print(f"数据保存到: {data_path}")
    print(f"共收集 {len(expert_trajectories)} 个回合的数据")

    # Report the total number of recorded steps across all episodes.
    total_steps = sum(len(episode) for episode in expert_trajectories)
    print(f"总步数: {total_steps}")

    return expert_trajectories

# Script entry point: run the full data-collection loop.
if __name__ == '__main__':
    collect_expert_data()