import numpy as np


def load_smac_data(data, n_agents):
    """Flatten padded SMAC episode data into per-transition training arrays.

    Parameters
    ----------
    data : dict of np.ndarray
        Episode-major arrays, e.g. "states" (N, T, state_dim),
        "obs" (N, T, n_agents, obs_dim), "avails" (N, T, n_agents, n_actions),
        "actions" (N, T, n_agents, 1), and "rewards"/"dones"/"actives" each
        (N, T).  Arrays lacking a per-agent axis are broadcast to one and
        written back into ``data`` (the dict is mutated in place).
    n_agents : int
        Number of agents per episode.

    Returns
    -------
    tuple of np.ndarray
        ``(_states, _obs, _actions, _done_ids, _rewards, _next_states,
        _next_obs, _avails, _is_inits)``.  Flattened arrays have shape
        ``(total_active_steps, n_agents, ...)``; ``_rewards`` holds
        reward-to-go (reverse cumulative sums over time) and ``_done_ids``
        the cumulative per-agent active-step counts across episodes.
    """
    print("Loading SMAC data")
    states = data["states"]
    obs = data["obs"]
    avails = data["avails"]
    actions = data["actions"]
    rewards = data["rewards"]
    dones = data["dones"]
    actives = data["actives"]

    # Effective (unpadded) length of each episode.
    lengths = actives.sum(axis=1)   # (N,)
    min_len = lengths.min()
    max_len = lengths.max()
    mean_len = lengths.mean()
    print(f"Episode 数量: {len(lengths)}")
    print(f"最短长度: {min_len}")
    # BUG FIX: this previously printed the hard-coded literal "10,860"
    # instead of the computed maximum length.
    print(f"最长长度: {max_len}")
    print(f"平均长度: {mean_len:.2f}")

    # Broadcast a per-agent axis onto arrays that lack one, e.g.
    # states (N, T, D) -> (N, T, n_agents, D) and
    # rewards/dones/actives (N, T) -> (N, T, n_agents).
    if len(states.shape) == 3:
        data["states"] = states = np.repeat(states[:, :, None, :], n_agents, axis=2)
    if len(rewards.shape) == 2:
        data["rewards"] = rewards = np.repeat(rewards[:, :, None], n_agents, axis=2)
    if len(dones.shape) == 2:
        data["dones"] = dones = np.repeat(dones[:, :, None], n_agents, axis=2)
    if len(actives.shape) == 2:
        data["actives"] = actives = np.repeat(actives[:, :, None], n_agents, axis=2)

    # Reward-to-go: reverse cumulative sum along the time axis.
    rewards = np.cumsum(rewards[:, ::-1], 1)[:, ::-1]
    # Cumulative end index of each episode's active steps, per agent: (N, n_agents).
    _done_ids = np.cumsum(actives.sum(1), 0)

    # ---- Keep only the valid (actives == True) timesteps ----
    mask = actives.astype(bool)

    _states = states[:, :-1][mask[:, :-1]]
    _obs = obs[:, :-1][mask[:, :-1]]
    _avails = avails[:, :-1][mask[:, :-1]]
    _actions = actions[:, :-1][mask[:, :-1]]
    _rewards = rewards[:, :-1][mask[:, :-1]]
    # NOTE: "dones" is broadcast into `data` above but is not part of the
    # return tuple, so no flattened copy is built here (unused local removed).

    # Next-step pairs: values at t+1 selected with the validity mask at t.
    _next_states = states[:, 1:][mask[:, :-1]]
    _next_obs = obs[:, 1:][mask[:, :-1]]

    # Flag the first timestep of every episode, then filter like the rest.
    _is_inits = np.zeros_like(rewards)
    _is_inits[:, 0] = 1
    _is_inits = _is_inits[:, :-1][mask[:, :-1]]

    # Boolean masking flattens (episode, time, agent); restore the agent axis.
    _states = _states.reshape(-1, n_agents, _states.shape[-1])
    _obs = _obs.reshape(-1, n_agents, _obs.shape[-1])
    _avails = _avails.reshape(-1, n_agents, _avails.shape[-1])
    _actions = _actions.reshape(-1, n_agents, 1)
    _rewards = _rewards.reshape(-1, n_agents)

    _next_states = _next_states.reshape(-1, n_agents, _next_states.shape[-1])
    _next_obs = _next_obs.reshape(-1, n_agents, _next_obs.shape[-1])
    _is_inits = _is_inits.reshape(-1, n_agents)

    print("Done loading SMAC data")

    return _states, _obs, _actions, _done_ids, _rewards, _next_states, _next_obs, _avails, _is_inits


def load_mamujoco_data(data):
    """Select the valid (active) timesteps from padded MaMuJoCo episodes.

    Steps:
      1. Mark initial states: build an ``is_inits`` array flagging the first
         timestep of every episode.
      2. Filter valid data: keep only the entries selected by the
         ``actives`` mask.
      3. Build time-aligned pairs: (state, obs, action) at step t together
         with (next_state, next_obs) at step t + 1.
    """
    active = data["actives"]

    # Flag the first timestep of every episode, then keep active entries only.
    init_flags = np.zeros_like(data["rewards"])
    init_flags[:, 0] = 1
    init_flags = init_flags[active]

    # Current-step quantities (drop the final padded step before masking).
    cur_states = data["states"][:, :-1][active]
    cur_obs = data["obs"][:, :-1][active]
    cur_actions = data["actions"][:, :-1][active]

    step_rewards = data["rewards"][active].reshape(-1, 1)
    # Continuation mask: 1 while the episode keeps running, 0 at "done".
    continue_mask = 1 - data["dones"][active].reshape(-1, 1)

    # Next-step quantities, aligned with the current-step mask.
    nxt_states = data["states"][:, 1:][active]
    nxt_obs = data["obs"][:, 1:][active]

    return (cur_states, cur_obs, cur_actions, step_rewards, continue_mask,
            nxt_states, nxt_obs, init_flags)


def evaluate(agent, env, environment="mujoco", num_evaluation=10, max_steps=None):
    """Run evaluation episodes and return the mean episode reward.

    Parameters
    ----------
    agent : object
        Must expose ``step(obs) -> actions`` where the result has a
        ``.numpy()`` method (e.g. a torch tensor).
    env : object
        Environment with ``reset() -> (obs, _, _)`` and
        ``step(action) -> (next_obs, _, reward, done, _, _)``.
        NOTE(review): the ``reward[0, 0, 0]`` / ``done[0, 0]`` indexing
        assumes batched multi-agent-shaped outputs — confirm against env.
    environment : str
        Environment family; "mujoco" defaults ``max_steps`` to 1000.
    num_evaluation : int
        Number of evaluation episodes to average over.
    max_steps : int or None
        Per-episode step cap; required unless it can be defaulted.

    Returns
    -------
    float
        Mean of the per-episode cumulative rewards.

    Raises
    ------
    ValueError
        If ``max_steps`` is neither given nor defaultable.
    """
    episode_rewards = []
    if max_steps is None and environment == "mujoco":
        max_steps = 1000
    # BUG FIX: was `assert max_steps != None` — asserts are stripped under
    # `python -O`, and None comparison should use `is not`.  Validate
    # explicitly instead.
    if max_steps is None:
        raise ValueError("max_steps must be provided for non-mujoco environments")

    for _ in range(num_evaluation):
        obs, _, _ = env.reset()
        episode_reward = 0
        for _ in range(max_steps):
            actions = agent.step(np.array(obs).astype(np.float32))
            action = actions.numpy()

            next_obs, _, reward, done, _, _ = env.step(action)
            episode_reward += reward[0, 0, 0]

            if done[0, 0]:
                break
            obs = next_obs
        episode_rewards.append(episode_reward)

    return np.mean(episode_rewards)