import time
from mlagents_envs.base_env import ActionTuple
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.exception import UnityCommunicatorStoppedException, UnityCommunicationException
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel


def connect_unity(exe_path=None, port=5004, timeout=30, time_scale=5, no_graphics=True):
    """Create, configure, and reset a connection to a Unity ML-Agents environment.

    :param exe_path: path to the built executable (None = train against the Unity editor)
    :param port: base communication port (default 5004)
    :param timeout: maximum number of seconds to wait for the connection
    :param time_scale: speed multiplier for the physics simulation
    :param no_graphics: when True, a built player runs without showing a window
    :return: a connected ``UnityEnvironment`` instance
    :raises Exception: re-raises whatever the connection attempt raised,
        after printing a failure notice
    """
    print(f"正在连接Unity环境，最多等待 {timeout} 秒...")

    if exe_path is None:
        # Editor mode: the user must press Play inside the Unity editor.
        print("[编辑器模式]:请在Unity编辑器当中开启模拟环境。")

    try:
        started = time.time()
        # Queue the engine configuration; it is delivered once the env connects.
        config_channel = EngineConfigurationChannel()
        config_channel.set_configuration_parameters(
            time_scale=time_scale, target_frame_rate=-1
        )
        env = UnityEnvironment(
            file_name=exe_path,
            base_port=port,
            timeout_wait=timeout,
            no_graphics=no_graphics,
            side_channels=[config_channel],
        )
        env.reset()
        print(f"✓ 连接成功！耗时 {time.time() - started:.1f} 秒")
        return env
    except Exception:
        print("✗ 连接失败！")
        raise

def get_unity_env_info(unity_env: UnityEnvironment):
    """Collect per-behavior metadata from a connected environment.

    :param unity_env: the environment instance to inspect
    :return: a list with one dict per behavior, containing the behavior name,
        the observation shapes, the (continuous_size, n_discrete_branches)
        pair, and the discrete-action branch layout
    """
    return [
        {
            "behavior_name": name,
            "observation_dim": [
                obs_spec.shape for obs_spec in behavior_spec.observation_specs
            ],
            "action_dim": (
                behavior_spec.action_spec.continuous_size,
                len(behavior_spec.action_spec.discrete_branches),
            ),
            "discrete_action_branches": behavior_spec.action_spec.discrete_branches,
        }
        for name, behavior_spec in unity_env.behavior_specs.items()
    ]

def reset_unity_env(unity_env: UnityEnvironment, behavior_names: list[str]):
    """Reset the environment and return the initial state for each behavior.

    :param unity_env: the environment instance
    :param behavior_names: the behaviors whose initial state should be collected
    :return: one dict per behavior with its initial observations and agent ids

    NOTE: this resets *every* agent in the environment; agents that remain
    inactive after the reset are not included in the result.
    """
    unity_env.reset()

    def _initial_state(name):
        # Right after a reset only the decision steps carry useful state,
        # so the terminal steps are deliberately ignored.
        decision_steps, _ = unity_env.get_steps(name)
        return {
            "behavior_name": name,
            "observations": decision_steps.obs,
            "agent_ids": decision_steps.agent_id,
        }

    return [_initial_state(name) for name in behavior_names]

def step_unity_env(unity_env, behavior_name, agent_ids, continuous_actions=None, discrete_actions=None):
    """Assign per-agent actions, advance the simulation one step, and read back results.

    :param unity_env: the environment instance
    :param behavior_name: the single behavior the actions belong to
    :param agent_ids: ids of the agents the actions are assigned to
    :param continuous_actions: per-agent continuous actions, rows aligned with
        ``agent_ids`` (assumes numpy-like rows supporting ``reshape`` — TODO confirm)
    :param discrete_actions: per-agent discrete actions, same alignment
    :return: two triples ``(observations, rewards, agent_ids)`` — the first for
        agents still requesting a decision, the second for agents that
        terminated during this step
    """
    has_continuous = continuous_actions is not None
    has_discrete = discrete_actions is not None

    if has_continuous or has_discrete:
        for index, agent_id in enumerate(agent_ids):
            # Each agent gets its own ActionTuple, shaped (1, action_size).
            agent_action = ActionTuple()
            if has_continuous:
                agent_action.add_continuous(continuous_actions[index].reshape(1, -1))
            if has_discrete:
                agent_action.add_discrete(discrete_actions[index].reshape(1, -1))
            unity_env.set_action_for_agent(
                behavior_name=behavior_name, agent_id=agent_id, action=agent_action
            )

    unity_env.step()
    decision_steps, terminal_steps = unity_env.get_steps(behavior_name=behavior_name)

    alive = (decision_steps.obs, decision_steps.reward, decision_steps.agent_id)
    dead = (terminal_steps.obs, terminal_steps.reward, terminal_steps.agent_id)
    return alive, dead

if __name__ == "__main__":
    # ================================= Usage example ==================================
    env = None
    try:
        env = connect_unity()
        info = get_unity_env_info(env)
        print(f'环境信息：{info}')
        print("1.使用环境信息定义神经网络。")
        # ...define the neural networks here...

        print("2.重置环境获取初始状态信息")
        # ...reset the environment and pull the initial state you need...

        print("3.根据状态信息计算action,反馈给环境")

        print("4.不断循环，直到训练完成")

    except (UnityCommunicationException, UnityCommunicatorStoppedException):
        print("和Unity的连接出错！")
    except Exception as ex:
        print(f"程序执行出错: {str(ex)}")

    finally:
        # Always release the Unity connection, even after a failure.
        if env is not None:
            env.close()
            print("Unity 环境已关闭！")