from my_code.carla_env.carla_env import CarlaEnv
from my_code.rf.models.ddpg import DDPGAgent
from my_code.rf.rf_utils.replay_buffer import ReplayBuffer

if __name__ == '__main__':
    # Create the Carla simulation environment.
    env = CarlaEnv()

    # Create the DDPG agent, sized to the environment's state/action spaces.
    agent = DDPGAgent(env.state_dim, env.action_dim, env.action_range)

    # Training hyperparameters.
    num_episodes = 1000
    max_steps_per_episode = 1000
    batch_size = 64

    # Experience replay buffer shared across all episodes.
    replay_buffer = ReplayBuffer(buffer_size=100000)
    max_speed = 50  # speed limit passed to the environment, in km/h

    # Ensure Carla resources are always released, even if training raises
    # partway through (the original only cleaned up on a full, error-free run).
    try:
        # Main training loop: one iteration per episode.
        for episode in range(num_episodes):
            state = env.reset()
            episode_reward = 0

            for _ in range(max_steps_per_episode):
                # Agent picks an action for the current state.
                action = agent.select_action(state)

                # Step the environment: next state, reward, terminal flag.
                next_state, reward, done = env.step(action, max_speed)

                # Store the transition for off-policy learning.
                replay_buffer.add(state, action, next_state, reward, done)

                # Train once the buffer holds more than one batch of samples.
                if len(replay_buffer) > batch_size:
                    agent.train(replay_buffer, batch_size)

                # Advance the state and accumulate the episode return.
                state = next_state
                episode_reward += reward

                # Terminal transition ends the episode early.
                if done:
                    break

            # Report per-episode progress: episode index and cumulative reward.
            print(f"Episode: {episode}, Reward: {episode_reward}")
    finally:
        # Clean up Carla simulation resources unconditionally.
        env.clean()
