import swanlab
from config import config


def train_agent(agent, env):
    """Train a DQN agent on `env` for config.EPISODES episodes.

    Runs one training episode at a time, decays epsilon after each episode,
    evaluates every 10 episodes (snapshotting the best-performing weights),
    and logs per-episode metrics to swanlab.

    Args:
        agent: DQN agent exposing choose_action / store_experience / train /
            evaluate / save_model, plus epsilon, best_avg_reward, q_net and
            best_net attributes (assumed initialized by the caller).
        env: gymnasium-style environment (reset() -> (obs, info),
            step() -> (obs, reward, terminated, truncated, info)).
    """
    init_experiment(env)

    for episode in range(config.EPISODES):
        state = env.reset()[0]
        total_reward = 0
        steps = 0

        while True:
            action = agent.choose_action(state)
            next_state, reward, done, _, _ = env.step(action)
            agent.store_experience(state, action, reward, next_state, done)
            agent.train()

            total_reward += reward
            state = next_state
            steps += 1
            # Stop on terminal state or episode step budget.
            if done or steps >= config.MAX_STEPS_PER_EPISODE:
                break

        # Multiplicative epsilon decay, floored at EPSILON_END.
        agent.epsilon = max(config.EPSILON_END, agent.epsilon * config.EPSILON_DECAY)

        if episode % 10 == 0:
            # NOTE(review): evaluation reuses the training env (the previous
            # `eval_env = env` alias was a no-op, removed). Evaluating on a
            # separate env instance would avoid state coupling — TODO confirm.
            avg_reward = agent.evaluate(env)

            if avg_reward > avg_reward.__class__(agent.best_avg_reward):
                agent.best_avg_reward = avg_reward
                # Snapshot current Q-network weights into best_net via cloned
                # tensors so later training updates don't mutate the snapshot.
                agent.best_net.load_state_dict({k: v.clone() for k, v in agent.q_net.state_dict().items()})
                agent.save_model(path=f"{config.OUTPUT_DIR}/best_model_episode{episode}.pth")

        swanlab.log({
            "train/reward": total_reward,
            "eval/best_avg_reward": agent.best_avg_reward,
            "train/epsilon": agent.epsilon
        }, step=episode)

        print(f"Episode: {episode}, Reward: {total_reward}, Best Avg: {agent.best_avg_reward}")


def test_agent(agent, env):
    """Run 3 greedy evaluation episodes with the best saved network.

    Exploration is switched off (epsilon = 0) and the best-performing
    weights are restored into the online Q-network before rollout.
    """
    agent.epsilon = 0
    agent.q_net.load_state_dict(agent.best_net.state_dict())

    for episode in range(3):
        state = env.reset()[0]
        total_reward = 0

        # Bounded rollout: at most MAX_STEPS_PER_EPISODE env steps.
        for _ in range(config.MAX_STEPS_PER_EPISODE):
            next_state, reward, done, _, _ = env.step(agent.choose_action(state))
            total_reward += reward
            state = next_state
            if done:
                break

        print(f"Test Episode: {episode}, Reward: {total_reward}")


def init_experiment(env):
    """Initialize and return a swanlab run for DQN CartPole training.

    Records the environment's observation/action dimensions together with
    the training hyperparameters from `config`.
    """
    run_config = {
        "state_dim": env.observation_space.shape[0],
        "action_dim": env.action_space.n,
        "batch_size": config.BATCH_SIZE,
        "gamma": config.GAMMA,
        "update_target_freq": config.UPDATE_TARGET_FREQ,
        "replay_buffer_size": config.REPLAY_BUFFER_SIZE,
        "learning_rate": config.LEARNING_RATE,
        "episode": config.EPISODES,
        "epsilon_start": config.EPSILON_START,
        "epsilon_end": config.EPSILON_END,
        "epsilon_decay": config.EPSILON_DECAY,
    }

    return swanlab.init(
        project="CartPole",
        experiment_name="DQN-CartPole-v1",
        config=run_config,
        description="DQN训练参数配置",
    )
