import cv2
from environment import DriverEnv
from agent_qlearning import QlerningAgent
from agent_dqn import DqnAgent


def test_model(env, agent, video_name, epochs=1, if_show=True):
    """Run a trained agent in the environment and record the rollout to a video.

    Plays ``epochs`` full episodes, writing every rendered frame to an XVID
    ``.avi`` file at 20 FPS. When ``if_show`` is true, each frame is also
    displayed in an OpenCV window; pressing 'q' aborts the whole run early.

    Args:
        env: environment exposing ``reset()``, ``step(action)`` (gym-style
            4-tuple return) and ``render()`` returning an HxWxC image.
        agent: policy object exposing ``choose_action(state)``.
        video_name: output path for the recorded video.
        epochs: number of episodes to play (default 1).
        if_show: display frames in a window while recording (default True).
    """
    h, w, _ = env.render().shape
    fourcc = cv2.VideoWriter_fourcc(*"XVID")
    out = cv2.VideoWriter(video_name, fourcc, 20.0, (w, h))

    # Ensure the writer and any windows are released on every exit path
    # (normal completion, 'q' early-return, or an exception mid-episode).
    try:
        for epoch in range(epochs):
            state = env.reset()
            counter = 0  # steps taken this episode
            rewards = 0  # cumulative episode reward
            while True:
                counter += 1
                action = agent.choose_action(state)
                state, reward, done, _ = env.step(action)
                rewards += reward
                img = env.render()
                # Write BEFORE checking `done`/quit so the terminal frame of
                # each episode (and the last shown frame on 'q') is not
                # dropped from the video. convertScaleAbs guarantees an
                # 8-bit image, which VideoWriter requires.
                out.write(cv2.convertScaleAbs(img))
                if if_show:
                    cv2.imshow("test Simulation", img)
                    if cv2.waitKey(10) & 0xFF == ord("q"):
                        return
                if done:
                    break
            print(f"Epoch \t {epoch + 1} \t Steps: {counter} \t reward \t {rewards} \t")
    finally:
        out.release()
        cv2.destroyAllWindows()


# --- Environment setup ---
env = DriverEnv("maps/0")
env.mapGenerator()
env.reset()
# Take one random step so the environment is fully initialized before testing.
env.step(env.action_space.sample())

epochs = 1
# Only pop up the live-view window for single-episode runs.
show_window = epochs == 1

# --- Evaluate the Q-learning model ---
print("\n=== Testing Q-learning Model ===")
qlearning_agent = QlerningAgent()
qlearning_agent.LoadTable("models/Qtable.npy")
test_model(env, qlearning_agent, "videos/output_qlearning.avi", epochs, if_show=show_window)

# --- Evaluate the DQN model ---
print("\n=== Testing DQN Model ===")
dqn_agent = DqnAgent(input_size=41, output_size=25)
dqn_agent.load("models/DQN_model.pt")
test_model(env, dqn_agent, "videos/output_dqn.avi", epochs, if_show=show_window)
