import os

import numpy as np
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.offline.json_writer import JsonWriter

from rl_evolve.data.astar_actor import AstarActor
from rl_evolve.envs.uav_2d.uav_env import UavEnvironment
from rl_evolve.envs.uav_2d.wrappers.raster_wrapper import RasterWrapper

# Directory containing this script; output JSON batches are written beneath it.
base_dir = os.path.dirname(os.path.abspath(__file__))
# Environment configuration passed verbatim to UavEnvironment(**env_cfg).
# NOTE(review): units for dimensions/lidar_range look like pixels and
# field_of_view like degrees — confirm against UavEnvironment's docs.
env_cfg = {
    "dimensions": [800, 800],        # world size (width, height)
    "fixed_obstacles": 10,           # static obstacle count
    "dynamic_obstacles": 10,         # moving obstacle count
    "occur_obstacles": 1,            # obstacles that appear mid-episode
    "occur_number_max": 3,           # max obstacles per occurrence event
    "prevent_stiff": False,
    "show_windows": False,           # headless rollout — no rendering window
    "use_lidar": True,
    "draw_lidar": False,
    "lidar_range": 250,
    "lidar_rays": 21,
    "field_of_view": 210,
    "center_obstacles": False,
}
# Number of expert episodes to roll out and record.
max_eps = 500

if __name__ == "__main__":
    from tqdm import tqdm

    # Collect expert (A*) demonstrations and save them in RLlib's offline
    # JSON format, one SampleBatch per episode.
    batch_builder = SampleBatchBuilder()  # or MultiAgentSampleBatchBuilder
    writer = JsonWriter(os.path.join(base_dir, "astar-out"))

    env = RasterWrapper(UavEnvironment(**env_cfg))
    actor = AstarActor(env, step_limit=2000)
    # Preprocessor flattens observations the same way RLlib would at
    # training time, so the stored obs match the model's expected layout.
    prep = get_preprocessor(env.observation_space)(env.observation_space)
    print("The preprocessor is", prep)

    for eps_id in tqdm(range(max_eps)):
        obs, info = env.reset()
        actor.reset()
        prev_action = np.zeros_like(env.action_space.sample())
        prev_reward = 0
        terminated = truncated = False
        t = 0
        while not terminated and not truncated:
            action = actor.get_action()
            new_obs, rew, terminated, truncated, info = env.step(action)
            batch_builder.add_values(
                t=t,
                eps_id=eps_id,
                agent_index=0,
                obs=prep.transform(obs),
                actions=action,
                action_prob=1.0,  # put the true action probability here
                action_logp=0.0,
                rewards=rew,
                prev_actions=prev_action,
                prev_rewards=prev_reward,
                terminateds=terminated,
                truncateds=truncated,
                infos={},
                new_obs=prep.transform(new_obs),
            )
            obs = new_obs
            prev_action = action
            prev_reward = rew
            t += 1
        # BUGFIX: write once per episode, AFTER the step loop. The original
        # called build_and_reset() inside the while loop, which flushed every
        # timestep as its own single-step SampleBatch and fragmented episodes
        # in the offline dataset.
        writer.write(batch_builder.build_and_reset())
