import os

import numpy as np
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.offline.json_writer import JsonWriter
from tqdm import tqdm

from driving_gym.examples.carla_env_v2 import DrivingGym, make_carla_env
from driving_gym.misc.wrappers.single_agent_wrapper import SingleAgentWrapper
from driving_gym.simulation.adapter.carla import CarlaActorConfig
from driving_gym.simulation.adapter.carla.planner.navigation.rule_agent import RuleAgent
from driving_gym.simulation.adapter.carla.utils import carla_location


def main(env: DrivingGym, args):
    """Roll out a scripted rule-based agent and record episodes as RLlib offline data.

    Each episode: reset the env, plan a route for the "hero" vehicle with a
    ``RuleAgent``, step the env with the agent's controls, and accumulate
    transitions in a ``SampleBatchBuilder``. Completed episodes are written to
    ``args.save_dir`` as compressed RLlib JSON sample batches.

    Args:
        env: Single-agent driving environment; must expose ``adapter`` (CARLA
            actor access) and ``scenario`` (task start/end locations).
        args: Parsed CLI namespace with ``save_dir``, ``max_file_size`` (MiB),
            ``num_episodes``, and ``target_speed``.
    """
    batch_builder = SampleBatchBuilder()
    writer = JsonWriter(
        args.save_dir,
        max_file_size=args.max_file_size * 1024 * 1024,  # MiB -> bytes
        compress_columns=["obs", "new_obs"],
    )
    # Preprocess observations exactly as RLlib training would, so the saved
    # data can be consumed directly by an offline-RL pipeline.
    prep = get_preprocessor(env.observation_space)(env.observation_space)

    for eps_id in tqdm(range(args.num_episodes)):
        obs, _ = env.reset()
        prev_action = np.zeros_like(env.action_space.sample())
        prev_reward = 0.0  # float, consistent with the rewards it later tracks
        done = False
        t = 0

        # Scripted expert driving the ego ("hero") vehicle; traffic rules are
        # relaxed so the agent keeps moving in dense scenarios.
        vehicle = env.adapter.get_actor("hero")
        agent = RuleAgent(
            vehicle,
            target_speed_fast=args.target_speed,
            opt_dict={"ignore_traffic_lights": True, "ignore_stop_signs": True},
        )

        # Trace a global route between the scenario's start and end for "hero".
        task = env.scenario.get_task("hero")
        start = CarlaActorConfig.parse_location(task["start"], agent._map)
        end = CarlaActorConfig.parse_location(task["end"], agent._map)
        plan = agent._global_planner.trace_route(
            carla_location(start.location), carla_location(end.location)
        )
        # Very short routes make uninteresting episodes; skip before any
        # transition is added, so batch_builder stays clean for the next one.
        if len(plan) < 30:
            print(f"Skipping episode {eps_id} due to short plan")
            continue

        agent.init_path([(w.transform, opt) for w, opt in plan])

        while not done:
            control = agent.run_step()
            # Collapse CARLA's separate throttle/brake pedals into one signed
            # channel: negative = brake, positive = throttle.
            action = np.array(
                [
                    -control.brake if control.brake > 0 else control.throttle,
                    control.steer,
                ]
            )
            new_obs, reward, terminated, truncated, _ = env.step(action)
            batch_builder.add_values(
                t=t,
                eps_id=eps_id,
                agent_index=0,
                obs=prep.transform(obs),
                actions=action,
                action_prob=1.0,  # deterministic expert: full probability mass
                action_logp=0.0,
                rewards=reward,
                prev_actions=prev_action,
                prev_rewards=prev_reward,
                terminateds=terminated,
                truncateds=truncated,
                infos={},
                new_obs=prep.transform(new_obs),
            )
            t += 1
            done = terminated or truncated
            obs = new_obs
            prev_action = action
            prev_reward = reward

        print(f"Writing episode {eps_id} to disk...")
        writer.write(batch_builder.build_and_reset())


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser("Collect offline data in RLlib format")
    parser.add_argument(
        "--save_dir", type=str, default=os.path.join(os.getcwd(), "rllib_data")
    )
    parser.add_argument("--num_episodes", type=int, default=100)
    # Maximum size of a single output JSON file, in MiB.
    parser.add_argument("--max_file_size", type=int, default=512)
    parser.add_argument("--map_name", type=str, default="Town01")
    parser.add_argument("--num_vehicles", type=int, default=20)
    parser.add_argument("--num_walkers", type=int, default=8)
    parser.add_argument("--traffic_range", type=int, default=150)
    parser.add_argument("--target_speed", type=float, default=6)
    args = parser.parse_args()

    # Bind env before the try block so the finally clause cannot raise
    # NameError (masking the real exception) if env construction fails.
    env = None
    try:
        env_config = {
            "map_name": args.map_name,
            "num_vehicles": args.num_vehicles,
            "num_walkers": args.num_walkers,
            "traffic_range": args.traffic_range,
        }
        env = SingleAgentWrapper(make_carla_env(env_config))
        main(env, args)
    finally:
        # Always release the simulator connection, even on error or Ctrl-C.
        if env is not None:
            env.close()
