import multiprocessing as mp
import os

import numpy as np
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
from ray.rllib.models.preprocessors import get_preprocessor
from ray.rllib.offline.json_writer import JsonWriter
from tqdm import tqdm

from driving_gym.examples.carla_env_roach import DrivingGym, make_carla_env
from driving_gym.simulation.adapter.carla import CarlaActorConfig
from driving_gym.simulation.common.road_classifier import RoadClassifier
from driving_gym.simulation.adapter.carla.utils import carla_location
from driving_gym.simulation.adapter.carla.planner.navigation.global_route_planner import (
    GlobalRoutePlanner,
)


def collect_data(env: DrivingGym, args):
    """Roll out episodes in *env* and persist them as RLlib offline data.

    Each episode is driven by the CARLA autopilot (a random action is only
    used to tick the environment); the recorded action is reconstructed from
    the hero vehicle's actual control, with brake/throttle collapsed into one
    signed longitudinal value. Episodes are written as compressed JSON
    batches under ``args.save_dir``.
    """
    builder = SampleBatchBuilder()
    output = JsonWriter(
        args.save_dir,
        max_file_size=args.max_file_size * 1024 * 1024,
        compress_columns=["obs", "new_obs"],
    )
    preprocessor = get_preprocessor(env.observation_space)(env.observation_space)

    # Run at least num_episodes, but never fewer than the scenario count.
    episode_total = max(args.num_episodes, len(env.scenario_manager.scenarios))
    for episode in tqdm(range(episode_total)):
        try:
            obs, _ = env.reset()
        except Exception as err:  # best-effort: skip episodes that fail to start
            print(f"Failed to reset environment: {err}")
            continue

        last_action = np.zeros_like(env.action_space.sample())
        last_reward = 0
        step = 0

        carla_map = env.adapter.map
        traffic_manager = env.adapter.traffic_manager
        hero = env.adapter.get_actor("hero")

        # Plan a global route from the hero's spawn to the scenario goal and
        # hand it to the traffic manager so the autopilot follows it.
        planner = GlobalRoutePlanner(carla_map, 2.0)
        task = env.scenario.get_task("hero")
        goal = CarlaActorConfig.parse_location(task["end"], carla_map)
        goal = carla_location(goal.location)
        route = planner.trace_route(hero.get_location(), goal)
        if len(route) > 10:
            traffic_manager.set_path(
                env.adapter.get_actor("hero"),
                [waypoint.transform.location for waypoint, _ in route],
            )

        while True:
            new_obs, reward, terminated, truncated, _ = env.step(
                env.action_space.sample()
            )
            # Recover what the autopilot actually did: negative = braking,
            # positive = throttle; second component is steering.
            control = env.adapter.get_control("hero")
            longitudinal = -control.brake if control.brake > 0 else control.throttle
            action = np.array([longitudinal, control.steer])

            builder.add_values(
                t=step,
                eps_id=episode,
                agent_index=0,
                obs=preprocessor.transform(obs),
                actions=action,
                action_prob=1.0,
                action_logp=0.0,
                rewards=reward,
                prev_actions=last_action,
                prev_rewards=last_reward,
                terminateds=terminated,
                truncateds=truncated,
                infos={},
                new_obs=preprocessor.transform(new_obs),
            )
            step += 1
            obs = new_obs
            last_action = action
            last_reward = reward
            if terminated or truncated:
                break

        eps_id = episode
        print(f"Writing episode {eps_id} to disk...")
        output.write(builder.build_and_reset())


def main(args, tasks):
    """Create a CARLA env for one (map, scenario) configuration, collect
    offline data, and always release the simulator on exit.

    Intended to run in a child process (one per scenario — see the
    ``__main__`` block), so each invocation gets a fresh CARLA connection.
    """
    # Bind env up front: if make_carla_env raises, the original code's
    # `finally: env.close()` hit a NameError that masked the real error.
    env = None
    try:
        env_config = {
            "map_name": args.map_name,
            "num_vehicles": args.num_vehicles,
            "num_walkers": args.num_walkers,
            "autopilot": True,
        }
        env = make_carla_env(env_config, roach_wrapper=True, tasks=tasks)
        collect_data(env, args)
    finally:
        if env is not None:
            env.close()


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser("Collect offline data in RLlib format")
    parser.add_argument("--save_dir", type=str, default="dataset")
    parser.add_argument("--num_episodes", type=int, default=100)
    parser.add_argument("--max_file_size", type=int, default=512)
    parser.add_argument("--map_name", type=str, default="Town01")
    parser.add_argument("--num_vehicles", type=int, default=0)
    parser.add_argument("--num_walkers", type=int, default=0)
    parser.add_argument("--split_scenario", action="store_true")
    args = parser.parse_args()

    # Per-town traffic density, replacing the original if/elif chain:
    # town -> ([min_vehicles, max_vehicles], [min_walkers, max_walkers]).
    # Insertion order preserves the original Town01..Town05 sweep order.
    TOWN_TRAFFIC = {
        "Town01": ([0, 150], [0, 300]),
        "Town02": ([0, 100], [0, 200]),
        "Town03": ([0, 120], [0, 120]),
        "Town04": ([0, 160], [0, 160]),
        "Town05": ([0, 160], [0, 160]),
    }

    origin_dir = args.save_dir
    # Path template for the town's OpenDRIVE description inside CARLA_ROOT.
    map_template = os.path.join(
        os.environ["CARLA_ROOT"],
        "CarlaUE4/Content/Carla/Maps/OpenDrive/{}.xodr",
    )
    for map_name, (num_vehicles, num_walkers) in TOWN_TRAFFIC.items():
        args.map_name = map_name
        args.num_vehicles = num_vehicles
        args.num_walkers = num_walkers

        if args.split_scenario:
            # Split the dataset by road type, classified from the OpenDRIVE map.
            classifier = RoadClassifier(map_template.format(args.map_name))
            scenarios = classifier.get_classification()
        else:
            scenarios = {"mixed": None}

        for scene_type, tasks in scenarios.items():
            save_dir = os.path.join(origin_dir, args.map_name)
            args.save_dir = os.path.join(save_dir, scene_type.lower())

            # One child process per scenario so each run gets a fresh CARLA
            # connection and a crash doesn't take down the whole sweep.
            proc = mp.Process(target=main, args=(args, tasks))
            proc.start()
            proc.join()
