import os

import d3rlpy
import numpy as np
from tqdm import tqdm

from driving_gym.examples.carla_env_v2 import DrivingGym, make_carla_env
from driving_gym.misc.wrappers.flatten_space_wrapper import FlattenSpaceWrapper
from driving_gym.misc.wrappers.single_agent_wrapper import SingleAgentWrapper
from driving_gym.simulation.adapter.carla import CarlaActorConfig
from driving_gym.simulation.adapter.carla.planner.navigation.rule_agent import RuleAgent
from driving_gym.simulation.adapter.carla.utils import carla_location


def _flush_buffers(
    save_dir: str,
    file_index: int,
    observations: list,
    actions: list,
    rewards: list,
    terminals: list,
    timeouts: list,
) -> None:
    """Dump the buffered transitions to ``dataset_<file_index>.h5`` and clear buffers.

    No-op when the buffers are empty (e.g. every episode in the window was
    skipped), since ``MDPDataset`` cannot be built from empty arrays.
    """
    if not observations:
        return
    save_path = os.path.join(save_dir, f"dataset_{file_index}.h5")
    dataset = d3rlpy.dataset.MDPDataset(
        observations=np.array(observations),
        actions=np.array(actions),
        rewards=np.array(rewards),
        terminals=np.array(terminals),
        timeouts=np.array(timeouts),
    )
    with open(save_path, "w+b") as f:
        dataset.dump(f)
    # Clear in place so the caller's lists are reset for the next window.
    observations.clear()
    actions.clear()
    rewards.clear()
    terminals.clear()
    timeouts.clear()


def main(env: DrivingGym, args) -> None:
    """Collect expert driving transitions and save them as d3rlpy datasets.

    For each episode a :class:`RuleAgent` drives the "hero" vehicle along a
    globally planned route from the scenario's start to end location.
    Transitions (obs, action, reward, terminated, truncated) are buffered and
    written to ``args.save_dir`` every ``args.episode_per_file`` episodes.

    Args:
        env: wrapped CARLA driving environment (reset/step gymnasium API).
        args: parsed CLI namespace — uses ``num_episodes``, ``target_speed``,
            ``episode_per_file`` and ``save_dir``.
    """
    observations: list = []
    actions: list = []
    rewards: list = []
    terminals: list = []
    timeouts: list = []

    for eps_id in tqdm(range(args.num_episodes)):
        obs, _ = env.reset()
        done = False

        vehicle = env.adapter.get_actor("hero")
        agent = RuleAgent(
            vehicle,
            target_speed_fast=args.target_speed,
            opt_dict={"ignore_traffic_lights": False, "ignore_stop_signs": True},
        )

        # Plan a global route for this episode's task; skip episodes whose
        # route is too short to yield useful driving data.
        task = env.scenario.get_task("hero")
        start = CarlaActorConfig.parse_location(task["start"], agent._map)
        end = CarlaActorConfig.parse_location(task["end"], agent._map)
        plan = agent._global_planner.trace_route(
            carla_location(start.location), carla_location(end.location)
        )
        if len(plan) < 30:
            print(f"Skipping episode {eps_id} due to short plan")
            continue

        agent.init_path([(w.transform, opt) for w, opt in plan])

        while not done:
            control = agent.run_step()
            # Collapse throttle/brake into a single signed longitudinal
            # command: negative = brake, positive = throttle.
            action = np.array(
                [
                    -control.brake if control.brake > 0 else control.throttle,
                    control.steer,
                ]
            )
            new_obs, reward, terminated, truncated, _ = env.step(action)
            done = terminated or truncated

            observations.append(obs)
            actions.append(action)
            rewards.append(reward)
            terminals.append(terminated)
            timeouts.append(truncated)

            obs = new_obs

        if (eps_id + 1) % args.episode_per_file == 0:
            _flush_buffers(
                args.save_dir,
                eps_id + 1,
                observations,
                actions,
                rewards,
                terminals,
                timeouts,
            )

    # Flush any leftover episodes: num_episodes need not be a multiple of
    # episode_per_file, and the original code silently dropped the remainder.
    _flush_buffers(
        args.save_dir,
        args.num_episodes,
        observations,
        actions,
        rewards,
        terminals,
        timeouts,
    )


if __name__ == "__main__":
    import argparse

    from tqdm import tqdm  # noqa: F401  # main() looks this up in module globals

    parser = argparse.ArgumentParser("Collect offline data in D3RLpy format")
    parser.add_argument(
        "--save_dir", type=str, default=os.path.join(os.getcwd(), "d3rlpy_data")
    )
    parser.add_argument("--num_episodes", type=int, default=15)
    parser.add_argument("--episode_per_file", type=int, default=5)
    parser.add_argument("--map_name", type=str, default="Town01")
    parser.add_argument("--num_vehicles", type=int, default=20)
    parser.add_argument("--num_walkers", type=int, default=8)
    parser.add_argument("--traffic_range", type=int, default=150)
    parser.add_argument("--target_speed", type=float, default=6)
    args = parser.parse_args()

    # makedirs with exist_ok=True already handles the "already exists" case;
    # no separate os.path.exists check needed.
    os.makedirs(args.save_dir, exist_ok=True)

    # Never ask for more episodes per file than will be collected in total.
    args.episode_per_file = min(args.episode_per_file, args.num_episodes)

    env = None
    try:
        env_config = {
            "image_x": 224,
            "image_y": 224,
            "map_name": args.map_name,
            "num_vehicles": args.num_vehicles,
            "num_walkers": args.num_walkers,
            "traffic_range": args.traffic_range,
        }
        env = FlattenSpaceWrapper(SingleAgentWrapper(make_carla_env(env_config)))
        main(env, args)
    finally:
        # Guard: env construction itself may raise, leaving `env` unbound —
        # the original `env.close()` would then mask the real error with a
        # NameError.
        if env is not None:
            env.close()
