from datetime import datetime
from os import path as osp
from traceback import format_exc

from ray.rllib.algorithms.ppo import PPOConfig

from ray.tune.registry import register_env


from thorx.env import ThorXAdapterRLlib


FORMAT_TIMESTAMP = "%Y-%m-%d-%H-%M-%S"
CLASS_ENVIRONMENT = ThorXAdapterRLlib
NAME_ENVIRONMENT = 'ThorX'


def environment_factory(config):
    """Build a fresh environment instance for RLlib's env registry.

    `config` is the `env_config` dict RLlib passes through to the
    environment constructor.
    """
    return CLASS_ENVIRONMENT(config)


def save_model(algo):
    """Checkpoint ``algo`` into its own log directory (best effort).

    Any failure — saving itself or reading the checkpoint path from the
    result — is reported on stdout instead of propagating.
    """
    try:
        result = algo.save(algo.logdir)
        location = result.checkpoint.path
    except Exception as ex:
        print(ex)
    else:
        print(f"Model saved to '{location}'")


if __name__ == '__main__':
    from argparse import ArgumentParser

    # Command-line interface of the ThorX agent entry point
    arg_parser = ArgumentParser(description='ThorX agent main entry point')
    arg_parser.add_argument(
        'command', choices=['train', 'eval', 'record'], metavar='COMMAND',
        type=str, help='the command to execute')
    arg_parser.add_argument(
        '-c', '--checkpoint', default=None, metavar='PATH',
        help='path to RLlib checkpoint directory')
    arg_parser.add_argument(
        '-d', '--duration', type=int, default=10, metavar='SEC',
        help='video record duration')
    arg_parser.add_argument(
        '-g', '--gamma', type=float, default=0.5, metavar='VALUE',
        help='RL gamma coefficient')
    arg_parser.add_argument(
        '-l', '--learning-rate', type=float, default=3e-3, metavar='VALUE',
        help='algorithm learning rate')
    arg_parser.add_argument(
        '-n', '--num-episodes', type=int, default=20, metavar='NUM',
        help='number of episodes to train')
    arg_parser.add_argument(
        '-t', '--trajectory-steps', type=int, default=10, metavar='NUM',
        help='number of trajectory max steps')
    arg_parser.add_argument(
        '-u', '--cumulative', action='store_true',
        help='cumulative actions (add to current positions)')
    arg_parser.add_argument(
        '-w', '--num-workers', type=int, default=1, metavar='NUM',
        help='number of workers for parallel rollouts')
    arg_parser.add_argument(
        '-x', '--checkpoint-period', type=int, default=100, metavar='NUM',
        help='save checkpoint every x episodes')
    args = arg_parser.parse_args()

    # Make the custom environment available to RLlib under its short name
    register_env(NAME_ENVIRONMENT, environment_factory)
    # Config dict passed through to the custom env's constructor
    env_config = {
        'gui': args.command == 'eval',  # enable GUI for eval only
        'depth': True,
        'mask': True,
        'absolute': not args.cumulative,
        'trajectory_max_steps': args.trajectory_steps,
    }
    # Assemble an RLlib PPO configuration for the environment above
    config = PPOConfig().training(lr=args.learning_rate, gamma=args.gamma)
    config = config.environment(
        # Env name registered above (our gym.Env sub-class)
        env='ThorX',
        env_config=env_config,
        clip_actions=True,
        clip_rewards=False,
        normalize_actions=False,
    )
    # Parallel rollout workers are only useful while training
    # TODO: parallel PyBullet simulations
    config = config.rollouts(
        num_rollout_workers=args.num_workers if args.command == 'train' else 0
    )
    config = config.checkpointing(export_native_model_files=True)
    # Construct the PPO algorithm instance from the configuration
    algo = config.build()
    if osp.isdir(args.checkpoint or ''):
        algo.restore(args.checkpoint)
        print(f"Restored from '{args.checkpoint}'")
    env = algo.workers.local_worker().env

    if args.command == 'train':
        # Train for num_episodes iterations and report results
        # (mean episode rewards)
        episode = -1  # stays -1 if the loop body never runs (num_episodes == 0)
        try:
            for episode in range(args.num_episodes):
                results = algo.train()
                print(
                    f"Iteration {episode:3d}: mean reward={results['episode_reward_mean']}"
                )
                # Periodic checkpoint every checkpoint_period iterations
                if (episode + 1) % args.checkpoint_period == 0:
                    save_model(algo)
        except Exception as ex:
            # Best-effort: report, then fall through so the model trained
            # so far still gets saved below
            print(format_exc())
            print(ex)
        # Final save, unless the last completed iteration already saved.
        # (The previous `i % period != 0` test both double-saved on period
        # boundaries and raised NameError when num_episodes == 0.)
        if episode >= 0 and (episode + 1) % args.checkpoint_period != 0:
            save_model(algo)
    elif env is None:
        print("ERROR: can't access local environment...")
    elif args.command == 'eval':
        # Roll out policy-driven steps until the env reports termination
        observation, _ = env.reset()
        reward_episode = 0
        while not env.terminated:
            action = algo.compute_single_action(observation)
            try:
                observation, reward, terminated, truncated, _ = env.step(action)
            except Exception:
                # A failed step ends the evaluation (was a silent break —
                # now the traceback is reported for diagnosis)
                print(format_exc())
                break
            reward_episode += reward
            if truncated or terminated:
                print(
                    f"INFO: episode {['truncated', 'terminated'][terminated]}"
                    f" with total reward = {reward_episode:0.3f}"
                )
                reward_episode = 0
    elif args.command == 'record':
        # Record the rendered frames of one rollout to a VP9 .webm file
        from cv2 import VideoWriter

        video = None
        try:
            timestamp = datetime.now().strftime(FORMAT_TIMESTAMP)
            # FIX: the timestamp used to be computed and then immediately
            # overwritten; interpolate it into the output filename
            filename = f"{NAME_ENVIRONMENT}-record-{timestamp}.webm"
            video = VideoWriter(
                filename,
                fourcc=VideoWriter.fourcc(*'VP09'),
                fps=env.fps,
                frameSize=env.camera_view[:2],
            )
            steps_max = round(args.duration * env.fps)
            observation, _ = env.reset()
            for _ in range(steps_max):
                action = algo.compute_single_action(observation)
                observation, reward, terminated, truncated, info = env.step(
                    action, render=True
                )
                frame = info.get('rgb', None)
                if frame is not None:
                    # OpenCV expects BGR order: drop any alpha channel and
                    # reverse the RGB channels
                    video.write(frame[..., 2::-1])
        except Exception as ex:
            print(format_exc())
            print(ex)
        finally:
            # Release only when the writer was actually created (the old
            # code called release() on None and relied on catching the
            # resulting AttributeError)
            if video is not None:
                try:
                    video.release()
                except Exception as ex:
                    print(ex)
    algo.stop()
