import pprint
from datetime import datetime
import os
import numpy as np
import torch
from tianshou.env import SubprocVectorEnv, DummyVectorEnv
from torchsummary import summary

from ril_env import make_atari_env, make_atari_env_watch


def make_envs(args, agent_type='discrete'):
    """Build vectorized training and test environments for the task in *args*.

    Side effects: mutates ``args`` in place (``state_shape``,
    ``action_shape``, ``action_space``; plus ``max_action``/``min_action``
    for continuous agents and ``reward_threshold`` when ``stop_at_goal``
    is set), and seeds numpy, torch, and both env sets with ``args.seed``.

    :param args: namespace consumed by ``make_atari_env`` — must provide at
        least ``training_num``, ``test_num``, ``seed`` and ``stop_at_goal``.
    :param agent_type: ``'discrete'`` uses subprocess-backed vector envs
        and a dedicated watch env for testing; any other value takes the
        continuous-control path with dummy (in-process) vector envs.
    :return: ``(train_envs, test_envs)`` tuple of vectorized environments.
    """
    env = make_atari_env(args)
    if agent_type == 'discrete':
        args.state_shape = env.observation_space.shape or env.observation_space.n
        # NOTE(review): this branch reads the action space from the inner
        # wrapped env (env.env) while the observation space comes from the
        # outer wrapper — confirm the asymmetry is intentional.
        args.action_shape = env.env.action_space.shape or env.env.action_space.n
        # Was `env.env.action_space or env.env.action_space` — a redundant
        # self-`or`; a plain assignment is equivalent.
        args.action_space = env.env.action_space
        train_envs = SubprocVectorEnv([lambda: make_atari_env(args)
                                       for _ in range(args.training_num)])
        test_envs = SubprocVectorEnv([lambda: make_atari_env_watch(args)
                                      for _ in range(args.test_num)])
    else:
        args.state_shape = env.observation_space.shape or env.observation_space.n
        args.action_shape = env.action_space.shape or env.action_space.n
        # Set action_space here too, for parity with the discrete branch.
        args.action_space = env.action_space
        args.max_action = env.action_space.high[0]
        args.min_action = env.action_space.low[0]
        # NOTE(review): the continuous test envs use make_atari_env rather
        # than make_atari_env_watch — confirm this is deliberate.
        train_envs = DummyVectorEnv(
            [lambda: make_atari_env(args) for _ in range(args.training_num)])
        test_envs = DummyVectorEnv(
            [lambda: make_atari_env(args) for _ in range(args.test_num)])
    # should be N_FRAMES x H x W
    print("Observations shape:", args.state_shape)
    print("Actions shape:", args.action_shape)
    # Seed every source of randomness for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)

    if args.stop_at_goal:
        # env.spec.reward_threshold may be None for envs without a
        # registered threshold — downstream stop_fn should tolerate that.
        args.reward_threshold = env.spec.reward_threshold
    return train_envs, test_envs


def print_model(model, args):
    """Print a layer-by-layer summary of *model*.

    Prefers the model's own ``print_model`` hook when it exists; otherwise
    falls back to torchsummary's ``summary`` applied to ``model.net`` (or
    ``model.model`` when there is no ``net`` attribute).
    """
    print('==================== SUMMARY ====================')
    if hasattr(model, 'print_model'):
        model.print_model(input_size=args.state_shape,
                          batch_size=args.batch_size,
                          device=args.device)
        return
    target = model.net if hasattr(model, 'net') else model.model
    print(summary(target,
                  input_size=args.state_shape,
                  batch_size=args.batch_size,
                  device=args.device))


def get_log_path(args):
    """Return the log directory path for this run.

    When ``args.resume_path`` is set, that path is reused under
    ``args.logdir/args.task``; otherwise a fresh timestamped folder name is
    built from the agent name (with an ``(ril)`` tag when ``args.ril``).
    """
    if args.resume_path is not None:
        return os.path.join(args.logdir, args.task, args.resume_path)
    timestamp = datetime.now().strftime('%Y-%m-%d %H.%M.%S')
    tag = '(ril)' if args.ril else ''
    return os.path.join(args.logdir, args.task, f'{args.agent}{tag}_{timestamp}')


def watch(policy, test_envs, test_collector, args):
    """Evaluate *policy* for ``args.test_num`` episodes and print the result.

    When ``args.resume_path`` is given, the policy weights are first loaded
    from ``<resume_path>/policy.pth``.

    :param policy: tianshou-style policy (``eval``/``load_state_dict``, and
        optionally ``set_eps``/``set_ril_pt``).
    :param test_envs: vectorized test environments (only reseeded here).
    :param test_collector: collector driving the evaluation episodes.
    :param args: namespace with resume_path, device, eps_test, ril,
        ril_p_test, seed, test_num and render.
    """
    if args.resume_path:
        policy_path = os.path.join(args.resume_path, 'policy.pth')
        print(f'Loading policy from {policy_path}')
        policy.load_state_dict(torch.load(policy_path, map_location=args.device))

    print("Setup test envs ...")
    policy.eval()
    # Guard with hasattr, consistent with the set_ril_pt call below:
    # continuous-control policies (see make_envs) have no epsilon-greedy
    # exploration, so an unconditional set_eps would raise AttributeError.
    if hasattr(policy, 'set_eps'):
        policy.set_eps(args.eps_test)
    if args.ril and hasattr(policy, 'set_ril_pt'):
        policy.set_ril_pt(args.ril_p_test)
    test_envs.seed(args.seed)
    print("Testing agent ...")
    test_collector.reset()
    result = test_collector.collect(n_episode=args.test_num,
                                    render=args.render)
    pprint.pprint(result)


def default_save_fn(log_path):
    """Return a callback that saves a policy's state dict to *log_path*.

    The returned ``save_fn(policy)`` writes ``<log_path>/policy.pth``.
    """
    target = os.path.join(log_path, 'policy.pth')

    def save_fn(policy):
        torch.save(policy.state_dict(), target)

    return save_fn


def default_stop_fn(args):
    """Return a stop callback: reached the reward goal, if goal-stopping is on.

    The returned ``stop_fn(mean_rewards)`` is always False when
    ``args.stop_at_goal`` is unset; otherwise it compares against
    ``args.reward_threshold``.
    """
    def stop_fn(mean_rewards):
        if not args.stop_at_goal:
            return False
        return mean_rewards >= args.reward_threshold

    return stop_fn


def default_train_fn(args, policy, logger):
    """Return a per-step train callback that decays the RIL probability.

    The returned ``train_fn(epoch, env_step)`` is a no-op unless
    ``args.ril`` is set and *policy* exposes ``set_ril_pt``; otherwise it
    applies the exponential schedule
    ``ril_p0 * ril_gamma ** (env_step / ril_den)`` and logs it.
    """
    def train_fn(epoch, env_step):
        if not (args.ril and hasattr(policy, 'set_ril_pt')):
            return
        decay = args.ril_gamma ** (env_step / args.ril_den)
        ril_pt = args.ril_p0 * decay
        policy.set_ril_pt(ril_pt)
        logger.write('train/ril_pt', env_step, ril_pt)

    return train_fn


def default_test_fn(args, policy):
    """Return a test callback that pins the RIL probability to its test value.

    The returned ``test_fn(epoch, env_step)`` is a no-op unless
    ``args.ril`` is set and *policy* exposes ``set_ril_pt``.
    """
    def test_fn(epoch, env_step):
        if not (args.ril and hasattr(policy, 'set_ril_pt')):
            return
        policy.set_ril_pt(args.ril_p_test)

    return test_fn
