import os
import pprint
import sys
from typing import Callable

import torch
from tianshou.data import VectorReplayBuffer
from tianshou.trainer import offpolicy_trainer
from torch.utils.tensorboard import SummaryWriter

from agent.common.logger import CustomLogger
from agent.common.misc import get_log_path, make_envs, print_model, watch, default_stop_fn, default_save_fn, \
    default_train_fn, default_test_fn
from agent.common.collector import CustomCollector

# Make the parent of the script's directory importable, so that the
# `agent.*` package resolves when this file is run directly as a script.
_parent_dir = os.path.dirname(sys.path[0])
sys.path.append(_parent_dir)


def test_dqn(net_factory: Callable, policy_factory: Callable, args):
    """Train (or just watch) a DQN-style agent with tianshou's off-policy trainer.

    :param net_factory: callable that builds the Q-network; invoked with
        keyword arguments ``state_shape``, ``action_shape``, ``linear_sizes``
        and ``device``.
    :param policy_factory: callable that builds the policy from
        ``(net, optim, gamma, n_step, target_update_freq=...)``.
    :param args: experiment configuration (argparse-style namespace) providing
        the attributes referenced throughout this function.
    :return: the trainer's result dict, or ``None`` when ``args.watch`` is set
        (evaluation-only mode).
    """
    train_envs, test_envs = make_envs(args)
    # Build the Q-network and its optimizer.
    net = net_factory(state_shape=args.state_shape,
                      action_shape=args.action_shape,
                      linear_sizes=args.linear_sizes,
                      device=args.device
                      ).to(args.device)
    print_model(net, args)
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
    policy = policy_factory(net, optim, args.gamma, args.n_step,
                            target_update_freq=args.target_update_freq)
    # Replay buffer: `save_only_last_obs` and `stack_num` can be removed
    # together when you have enough RAM.
    buffer = VectorReplayBuffer(
        args.buffer_size, buffer_num=len(train_envs), ignore_obs_next=True,
        save_only_last_obs=True, stack_num=args.frames_stack)
    # Collectors: exploration noise is enabled for both train and test.
    train_collector = CustomCollector(policy, train_envs, buffer,
                                      exploration_noise=True)
    test_collector = CustomCollector(policy, test_envs, exploration_noise=True)

    if args.watch:
        # Evaluation-only mode: inspect the policy, skip training entirely.
        watch(policy, test_envs, test_collector, args)
        return None

    # Logging: TensorBoard writer wrapped by the project-specific logger.
    log_path = get_log_path(args)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = CustomLogger(writer=writer)
    rule_train_fn = default_train_fn(args, policy, logger)
    rule_test_fn = default_test_fn(args, policy)

    def train_fn(epoch, env_step):
        # Nature-DQN epsilon schedule: linear decay over the first 1M steps,
        # then held constant at ``eps_train_final``.
        if env_step <= 1e6:
            eps = args.eps_train - env_step / 1e6 * \
                  (args.eps_train - args.eps_train_final)
        else:
            eps = args.eps_train_final
        policy.set_eps(eps)
        logger.write('train/eps', env_step, eps)
        rule_train_fn(epoch, env_step)

    def test_fn(epoch, env_step):
        # Fixed (usually small) exploration rate during evaluation.
        policy.set_eps(args.eps_test)
        rule_test_fn(epoch, env_step)

    # Pre-fill the replay buffer so the first gradient updates have data.
    train_collector.collect(n_step=args.batch_size * args.training_num)
    # Run the off-policy training loop.
    result = offpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.step_per_collect, args.test_num,
        args.batch_size, train_fn=train_fn, test_fn=test_fn,
        stop_fn=default_stop_fn(args), save_fn=default_save_fn(log_path),
        logger=logger,
        update_per_step=args.update_per_step, test_in_train=False
    )
    pprint.pprint(result)
    print('========== FINISHED, CONGRATULATION! ==========')
    # Return the trainer result so callers can consume it programmatically
    # (previously it was only printed and then discarded).
    return result
