from typing import Callable

import torch
import pprint
from torch.utils.tensorboard import SummaryWriter

from tianshou.data import VectorReplayBuffer
from tianshou.utils.net.discrete import Actor, Critic
from tianshou.trainer import onpolicy_trainer

from agent.common.collector import CustomCollector
from agent.common.logger import CustomLogger
from agent.common.misc import get_log_path, make_envs, print_model, watch, default_stop_fn, default_save_fn, \
    default_train_fn, default_test_fn


def test_a2c(net_factory: Callable, policy_factory: Callable, args):
    """Train (or just watch) an A2C agent assembled from the given factories.

    Pipeline: build train/test envs, construct a shared feature network with
    actor/critic heads, wrap them in a policy, attach collectors, then either
    roll out the policy in watch mode or run the tianshou on-policy trainer
    with TensorBoard logging. All hyperparameters come from ``args``.
    """
    train_envs, test_envs = make_envs(args)

    # Shared feature extractor feeding both the actor and critic heads.
    feature_net = net_factory(args.state_shape, linear_sizes=args.linear_sizes,
                              device=args.device)
    print_model(feature_net, args)
    actor_head = Actor(feature_net, args.action_shape, device=args.device).to(args.device)
    critic_head = Critic(feature_net, device=args.device).to(args.device)
    # Single optimizer over the union of actor and critic parameters.
    optimizer = torch.optim.Adam(
        list(actor_head.parameters()) + list(critic_head.parameters()), lr=args.lr)
    policy = policy_factory(
        actor_head, critic_head, optimizer, torch.distributions.Categorical,
        discount_factor=args.gamma, gae_lambda=args.gae_lambda,
        vf_coef=args.vf_coef, ent_coef=args.ent_coef,
        max_grad_norm=args.max_grad_norm, reward_normalization=args.rew_norm,
        action_space=args.action_space)

    # Collectors: training uses a vectorized replay buffer (one slot per env);
    # evaluation needs no buffer.
    replay = VectorReplayBuffer(args.buffer_size, len(train_envs))
    train_collector = CustomCollector(policy, train_envs, replay)
    test_collector = CustomCollector(policy, test_envs)

    # Watch mode: just roll out the current policy, no training.
    if args.watch:
        watch(policy, test_envs, test_collector, args)
        return

    # TensorBoard logging under a run-specific directory; record args for
    # later reproducibility.
    log_path = get_log_path(args)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = CustomLogger(writer=writer)

    # On-policy training loop with the project's default train/test/stop/save
    # hooks; prints the final result summary when finished.
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size,
        episode_per_collect=args.episode_per_collect,
        train_fn=default_train_fn(args, policy, logger), test_fn=default_test_fn(args, policy),
        stop_fn=default_stop_fn(args), save_fn=default_save_fn(log_path),
        logger=logger)
    pprint.pprint(result)
    print('========== FINISHED, CONGRATULATION! ==========')
