import pprint

import numpy as np
import torch
from tianshou.data import VectorReplayBuffer
from tianshou.exploration import OUNoise
from tianshou.trainer import offpolicy_trainer
from tianshou.utils import BasicLogger
from tianshou.utils.net.common import Net
from tianshou.utils.net.continuous import ActorProb, Critic
from torch.utils.tensorboard import SummaryWriter
from typing import Callable

from agent.common.collector import CustomCollector
from agent.common.misc import get_log_path, make_envs, print_model, watch, default_stop_fn, default_save_fn, \
    default_train_fn, default_test_fn


def test_sac(net_factory: Callable, policy_factory: Callable, args):
    """Build, train (or just watch) a SAC agent from the supplied factories.

    :param net_factory: callable ``(state_shape, linear_sizes=..., device=...)``
        returning the actor's feature-extraction network.
    :param policy_factory: callable building the SAC policy from the actor,
        the twin critics, and their optimizers (see call site below for the
        exact keyword contract).
    :param args: experiment namespace; must provide the env/shape/optimizer/
        trainer hyper-parameters read below (``state_shape``, ``action_shape``,
        ``actor_lr``, ``critic_lr``, ``auto_alpha``, ``epoch``, ...). When
        ``args.auto_alpha`` is set, ``args.alpha`` is overwritten in place
        with the ``(target_entropy, log_alpha, alpha_optim)`` triple.
    """
    train_envs, test_envs = make_envs(args)

    # --- model: stochastic actor + twin critics (standard SAC layout) ---
    net = net_factory(args.state_shape, linear_sizes=args.linear_sizes,
                      device=args.device)
    print_model(net, args)
    actor = ActorProb(
        net, args.action_shape,
        max_action=args.max_action, device=args.device, unbounded=True
    ).to(args.device)
    actor_optim = torch.optim.Adam(actor.parameters(), lr=args.actor_lr)
    net_c1 = Net(args.state_shape, args.action_shape,
                 hidden_sizes=args.hidden_sizes, concat=True,
                 device=args.device)
    critic1 = Critic(net_c1, device=args.device).to(args.device)
    critic1_optim = torch.optim.Adam(critic1.parameters(), lr=args.critic_lr)
    net_c2 = Net(args.state_shape, args.action_shape,
                 hidden_sizes=args.hidden_sizes, concat=True,
                 device=args.device)
    critic2 = Critic(net_c2, device=args.device).to(args.device)
    critic2_optim = torch.optim.Adam(critic2.parameters(), lr=args.critic_lr)

    if args.auto_alpha:
        # Automatic entropy tuning: target entropy is -dim(action space).
        # FIX: was -np.prod(args.action_space) — np.prod of a gym Space object
        # is not the action dimension. Use the action *shape*, consistent with
        # how args.action_shape is used for the networks above and with the
        # reference SAC setup (-np.prod(env.action_space.shape)).
        target_entropy = -np.prod(args.action_shape)
        log_alpha = torch.zeros(1, requires_grad=True, device=args.device)
        alpha_optim = torch.optim.Adam([log_alpha], lr=args.alpha_lr)
        args.alpha = (target_entropy, log_alpha, alpha_optim)

    policy = policy_factory(
        actor, actor_optim, critic1, critic1_optim, critic2, critic2_optim,
        action_range=[args.min_action, args.max_action],
        tau=args.tau, gamma=args.gamma, alpha=args.alpha,
        reward_normalization=args.rew_norm,
        exploration_noise=OUNoise(0.0, args.noise_std))

    # --- collectors: shared vectorized replay buffer for training rollouts ---
    train_collector = CustomCollector(
        policy, train_envs,
        VectorReplayBuffer(args.buffer_size, len(train_envs)),
        exploration_noise=True)
    test_collector = CustomCollector(policy, test_envs)

    # Watch-only mode: render/evaluate a trained policy, skip training entirely.
    if args.watch:
        watch(policy, test_envs, test_collector, args)
        return

    # --- logging (TensorBoard) ---
    log_path = get_log_path(args)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = BasicLogger(writer)

    # --- off-policy training loop ---
    result = offpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.step_per_collect, args.test_num, args.batch_size,
        update_per_step=args.update_per_step, stop_fn=default_stop_fn(args),
        train_fn=default_train_fn(args, policy, logger),
        test_fn=default_test_fn(args, policy),
        save_fn=default_save_fn(log_path), logger=logger)
    pprint.pprint(result)
    print('========== FINISHED, CONGRATULATION! ==========')
