import os
import gym
import torch
import pprint
import numpy as np
from torch.utils.tensorboard import SummaryWriter

from tianshou.policy import PPOPolicy
from tianshou.utils import BasicLogger
from tianshou.env import DummyVectorEnv
from tianshou.utils.net.common import Net
from tianshou.trainer import onpolicy_trainer
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.utils.net.discrete import Actor, Critic
from typing import Callable

from agent.common.logger import CustomLogger
from agent.common.misc import get_log_path, make_envs, print_model, watch, default_stop_fn, default_save_fn, \
    default_train_fn, default_test_fn
from agent.common.collector import CustomCollector


def test_ppo(net_factory: Callable, policy_factory: Callable, args):
    """Train (or just watch) a discrete-action PPO agent.

    :param net_factory: callable building the shared feature network from
        ``(state_shape, linear_sizes=..., device=...)``.
    :param policy_factory: callable building the PPO policy from
        ``(actor, critic, optim, dist, **hyperparams)``.
    :param args: parsed argument namespace carrying all hyper-parameters
        (epochs, learning rate, clip ratios, env counts, ...).
    """
    train_envs, test_envs = make_envs(args)
    # model: one shared feature extractor feeding both actor and critic heads
    net = net_factory(args.state_shape, linear_sizes=args.linear_sizes,
                      device=args.device)
    print_model(net, args)
    actor = Actor(net, args.action_shape, device=args.device).to(args.device)
    critic = Critic(net, device=args.device).to(args.device)
    # orthogonal initialization (standard PPO trick for training stability)
    for m in list(actor.modules()) + list(critic.modules()):
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.orthogonal_(m.weight)
            # guard: nn.Linear(bias=False) layers have m.bias is None
            if m.bias is not None:
                torch.nn.init.zeros_(m.bias)
    # actor and critic share `net`, so their parameters overlap and must be
    # deduplicated. Use an order-preserving dedup (dict keys) rather than a
    # set: feeding an unordered set to the optimizer makes the parameter
    # ordering -- and hence training -- non-reproducible.
    params = list(dict.fromkeys(
        list(actor.parameters()) + list(critic.parameters())))
    optim = torch.optim.Adam(params, lr=args.lr)
    dist = torch.distributions.Categorical
    policy = policy_factory(
        actor, critic, optim, dist,
        discount_factor=args.gamma,
        max_grad_norm=args.max_grad_norm,
        eps_clip=args.eps_clip,
        vf_coef=args.vf_coef,
        ent_coef=args.ent_coef,
        gae_lambda=args.gae_lambda,
        reward_normalization=args.rew_norm,
        dual_clip=args.dual_clip,
        value_clip=args.value_clip,
        action_space=args.action_space)
    # collectors: one replay slot per training env; test collector buffers
    # nothing because evaluation episodes are consumed immediately
    train_collector = Collector(
        policy, train_envs,
        VectorReplayBuffer(args.buffer_size, len(train_envs)),
        exploration_noise=True)
    test_collector = Collector(policy, test_envs)

    # watch-only mode: render/evaluate the policy, skip training entirely
    if args.watch:
        watch(policy, test_envs, test_collector, args)
        return

    # log
    log_path = get_log_path(args)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = CustomLogger(writer=writer)

    # trainer
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size,
        episode_per_collect=args.episode_per_collect,
        stop_fn=default_stop_fn(args), save_fn=default_save_fn(log_path),
        train_fn=default_train_fn(args, policy, logger),
        test_fn=default_test_fn(args, policy),
        logger=logger)
    pprint.pprint(result)
    print('========== FINISHED, CONGRATULATION! ==========')
