import os
import torch
import pprint
import sys
from typing import Callable

import numpy as np
from torch.utils.tensorboard import SummaryWriter
from agent.common.logger import CustomLogger
from tianshou.trainer import onpolicy_trainer
from tianshou.data import Collector, VectorReplayBuffer

from agent.common.misc import get_log_path, make_envs, print_model, watch, default_stop_fn, default_save_fn, \
    default_train_fn, default_test_fn

sys.path.append(os.path.dirname(sys.path[0]))


def test_pg(net_factory: Callable, policy_factory: Callable, args):
    """Train and evaluate a policy-gradient (on-policy) agent with tianshou.

    Builds the actor network, wraps it in a PG policy, runs tianshou's
    ``onpolicy_trainer`` and logs to TensorBoard. If ``args.watch`` is set,
    only renders/evaluates the current policy and returns early.

    Args:
        net_factory: Callable constructing the actor network; invoked with
            ``state_shape``/``action_shape``/``softmax``/``linear_sizes``/
            ``device`` keyword arguments and must return a ``torch.nn.Module``.
        policy_factory: Callable constructing the policy from
            ``(net, optim, dist, gamma, reward_normalization=..., action_space=...)``.
        args: Experiment configuration namespace; must expose the attributes
            referenced below (lr, gamma, buffer_size, epoch, ...).
    """
    train_envs, test_envs = make_envs(args)
    # softmax head: the categorical distribution below expects probabilities
    net = net_factory(state_shape=args.state_shape,
                      action_shape=args.action_shape,
                      softmax=True,
                      linear_sizes=args.linear_sizes,
                      device=args.device).to(args.device)
    print_model(net, args)
    # Orthogonal initialization of all linear layers (in-place, done before
    # the policy first uses the weights; Adam's state is created lazily, so
    # constructing the optimizer afterwards is safe).
    for m in net.modules():
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
            # Linear(..., bias=False) has bias=None -- guard against it
            if m.bias is not None:
                torch.nn.init.zeros_(m.bias)
    optim = torch.optim.Adam(net.parameters(), lr=args.lr)
    dist = torch.distributions.Categorical
    # define policy
    policy = policy_factory(net, optim, dist, args.gamma,
                            reward_normalization=args.rew_norm,
                            action_space=args.action_space)
    # collectors: one buffered collector per training env, unbuffered for test
    train_collector = Collector(
        policy, train_envs,
        VectorReplayBuffer(args.buffer_size, len(train_envs)),
        exploration_noise=True)
    test_collector = Collector(policy, test_envs)
    # TensorBoard logging; record the full config for reproducibility
    log_path = get_log_path(args)
    writer = SummaryWriter(log_path)
    writer.add_text("args", str(args))
    logger = CustomLogger(writer=writer)

    # watch mode: just roll out the (pre-trained) policy, no training
    if args.watch:
        watch(policy, test_envs, test_collector, args)
        return

    # trainer
    result = onpolicy_trainer(
        policy, train_collector, test_collector, args.epoch,
        args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size,
        episode_per_collect=args.episode_per_collect,
        stop_fn=default_stop_fn(args), save_fn=default_save_fn(log_path),
        train_fn=default_train_fn(args, policy, logger),
        test_fn=default_test_fn(args, policy),
        logger=logger)

    pprint.pprint(result)
    print('========== FINISHED, CONGRATULATION! ==========')
