from model.nets import TexasActor, TexasCritic, TexasBaseNet
from tianshou.utils.net.common import ActorCritic
import os
import datetime
import torch
from wrapper.env_wrapper import TexasWrapper
from wrapper.policy_manager import PolicyManager
from policy.infer_agent import Agent, InferActor, InferBaseNet
import pprint
import argparse
from tianshou.env import DummyVectorEnv
import numpy as np
from loguru import logger

from tianshou.utils.net.common import ActorCritic
from policy.TexasPPOPolicy import TexasPPOPolicy
from tianshou.data import Collector, VectorReplayBuffer
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils import TensorboardLogger, WandbLogger
from tianshou.trainer import onpolicy_trainer
import pathlib
from env.chooseenv import make
from collections import OrderedDict

def get_args():
    """Build and parse the command-line configuration for PPO training."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument

    # Experiment / environment setup.
    add("--task", type=str, default="fourplayers_nolimit_texas_holdem")
    add("--seed", type=int, default=918)
    add("--buffer-size", type=int, default=30000)

    # Optimisation schedule.
    add("--lr", type=float, default=5e-5)
    add("--gamma", type=float, default=0.99)
    add("--epoch", type=int, default=10000)
    add("--step-per-epoch", type=int, default=500000)
    add("--episode-per-collect", type=int, default=100)
    add("--repeat-per-collect", type=int, default=4)
    add("--batch-size", type=int, default=256)
    add("--hidden-size", type=int, default=128)
    add("--training-num", type=int, default=1)
    add("--test-num", type=int, default=1)

    # PPO-specific knobs (int flags are used as booleans downstream).
    add("--rew-norm", type=int, default=False)
    add("--vf-coef", type=float, default=0.5)
    add("--ent-coef", type=float, default=0.05)
    add("--gae-lambda", type=float, default=0.95)
    add("--max-grad-norm", type=float, default=0.5)
    add("--eps-clip", type=float, default=0.1)
    add("--dual-clip", type=float, default=None)
    add("--value-clip", type=int, default=1)
    add("--norm-adv", type=int, default=0)
    add("--recompute-adv", type=int, default=0)

    # Logging, device and resume options.
    add("--logdir", type=str, default="/home/dq/jd/Competition_AAMAS2023/algo/logs/test")
    add("--render", type=float, default=0.0)
    default_device = "cuda" if torch.cuda.is_available() else "cpu"
    add("--device", type=str, default=default_device)
    add("--resume-id", type=str, default=None)
    add("--logger", type=str, default="tensorboard", choices=["tensorboard", "wandb"])
    add("--wandb-project", type=str, default="atari.benchmark")
    add(
        "--watch",
        default=False,
        action="store_true",
        help="watch the play of pre-trained policy only",
    )
    add("--resume-path", type=str, default='/home/dq/jd/Competition_AAMAS2023/algo/logs/2024_03_21-12_29_28/epoch_35_checkpoint.pth')
    add("--model-pool", type=str, default=None)
    add("--league-push-epoch", type=str, default=None)

    return parser.parse_args()

def find_last_checkpoint(fold):
    """Return the path of the newest checkpoint file in *fold*.

    Checkpoints are expected to be named ``epoch_<N>_checkpoint.pth``;
    the file with the largest ``<N>`` wins. ``.pth`` files that do not
    follow the pattern are skipped (the previous version crashed with
    IndexError/ValueError on e.g. ``final.pth``). If no matching file
    exists, ``os.path.join(fold, '')`` (i.e. the folder path) is
    returned, matching the original behavior.
    """
    max_epoch = 0
    max_f = ''
    for f in os.listdir(fold):
        if not f.endswith('.pth'):
            continue
        parts = f.split('_')
        # Guard against names that lack a numeric epoch field.
        if len(parts) < 2 or not parts[1].isdigit():
            continue
        epoch = int(parts[1])  # parse once instead of twice
        if epoch > max_epoch:
            max_epoch = epoch
            max_f = f
    return os.path.join(fold, max_f)

def train(args=None):
    """Run on-policy PPO training for the four-player Texas Hold'em task.

    Builds an inference-only opponent agent (managed by ``PolicyManager``),
    wraps the competition env so the other seats act through it, constructs
    a shared-backbone actor/critic pair, and drives tianshou's
    ``onpolicy_trainer``, saving a resumable checkpoint every epoch.

    Args:
        args: parsed CLI namespace; when ``None``, ``get_args()`` is called
            here. (Previously ``get_args()`` was the *default value*, which
            parsed ``sys.argv`` at module import time — a side effect even
            when ``train`` was never called.)
    """
    if args is None:
        args = get_args()
    # NOTE(review): anomaly detection has a significant runtime cost;
    # presumably left on for debugging — confirm before long runs.
    torch.autograd.set_detect_anomaly(True)

    # Fresh timestamped run directory: logs, checkpoints and the opponent
    # model pool all live under it.
    log_name = f"{datetime.datetime.now().strftime('%Y_%m_%d-%H_%M_%S')}"
    log_path = os.path.join(args.logdir, log_name)

    # Inference-only opponent network, kept on CPU: it only acts inside the
    # env wrapper and is never trained here.
    infer_net = InferBaseNet(
        input_size=180,
        device='cpu',
        hidden_sizes=args.hidden_size,
        output_sizes=args.hidden_size,
        n_layer=2,
    )
    infer_actor = InferActor(
        preprocess_net=infer_net,
        hidden_sizes=args.hidden_size,
        action_shape=6,
        softmax_output=False,
        preprocess_net_output_dim=args.hidden_size,
        device='cpu',
    )

    # Temporary env instance — only used below to read ``action_space``.
    env = make(args.task)
    policy_manager = PolicyManager(
        model_pool_dir=log_path,
        agent=Agent(infer_actor),
    )
    env = TexasWrapper(env, policy_manager)

    train_envs = DummyVectorEnv(
        [lambda: TexasWrapper(make(args.task, seed=args.seed), policy_manager)
         for _ in range(args.training_num)]
    )
    test_envs = DummyVectorEnv(
        [lambda: TexasWrapper(make(args.task, seed=args.seed), policy_manager)
         for _ in range(args.test_num)]
    )

    # Seed everything for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    train_envs.seed(args.seed)
    test_envs.seed(args.seed)

    # Learner networks: actor and critic share one preprocessing backbone.
    net = TexasBaseNet(
        input_size=180,
        device=args.device,
        hidden_sizes=args.hidden_size,
        output_sizes=args.hidden_size,
        n_layer=2,
    )
    actor = TexasActor(
        preprocess_net=net,
        hidden_sizes=args.hidden_size,
        action_shape=6,
        softmax_output=False,
        preprocess_net_output_dim=args.hidden_size,
        device=args.device,
    )
    critic = TexasCritic(
        preprocess_net=net,
        hidden_sizes=args.hidden_size,
        last_size=1,
        preprocess_net_output_dim=args.hidden_size,
        device=args.device,
    )
    # Single optimizer over both heads; ActorCritic de-duplicates the shared
    # backbone parameters.
    optim = torch.optim.Adam(ActorCritic(actor, critic).parameters(), lr=args.lr)

    def dist(p):
        # The actor emits raw logits (softmax_output=False above).
        return torch.distributions.Categorical(logits=p)

    policy = TexasPPOPolicy(
        actor,
        critic,
        optim,
        dist,
        discount_factor=args.gamma,
        gae_lambda=args.gae_lambda,
        max_grad_norm=args.max_grad_norm,
        vf_coef=args.vf_coef,
        ent_coef=args.ent_coef,
        reward_normalization=args.rew_norm,
        action_scaling=False,
        lr_scheduler=None,
        action_space=env.action_space,
        eps_clip=args.eps_clip,
        value_clip=args.value_clip,
        dual_clip=args.dual_clip,
        advantage_normalization=args.norm_adv,
        recompute_advantage=args.recompute_adv,
    ).to(args.device)
    del env  # served its purpose: providing action_space

    if args.resume_path:
        state_dict = torch.load(args.resume_path, map_location=args.device)['model']
        # Merge the checkpoint into the current policy: take the saved
        # tensor when its key exists, otherwise keep the freshly initialized
        # one (tolerates keys added to the model after the checkpoint was
        # made). BUGFIX: the previous loop assigned the *current* policy
        # value in both branches, so the checkpoint was never applied.
        _state_dict = OrderedDict(
            (k, state_dict[k] if k in state_dict else v)
            for k, v in policy.state_dict().items()
        )
        policy.load_state_dict(_state_dict)
        print("Loaded agent from: ", args.resume_path)

    buffer = VectorReplayBuffer(
        args.buffer_size,
        buffer_num=len(train_envs),
        ignore_obs_next=True,
        save_only_last_obs=False,
    )
    train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)

    # Metric logger (local ``logger`` intentionally shadows the loguru
    # import within this function, as before).
    if args.logger == "tensorboard":
        writer = SummaryWriter(log_path)
        writer.add_text("args", str(args))
        logger = TensorboardLogger(writer)
    else:
        logger = WandbLogger(
            save_interval=1,
            project=args.task,
            name=log_name,
            run_id=args.resume_id,
            config=args,
        )

    def save_fn(policy, epoch, reward):
        # Persist the best-so-far policy weights under <run>/models/.
        checkpoint_dir = os.path.join(log_path, 'models')
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        torch.save(policy.state_dict(), os.path.join(checkpoint_dir, f'epoch_{epoch}_policy_{reward}.pth'))

    def save_checkpoint_fn(epoch, env_step, gradient_step):
        # Per-epoch resumable checkpoint; its {'model': state_dict} layout
        # matches what the resume loader above expects.
        ckpt_path = os.path.join(log_path, f'epoch_{epoch}_checkpoint.pth')
        torch.save({'model': policy.state_dict()}, ckpt_path)
        return ckpt_path

    # No test collector (None): evaluation during training is disabled and
    # test_in_train is off.
    result = onpolicy_trainer(
        policy,
        train_collector,
        None,
        args.epoch,
        args.step_per_epoch,
        args.repeat_per_collect,
        args.test_num,
        args.batch_size,
        episode_per_collect=args.episode_per_collect,
        save_fn=save_fn,
        logger=logger,
        test_in_train=False,
        resume_from_log=args.resume_path is not None,
        save_checkpoint_fn=save_checkpoint_fn,
    )
    pprint.pprint(result)

if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the full training loop.
    train()

