#!/usr/bin/env python3
import os
import time
import ptan
import random
import argparse
import collections

from lib import game, model, mcts

from tensorboardX import SummaryWriter

import torch
import torch.optim as optim
import torch.nn.functional as F

# Self-play and training hyperparameters.
# Number of self-play games collected per training iteration.
PLAY_EPISODES = 1  #25
# Number of MCTS searches performed for every move during self-play.
MCTS_SEARCHES = 10
# Number of leaf states expanded per MCTS search batch.
MCTS_BATCH_SIZE = 8
# Maximum number of experience entries kept in the replay buffer.
REPLAY_BUFFER = 5000 # 30000
LEARNING_RATE = 0.1
BATCH_SIZE = 256
# Number of minibatch training rounds per iteration.
TRAIN_ROUNDS = 10
# Minimum replay buffer size before training starts.
MIN_REPLAY_TO_TRAIN = 2000 #10000

# Win ratio the current net must exceed against the best net to be
# promoted to the new best net (the new best is then saved to disk).
BEST_NET_WIN_RATIO = 0.60

# Evaluate the current net against the best net every this many steps.
EVALUATE_EVERY_STEP = 100
# Number of games played per evaluation.
EVALUATION_ROUNDS = 20
# Number of initial game steps played with tau=1 (stochastic move
# selection) before switching to greedy play — presumably AlphaZero-style
# temperature scheduling inside model.play_game; confirm in lib/model.
STEPS_BEFORE_TAU_0 = 10


def evaluate(net1, net2, rounds, device="cpu"):
    """
    Play `rounds` games between net1 and net2 and return net1's win
    ratio over the decisive (non-drawn) games.

    Returns 0.5 (neutral) when every game is a draw. The original code
    divided by `n1_win + n2_win`, which raises ZeroDivisionError in the
    all-draws case since draws increment neither counter.
    """
    # Wins for player 1 (net1) and player 2 (net2); draws are not counted.
    n1_win, n2_win = 0, 0
    # Separate search trees for the two players.
    mcts_stores = [mcts.MCTS(), mcts.MCTS()]

    for _ in range(rounds):
        # Play one game; r > 0.5 means net1 won, r < -0.5 means net2 won.
        r, _ = model.play_game(mcts_stores=mcts_stores, replay_buffer=None, net1=net1, net2=net2,
                               steps_before_tau_0=0, mcts_searches=20, mcts_batch_size=16,
                               device=device)
        if r < -0.5:
            n2_win += 1
        elif r > 0.5:
            n1_win += 1
    decisive = n1_win + n2_win
    if decisive == 0:
        # All games drawn: report a neutral ratio instead of crashing;
        # 0.5 never exceeds BEST_NET_WIN_RATIO, so no false promotion.
        return 0.5
    return n1_win / decisive


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", "--name", required=True, help="Name of the run")
    parser.add_argument("--cuda", default=False, action="store_true", help="Enable CUDA")
    args = parser.parse_args()
    device = torch.device("cuda" if args.cuda else "cpu")

    # Checkpoints for promoted "best" nets are written under saves/<run-name>.
    saves_path = os.path.join("saves", args.name)
    os.makedirs(saves_path, exist_ok=True)
    writer = SummaryWriter(comment="-" + args.name)

    # Network with a policy head (logits over columns) and a value head;
    # per the original author's note it uses residual connections (defined
    # in lib/model — confirm there).
    net = model.Net(input_shape=model.OBS_SHAPE, actions_n=game.GAME_COLS).to(device)
    # TargetNet keeps a separate copy of the weights: the current best net,
    # used for self-play and as the opponent during evaluation.
    best_net = ptan.agent.TargetNet(net)
    print(net)

    optimizer = optim.SGD(net.parameters(), lr=LEARNING_RATE, momentum=0.9)

    # Experience replay buffer of self-play positions.
    replay_buffer = collections.deque(maxlen=REPLAY_BUFFER)
    # Monte Carlo Tree Search store shared across self-play games; its
    # statistics accumulate until the best net is replaced.
    mcts_store = mcts.MCTS()
    step_idx = 0
    best_idx = 0

    with ptan.common.utils.TBMeanTracker(writer, batch_size=10) as tb_tracker:
        while True:
            t = time.time()
            # Tree size before self-play, to measure how many nodes this
            # iteration adds (len(mcts_store) is the node count).
            prev_nodes = len(mcts_store)
            game_steps = 0
            for _ in range(PLAY_EPISODES):
                # Self-play one game with the best net on both sides,
                # appending training samples into replay_buffer.
                _, steps = model.play_game(mcts_store, replay_buffer, best_net.target_model, best_net.target_model,
                                           steps_before_tau_0=STEPS_BEFORE_TAU_0, mcts_searches=MCTS_SEARCHES,
                                           mcts_batch_size=MCTS_BATCH_SIZE, device=device)
                # Accumulate total moves played this iteration.
                game_steps += steps
            # New search-tree nodes created during this iteration.
            game_nodes = len(mcts_store) - prev_nodes
            # Self-play throughput: moves/sec and new tree nodes/sec.
            dt = time.time() - t
            speed_steps = game_steps / dt
            speed_nodes = game_nodes / dt
            tb_tracker.track("speed_steps", speed_steps, step_idx)
            tb_tracker.track("speed_nodes", speed_nodes, step_idx)
            print("Step %d, steps %3d, leaves %4d, steps/s %5.2f, leaves/s %6.2f, best_idx %d, replay %d" % (
                step_idx, game_steps, game_nodes, speed_steps, speed_nodes, best_idx, len(replay_buffer)))
            step_idx += 1

            # Skip training until enough experience has been collected.
            if len(replay_buffer) < MIN_REPLAY_TO_TRAIN:
                continue

            # train
            sum_loss = 0.0
            sum_value_loss = 0.0
            sum_policy_loss = 0.0

            for _ in range(TRAIN_ROUNDS):
                # Sample a random minibatch from the replay buffer.
                batch = random.sample(replay_buffer, BATCH_SIZE)
                batch_states, batch_who_moves, batch_probs, batch_values = zip(*batch)
                # Decode the compact (binary-encoded) game states into lists,
                # then convert them into an input tensor batch.
                batch_states_lists = [game.decode_binary(state) for state in batch_states]
                states_v = model.state_lists_to_batch(batch_states_lists, batch_who_moves, device)

                optimizer.zero_grad()
                probs_v = torch.FloatTensor(batch_probs).to(device)
                values_v = torch.FloatTensor(batch_values).to(device)
                # Forward pass: policy logits over moves and a value estimate.
                out_logits_v, out_values_v = net(states_v)

                # Value head: MSE between predicted and actual game outcome.
                loss_value_v = F.mse_loss(out_values_v.squeeze(-1), values_v)
                # Policy head: cross-entropy against the MCTS visit-count
                # probabilities (probs_v), pushing the net's policy toward
                # the search-improved move distribution.
                loss_policy_v = -F.log_softmax(out_logits_v, dim=1) * probs_v
                # Sum over actions, average over the batch.
                loss_policy_v = loss_policy_v.sum(dim=1).mean()
                # Combined loss; backprop and parameter update.
                loss_v = loss_policy_v + loss_value_v
                loss_v.backward()
                optimizer.step()
                sum_loss += loss_v.item()
                sum_value_loss += loss_value_v.item()
                sum_policy_loss += loss_policy_v.item()

            # Log mean losses over this iteration's training rounds.
            tb_tracker.track("loss_total", sum_loss / TRAIN_ROUNDS, step_idx)
            tb_tracker.track("loss_value", sum_value_loss / TRAIN_ROUNDS, step_idx)
            tb_tracker.track("loss_policy", sum_policy_loss / TRAIN_ROUNDS, step_idx)

            # evaluate net
            if step_idx % EVALUATE_EVERY_STEP == 0:
                # Pit the trained net against the current best net.
                win_ratio = evaluate(net, best_net.target_model, rounds=EVALUATION_ROUNDS, device=device)
                print("Net evaluated, win ratio = %.2f" % win_ratio)
                writer.add_scalar("eval_win_ratio", win_ratio, step_idx)
                if win_ratio > BEST_NET_WIN_RATIO:
                    # The trained net beat the best net convincingly:
                    # promote it, checkpoint it, and clear the MCTS store
                    # (its statistics were built with the old best net).
                    # Otherwise the loop keeps collecting data under the
                    # existing best net and search tree.
                    print("Net is better than cur best, sync")
                    best_net.sync()
                    best_idx += 1
                    file_name = os.path.join(saves_path, "best_%03d_%05d.dat" % (best_idx, step_idx))
                    torch.save(net.state_dict(), file_name)
                    mcts_store.clear()