import collections
import numpy as np

import torch
import torch.nn as nn

from lib import game, mcts

# Shape of a single observation fed to the network:
# (channels, board rows, board columns) — one plane per player
OBS_SHAPE = (2, game.GAME_ROWS, game.GAME_COLS)
# Number of convolutional filters used throughout the network body
NUM_FILTERS = 64


class Net(nn.Module):
    """
    AlphaZero-style two-headed network.

    A small residual convolutional body feeds two heads: a value head
    producing a scalar in [-1, 1] (tanh) and a policy head producing one
    logit per action.
    """

    def __init__(self, input_shape, actions_n):
        """
        :param input_shape: observation shape (channels, rows, cols)
        :param actions_n: number of possible actions
        """
        super(Net, self).__init__()

        # Input convolution: project the observation planes to NUM_FILTERS channels
        self.conv_in = nn.Sequential(
            nn.Conv2d(input_shape[0], NUM_FILTERS, kernel_size=3, padding=1),
            nn.BatchNorm2d(NUM_FILTERS),
            nn.LeakyReLU()
        )

        # Residual tower: five identical conv blocks, each applied as
        # x + block(x) in forward() (ResNet-style skip connections).
        # Attribute names conv_1..conv_5 are kept so existing checkpoints
        # (state_dict keys) remain loadable.
        self.conv_1 = self._make_residual_block()
        self.conv_2 = self._make_residual_block()
        self.conv_3 = self._make_residual_block()
        self.conv_4 = self._make_residual_block()
        self.conv_5 = self._make_residual_block()

        # The 3x3 / padding=1 convolutions preserve spatial size, so the body
        # output differs from the input only in the channel count
        body_out_shape = (NUM_FILTERS, ) + input_shape[1:]

        # Value head: 1x1 conv down to a single plane, then an MLP squashed
        # to [-1, 1] by tanh
        self.conv_val = nn.Sequential(
            nn.Conv2d(NUM_FILTERS, 1, kernel_size=1),
            nn.BatchNorm2d(1),
            nn.LeakyReLU()
        )
        # Flattened conv output size, needed to size the following Linear layer
        conv_val_size = self._get_conv_val_size(body_out_shape)
        self.value = nn.Sequential(
            nn.Linear(conv_val_size, 20),
            nn.LeakyReLU(),
            nn.Linear(20, 1),
            nn.Tanh()
        )

        # Policy head: 1x1 conv to two planes, then a linear layer producing
        # one logit per action
        self.conv_policy = nn.Sequential(
            nn.Conv2d(NUM_FILTERS, 2, kernel_size=1),
            nn.BatchNorm2d(2),
            nn.LeakyReLU()
        )
        conv_policy_size = self._get_conv_policy_size(body_out_shape)
        self.policy = nn.Sequential(
            nn.Linear(conv_policy_size, actions_n)
        )

    @staticmethod
    def _make_residual_block():
        """Build one conv / batch-norm / LeakyReLU block of the residual tower."""
        return nn.Sequential(
            nn.Conv2d(NUM_FILTERS, NUM_FILTERS, kernel_size=3, padding=1),
            nn.BatchNorm2d(NUM_FILTERS),
            nn.LeakyReLU()
        )

    def _get_conv_val_size(self, shape):
        """Flattened output size of conv_val for a single input of `shape`,
        measured by pushing a zero tensor through the head."""
        o = self.conv_val(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def _get_conv_policy_size(self, shape):
        """Flattened output size of conv_policy for a single input of `shape`,
        measured by pushing a zero tensor through the head."""
        o = self.conv_policy(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def forward(self, x):
        """
        :param x: observation batch of shape (batch,) + input_shape
        :return: (policy_logits, value) — logits of shape (batch, actions_n)
            and value of shape (batch, 1) in [-1, 1]
        """
        batch_size = x.size()[0]
        v = self.conv_in(x)
        # Residual tower
        v = v + self.conv_1(v)
        v = v + self.conv_2(v)
        v = v + self.conv_3(v)
        v = v + self.conv_4(v)
        v = v + self.conv_5(v)
        val = self.conv_val(v)
        val = self.value(val.view(batch_size, -1))
        pol = self.conv_policy(v)
        pol = self.policy(pol.view(batch_size, -1))
        return pol, val


def _encode_list_state(dest_np, state_list, who_move):
    """
    In-place encode a list-form game state into a zeroed numpy array.

    Plane 0 marks the pieces of the player to move, plane 1 the opponent's.

    :param dest_np: destination array of shape OBS_SHAPE, expected to be zero
    :param state_list: state of the game in list form (one list per column,
        cells ordered bottom-up)
    :param who_move: player index (game.PLAYER_WHITE or game.PLAYER_BLACK)
        who is to move
    """
    assert dest_np.shape == OBS_SHAPE

    for col, column_cells in enumerate(state_list):
        for height, piece in enumerate(column_cells):
            # Cells are stored bottom-up; flip to the array's top-down rows
            row = game.GAME_ROWS - height - 1
            plane = 0 if piece == who_move else 1
            dest_np[plane, row, col] = 1.0


def state_lists_to_batch(state_lists, who_moves_lists, device="cpu"):
    """
    Convert a list of list-form game states into a batched observation tensor.

    :param state_lists: list of 'list states' (one per position)
    :param who_moves_lists: list of player indices, one per state (who moves)
    :param device: torch device the resulting tensor is created on
    :return: torch.float32 tensor of shape (batch_size,) + OBS_SHAPE
    """
    assert isinstance(state_lists, list)
    batch_size = len(state_lists)
    # batch shape = (batch_size,) + OBS_SHAPE
    batch = np.zeros((batch_size,) + OBS_SHAPE, dtype=np.float32)
    # Encode each state into its slice of the batch, in place
    for idx, (state, who_move) in enumerate(zip(state_lists, who_moves_lists)):
        _encode_list_state(batch[idx], state, who_move)
    # Create the tensor directly on the target device instead of building it
    # on the CPU and copying afterwards with .to(device)
    return torch.tensor(batch, device=device)


# def play_game(net1, net2, cuda=False):
#     cur_player = 0
#     state = game.INITIAL_STATE
#     nets = [net1, net2]
#
#     while True:
#         state_list = game.decode_binary(state)
#         batch_v = state_lists_to_batch([state_list], [cur_player], cuda)
#         logits_v, _ = nets[cur_player](batch_v)
#         probs_v = F.softmax(logits_v, dim=1)
#         probs = probs_v[0].data.cpu().numpy()
#         while True:
#             action = np.random.choice(game.GAME_COLS, p=probs)
#             if action in game.possible_moves(state):
#                 break
#         state, won = game.move(state, action, cur_player)
#         if won:
#             return 1.0 if cur_player == 0 else -1.0
#         # check for the draw state
#         if len(game.possible_moves(state)) == 0:
#             return 0.0
#         cur_player = 1 - cur_player
#


def play_game(mcts_stores, replay_buffer, net1, net2, steps_before_tau_0, mcts_searches, mcts_batch_size,
              net1_plays_first=None, device="cpu"):
    """
    Play one single game, memorizing transitions into the replay buffer.

    :param mcts_stores: could be None, a single MCTS, or a list of two MCTSes
        (one per network)
    :param replay_buffer: deque receiving (state, cur_player, probs, result)
        tuples; if None, nothing is stored
    :param net1: network playing as player 1
    :param net2: network playing as player 2
    :param steps_before_tau_0: how many moves are made with tau=1 (sampling
        moves from the original probabilities, i.e. exploration); afterwards
        tau=0 makes the selection greedy
    :param mcts_searches: number of search_batch() rounds performed per move
    :param mcts_batch_size: batch size passed to each search_batch() call
    :param net1_plays_first: if None, the starting player is chosen at random
    :param device: torch device used for network evaluation during search
    :return: (net1_result, step) — game outcome from player 1's point of view
        (+1 if p1 won, -1 if lost, 0 if draw) and the number of moves played
    """
    # Validate argument types up front
    assert isinstance(replay_buffer, (collections.deque, type(None)))
    assert isinstance(mcts_stores, (mcts.MCTS, type(None), list))
    assert isinstance(net1, Net)
    assert isinstance(net2, Net)
    assert isinstance(steps_before_tau_0, int) and steps_before_tau_0 >= 0
    assert isinstance(mcts_searches, int) and mcts_searches > 0
    assert isinstance(mcts_batch_size, int) and mcts_batch_size > 0

    # Normalize mcts_stores to a two-element list, one tree per player.
    # NOTE(review): when a single MCTS instance is passed, both entries alias
    # the same tree — presumably intentional (shared statistics for
    # self-play); confirm against callers.
    if mcts_stores is None:
        mcts_stores = [mcts.MCTS(), mcts.MCTS()]
    elif isinstance(mcts_stores, mcts.MCTS):
        mcts_stores = [mcts_stores, mcts_stores]

    # Initial game state
    state = game.INITIAL_STATE
    # The two networks, indexed by player
    nets = [net1, net2]
    # Decide who moves first: random unless the caller pinned it down
    if net1_plays_first is None:
        cur_player = np.random.choice(2)
    else:
        cur_player = 0 if net1_plays_first else 1

    # Number of moves made so far
    step = 0
    # tau=1: sample the move from the probability distribution (exploration);
    # tau=0: pick the highest-probability move (greedy)
    tau = 1 if steps_before_tau_0 > 0 else 0
    # Per-move history of (state, player to move, action probabilities)
    game_history = []

    # result: outcome relative to the player who made the LAST move
    # (1 = that player won, 0 = draw); None while the game is running
    result = None
    # net1_result: outcome relative to player 1 (+1 / -1 / 0)
    net1_result = None

    while result is None:
        # Expand the current player's search tree from the current state
        mcts_stores[cur_player].search_batch(mcts_searches, mcts_batch_size, state,
                                             cur_player, nets[cur_player], device=device)
        # Action probabilities for the current state at temperature tau
        probs, _ = mcts_stores[cur_player].get_policy_value(state, tau=tau)
        # Record the transition before the move is applied
        game_history.append((state, cur_player, probs))
        action = np.random.choice(game.GAME_COLS, p=probs)
        # NOTE(review): an impossible action is only reported, not re-sampled;
        # presumably MCTS assigns illegal moves zero probability so this never
        # fires — confirm, since game.move is still called with the action
        if action not in game.possible_moves(state):
            print("Impossible action selected")
        # Apply the move; `won` reports whether it finished the game
        state, won = game.move(state, action, cur_player)
        if won:
            # The player who just moved won
            result = 1
            # Outcome relative to player 1: +1 if player 1 won, -1 otherwise
            net1_result = 1 if cur_player == 0 else -1
            break
        # Switch sides
        cur_player = 1-cur_player
        # check the draw case
        if len(game.possible_moves(state)) == 0:
            # No moves left and nobody won: a draw
            result = 0
            net1_result = 0
            break
        # Next move
        step += 1
        if step >= steps_before_tau_0:
            # Exploratory phase over: switch to greedy move selection
            tau = 0

    if replay_buffer is not None:
        # Walk the history backwards so the final outcome can be propagated:
        # `result` is from the perspective of the player who made the last
        # move and flips sign on each earlier ply (the other player's turn)
        for state, cur_player, probs in reversed(game_history):
            # Store (state, player to move, action probs, outcome for that player)
            replay_buffer.append((state, cur_player, probs, result))
            result = -result

    # Outcome relative to player 1, and the number of moves played
    return net1_result, step
