

<!DOCTYPE html>
<html lang="zh" data-default-color-scheme="auto">



<head>
  <meta charset="UTF-8">
  <link rel="apple-touch-icon" sizes="76x76" href="/walker_sue/img/favicon.png">
  <link rel="icon" type="image/png" href="/walker_sue/img/favicon.png">
  <meta name="viewport"
        content="width=device-width, initial-scale=1.0, shrink-to-fit=no">
  <meta http-equiv="x-ua-compatible" content="ie=edge">
  
  <meta name="theme-color" content="#2f4154">
  <meta name="description" content="">
  <meta name="author" content="lk">
  <meta name="keywords" content="">
  <title>AlphaGo Zero实战 - Walker_Sue</title>

  <link  rel="stylesheet" href="https://cdn.staticfile.org/twitter-bootstrap/4.4.1/css/bootstrap.min.css" />


  <link  rel="stylesheet" href="https://cdn.staticfile.org/github-markdown-css/4.0.0/github-markdown.min.css" />
  <link  rel="stylesheet" href="/walker_sue/lib/hint/hint.min.css" />

  
    
    
      
      <link  rel="stylesheet" href="https://cdn.staticfile.org/highlight.js/10.0.0/styles/github-gist.min.css" />
    
  

  


<!-- 主题依赖的图标库，不要自行修改 -->

<link rel="stylesheet" href="//at.alicdn.com/t/font_1749284_ba1fz6golrf.css">



<link rel="stylesheet" href="//at.alicdn.com/t/font_1736178_kmeydafke9r.css">


<link  rel="stylesheet" href="/walker_sue/css/main.css" />

<!-- 自定义样式保持在最底部 -->


  <script  src="/walker_sue/js/utils.js" ></script>
  <script  src="/walker_sue/js/color-schema.js" ></script>
<meta name="generator" content="Hexo 5.3.0"></head>


<body>
  <header style="height: 70vh;">
    <nav id="navbar" class="navbar fixed-top  navbar-expand-lg navbar-dark scrolling-navbar">
  <div class="container">
    <a class="navbar-brand"
       href="/walker_sue/">&nbsp;<strong>Walker</strong>&nbsp;</a>

    <button id="navbar-toggler-btn" class="navbar-toggler" type="button" data-toggle="collapse"
            data-target="#navbarSupportedContent"
            aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
      <div class="animated-icon"><span></span><span></span><span></span></div>
    </button>

    <!-- Collapsible content -->
    <div class="collapse navbar-collapse" id="navbarSupportedContent">
      <ul class="navbar-nav ml-auto text-center">
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/walker_sue/">
                <i class="iconfont icon-home-fill"></i>
                主页
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/walker_sue/archives/">
                <i class="iconfont icon-archive-fill"></i>
                归档
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/walker_sue/categories/">
                <i class="iconfont icon-category-fill"></i>
                分类
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/walker_sue/tags/">
                <i class="iconfont icon-tags-fill"></i>
                标签
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item">
              <a class="nav-link" href="/walker_sue/about/">
                <i class="iconfont icon-user-fill"></i>
                关于
              </a>
            </li>
          
        
          
          
          
          
            <li class="nav-item dropdown">
              <a class="nav-link dropdown-toggle" href="#" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
                <i class="iconfont icon-books"></i>
                文档
              </a>
              <div class="dropdown-menu" aria-labelledby="navbarDropdown">
                
                  
                  
                  
                  <a class="dropdown-item" target="_blank" rel="noopener" href="https://hexo.fluid-dev.com/">
                    
                    主题博客
                  </a>
                
                  
                  
                  
                  <a class="dropdown-item" target="_blank" rel="noopener" href="https://hexo.fluid-dev.com/docs/guide/">
                    
                    配置指南
                  </a>
                
                  
                  
                  
                  <a class="dropdown-item" target="_blank" rel="noopener" href="https://hexo.fluid-dev.com/docs/icon/">
                    
                    图标用法
                  </a>
                
              </div>
            </li>
          
        
        
          <li class="nav-item" id="search-btn">
            <a class="nav-link" data-toggle="modal" data-target="#modalSearch">&nbsp;<i
                class="iconfont icon-search"></i>&nbsp;</a>
          </li>
        
        
          <li class="nav-item" id="color-toggle-btn">
            <a class="nav-link" href="javascript:">&nbsp;<i
                class="iconfont icon-dark" id="color-toggle-icon"></i>&nbsp;</a>
          </li>
        
      </ul>
    </div>
  </div>
</nav>

    <div class="banner intro-2" id="background" parallax="true"
         style="background: url('/walker_sue/img/default.png') no-repeat center center;
           background-size: cover;">
      <div class="full-bg-img">
        <div class="mask flex-center" style="background-color: rgba(0, 0, 0, 0.3)">
          <div class="container page-header text-center fade-in-up">
    <span class="h2" id="subtitle">
    
</span>




<div class="mt-3">
  
  
    <span class="post-meta">
      <i class="iconfont icon-date-fill" aria-hidden="true"></i>
      <time datetime="2021-03-11 13:49" pubdate>
        March 11, 2021 pm
      </time>
    </span>
  
</div>

<div class="mt-1">
  
    
    <span class="post-meta mr-2">
      <i class="iconfont icon-chart"></i>
      6.8k 字
    </span>
  

  
    
    <span class="post-meta mr-2">
      <i class="iconfont icon-clock-fill"></i>
      
      
      108
       分钟
    </span>
  

  
  
</div>


</div>

          
        </div>
      </div>
    </div>
  </header>

  <main>
    
      

<div class="container-fluid">
  <div class="row">
    <div class="d-none d-lg-block col-lg-2"></div>
    <div class="col-lg-8 nopadding-md">
      <div class="container nopadding-md" id="board-ctn">
        <div class="py-5" id="board">
          <article class="post-content mx-auto" id="post">
            <!-- SEO header -->
            <h1 style="display: none">AlphaGo Zero实战</h1>
            
            <div class="markdown-body" id="post-body">
              <div align='center' ><font size='10'>机器学习-BI</font></div>

<hr>
<div align='center' ><font size='5'>Week_07</font></div>
<div align='center' ><font size='5'>AlphaGo Zero实战</font></div>

<hr>
<h2 id="AlphaGO-实战"><a href="#AlphaGO-实战" class="headerlink" title="AlphaGO 实战"></a>AlphaGO 实战</h2><p>五子棋AI工程：</p>
<ul>
<li>game.py:定义了游戏的棋盘、获取棋盘状态，下棋（更新棋盘状态），判断是否有人获胜，绘制棋盘，两个player对弈，自我对弈</li>
<li>mcts_pure.py:实现了随机走子策略的MCTS（蒙特卡洛树搜索）</li>
<li>mcts_alphaZero.py:实现AlphaGo Zero中的MCTS（蒙特卡洛树搜索），使用了策略网络来指导树搜索并计算叶节点</li>
<li>policy_value_net_pytorch.py:策略价值网络，用来指导MCTS搜索并计算叶子节点</li>
<li>train.py:训练AI主程序</li>
<li>human_play.py:人机对弈，人来输入下棋位置，调用AI进行对战</li>
</ul>
<h3 id="1-game-py"><a href="#1-game-py" class="headerlink" title="1.game.py"></a><strong>1.game.py</strong></h3><pre><code class="hljs python"># -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np


# 定义棋盘
class Board(object):
    def __init__(self, **kwargs):
        # 默认宽度、高度为10
        self.width &#x3D; int(kwargs.get(&#39;width&#39;, 10))
        self.height &#x3D; int(kwargs.get(&#39;height&#39;, 10))
        # 保存棋盘状态，为字典结构 key为棋盘位置move，value为player编号
        self.states &#x3D; &#123;&#125;
        # 设置 n子棋，默认为5
        self.n_in_row &#x3D; int(kwargs.get(&#39;n_in_row&#39;, 5))
        self.players &#x3D; [1, 2]  # player1 and player2

    # 初始化棋盘，n_in_row子棋
    def init_board(self, start_player&#x3D;0):
        if self.width &lt; self.n_in_row or self.height &lt; self.n_in_row:
            raise Exception(&#39;board width and height can not be &#39;
                            &#39;less than &#123;&#125;&#39;.format(self.n_in_row))
        # 初始化current_player，设置为start player
        self.current_player &#x3D; self.players[start_player]  
        # 保存棋盘中可以下棋的位置 list类型
        self.availables &#x3D; list(range(self.width * self.height))
        self.states &#x3D; &#123;&#125;
        self.last_move &#x3D; -1

    # 通过move，返回location:h,w
    def move_to_location(self, move):
        h &#x3D; move &#x2F;&#x2F; self.width
        w &#x3D; move % self.width
        return [h, w]

    # 输入location二维数组h,w，返回move
    def location_to_move(self, location):
        if len(location) !&#x3D; 2:
            return -1
        h &#x3D; location[0]
        w &#x3D; location[1]
        move &#x3D; h * self.width + w
        if move not in range(self.width * self.height):
            return -1
        return move

    # 返回当前用户的棋盘状态，状态大小为4*width*height
    def current_state(self):
        square_state &#x3D; np.zeros((4, self.width, self.height))
        if self.states:
            # 获取每一步，以及下棋的player
            moves, players &#x3D; np.array(list(zip(*self.states.items())))
            move_curr &#x3D; moves[players &#x3D;&#x3D; self.current_player]
            move_oppo &#x3D; moves[players !&#x3D; self.current_player]
            # 当前player状态
            square_state[0][move_curr &#x2F;&#x2F; self.width,
                            move_curr % self.height] &#x3D; 1.0
            # 对手player状态
            square_state[1][move_oppo &#x2F;&#x2F; self.width,
                            move_oppo % self.height] &#x3D; 1.0
            # 记录最后一步（落子）的位置
            square_state[2][self.last_move &#x2F;&#x2F; self.width,
                            self.last_move % self.height] &#x3D; 1.0
        if len(self.states) % 2 &#x3D;&#x3D; 0:
            square_state[3][:, :] &#x3D; 1.0  # 显示的颜色值 
        return square_state[:, ::-1, :]

    # 当前current_player下了一步棋，需要保存状态，执棋方切换
    def do_move(self, move):
        # 保存当前move 是由current_player下的
        self.states[move] &#x3D; self.current_player
        # 下了一步棋，棋盘中可以下的位置就少了一个
        self.availables.remove(move)
        # 执棋方切换
        self.current_player &#x3D; (
            self.players[0] if self.current_player &#x3D;&#x3D; self.players[1]
            else self.players[1]
        )
        self.last_move &#x3D; move

    # 判断是否有人获胜
    def has_a_winner(self):
        width &#x3D; self.width
        height &#x3D; self.height
        states &#x3D; self.states
        n &#x3D; self.n_in_row

        moved &#x3D; list(set(range(width * height)) - set(self.availables))
        # 单方下棋步骤不足n_in_row
        if len(moved) &lt; self.n_in_row *2-1:
            return False, -1

        for m in moved:
            # 将move转化为 [h,w]
            h &#x3D; m &#x2F;&#x2F; width
            w &#x3D; m % width
            # 当前步是由哪个player下的
            player &#x3D; states[m]

            if (w in range(width - n + 1) and
                    len(set(states.get(i, -1) for i in range(m, m + n))) &#x3D;&#x3D; 1):
                return True, player

            if (h in range(height - n + 1) and
                    len(set(states.get(i, -1) for i in range(m, m + n * width, width))) &#x3D;&#x3D; 1):
                return True, player

            if (w in range(width - n + 1) and h in range(height - n + 1) and
                    len(set(states.get(i, -1) for i in range(m, m + n * (width + 1), width + 1))) &#x3D;&#x3D; 1):
                return True, player

            if (w in range(n - 1, width) and h in range(height - n + 1) and
                    len(set(states.get(i, -1) for i in range(m, m + n * (width - 1), width - 1))) &#x3D;&#x3D; 1):
                return True, player

        return False, -1

    # 判断游戏是否结束
    def game_end(self):
        win, winner &#x3D; self.has_a_winner()
        if win:
            return True, winner
        elif not len(self.availables):
            return True, -1
        return False, -1

    def get_current_player(self):
        return self.current_player

# Game Server
class Game(object):
    def __init__(self, board, **kwargs):
        self.board &#x3D; board

    # 绘制棋盘和棋子信息
    def graphic(self, board, player1, player2):
        width &#x3D; board.width
        height &#x3D; board.height

        print(&quot;Player&quot;, player1, &quot;with X&quot;.rjust(3))
        print(&quot;Player&quot;, player2, &quot;with O&quot;.rjust(3))
        print()
        for x in range(width):
            print(&quot;&#123;0:10&#125;&quot;.format(x), end&#x3D;&#39;&#39;)
        print(&#39;\r\n&#39;)
        for i in range(height - 1, -1, -1):
            print(&quot;&#123;0:4d&#125;&quot;.format(i), end&#x3D;&#39;&#39;)
            for j in range(width):
                loc &#x3D; i * width + j
                p &#x3D; board.states.get(loc, -1)
                if p &#x3D;&#x3D; player1:
                    print(&#39;X&#39;.center(10), end&#x3D;&#39;&#39;)
                elif p &#x3D;&#x3D; player2:
                    print(&#39;O&#39;.center(10), end&#x3D;&#39;&#39;)
                else:
                    print(&#39;_&#39;.center(10), end&#x3D;&#39;&#39;)
            print(&#39;\r\n\r\n&#39;)

    # 开始比赛，player1与player2
    def start_play(self, player1, player2, start_player&#x3D;0, is_shown&#x3D;1):
        if start_player not in (0, 1):
            raise Exception(&#39;start_player should be either 0 (player1 first) &#39;
                            &#39;or 1 (player2 first)&#39;)
        # 初始化棋盘
        self.board.init_board(start_player)
        p1, p2 &#x3D; self.board.players
        # 设置player index
        player1.set_player_ind(p1)
        player2.set_player_ind(p2)
        players &#x3D; &#123;p1: player1, p2: player2&#125;
        if is_shown:
            self.graphic(self.board, player1.player, player2.player)
        # 一直循环到比赛结束    
        while True:
            # 获取当前的player
            current_player &#x3D; self.board.get_current_player()
            player_in_turn &#x3D; players[current_player]
            move &#x3D; player_in_turn.get_action(self.board)
            self.board.do_move(move)
            if is_shown:
                self.graphic(self.board, player1.player, player2.player)
            end, winner &#x3D; self.board.game_end()
            if end:
                if is_shown:
                    if winner !&#x3D; -1:
                        print(&quot;游戏结束，获胜方为 &quot;, players[winner])
                    else:
                        print(&quot;游戏结束，双方平局&quot;)
                return winner

    # AI自我对弈，存储自我对弈数据 用于训练 self-play data: (state, mcts_probs, z)
    def start_self_play(self, player, is_shown&#x3D;0, temp&#x3D;1e-3):
        # 初始化棋盘
        self.board.init_board()
        p1, p2 &#x3D; self.board.players
        # 记录该局对应的数据：states, mcts_probs, current_players
        states, mcts_probs, current_players &#x3D; [], [], []
        # 一直循环到比赛结束
        while True:
            # 得到player的下棋位置
            move, move_probs &#x3D; player.get_action(self.board, temp&#x3D;temp, return_prob&#x3D;1)
            # 存储数据
            states.append(self.board.current_state()) #棋盘状态
            mcts_probs.append(move_probs)
            current_players.append(self.board.current_player)
            # 按照move来下棋
            self.board.do_move(move)
            if is_shown:
                self.graphic(self.board, p1, p2)
            # 判断游戏是否结束end，统计获胜方 winner
            end, winner &#x3D; self.board.game_end()
            if end:
                # 记录该局对弈中的每步分值，胜1，负-1，平局0
                winners_z &#x3D; np.zeros(len(current_players))
                if winner !&#x3D; -1:
                    winners_z[np.array(current_players) &#x3D;&#x3D; winner] &#x3D; 1.0
                    winners_z[np.array(current_players) !&#x3D; winner] &#x3D; -1.0
                # 重置MCTS根节点 reset MCTS root node
                player.reset_player()
                if is_shown:
                    if winner !&#x3D; -1:
                        print(&quot;游戏结束，获胜一方为 &quot;, winner)
                    else:
                        print(&quot;游戏结束，双方平局&quot;)
                # 返回获胜方，self-play数据: (state, mcts_probs, z)
                return winner, zip(states, mcts_probs, winners_z)
</code></pre>
<h3 id="2-mcts-pure-py"><a href="#2-mcts-pure-py" class="headerlink" title="2.mcts_pure.py"></a><strong>2.mcts_pure.py</strong></h3><pre><code class="hljs python"># 实现了蒙特卡洛树搜索 MCTS

import numpy as np
import copy
from operator import itemgetter


# 快速走子策略：随机走子
def rollout_policy_fn(board):
    # 随机走，从棋盘中可以下棋的位置中随机选一个
    action_probs &#x3D; np.random.rand(len(board.availables))
    return zip(board.availables, action_probs)


# policy_value_fn 考虑了棋盘状态，输出一组(action, probability)和分数[-1,1]之间
def policy_value_fn(board):
    # 对于pure MCTS来说，返回统一的概率，得分score为0
    action_probs &#x3D; np.ones(len(board.availables))&#x2F;len(board.availables)
    return zip(board.availables, action_probs), 0

# MCTS树节点，每个节点都记录了自己的Q值，先验概率P和 访问计数调整前的得分（visit-count-adjusted prior score） u
class TreeNode(object):
    def __init__(self, parent, prior_p):
        self._parent &#x3D; parent
        self._children &#x3D; &#123;&#125;  # a map from action to TreeNode
        self._n_visits &#x3D; 0
        self._Q &#x3D; 0
        self._u &#x3D; 0
        self._P &#x3D; prior_p
    # Expand，展开叶子节点（新的孩子节点），action_priors为(action, prior probability)
    def expand(self, action_priors):
        for action, prob in action_priors:
            # 如果不是该节点的子节点，那么就expand 添加为子节点
            if action not in self._children:
                # 父亲节点为当前节点self,先验概率为prob
                self._children[action] &#x3D; TreeNode(self, prob)
    
    # Select步骤，在孩子节点中，选择具有最大行动价值UCT，通过get_value(c_puct)函数得到
    def select(self, c_puct):
        # 每次选择最大UCT值的节点，返回(action, next_node)
        return max(self._children.items(),
                   key&#x3D;lambda act_node: act_node[1].get_value(c_puct))

    # 从叶子评估中，更新节点值，leaf_value表明了当前player的子树评估值
    def update(self, leaf_value):
        # 节点访问次数+1
        self._n_visits +&#x3D; 1
        # 更新Q值，Update Q, a running average of values for all visits.
        self._Q +&#x3D; 1.0*(leaf_value - self._Q) &#x2F; self._n_visits
        
    # 递归的更新所有祖先，调用self.update
    def update_recursive(self, leaf_value):
        # 如果不是根节点，就需要先调用父亲节点的更新
        if self._parent:
            self._parent.update_recursive(-leaf_value)
        self.update(leaf_value)

    # 计算节点价值 UCT值 &#x3D; Q值 + 调整后的访问次数（exploitation + exploration）
    def get_value(self, c_puct):
        self._u &#x3D; (c_puct * self._P *
                   np.sqrt(self._parent._n_visits) &#x2F; (1 + self._n_visits))
        return self._Q + self._u

    # 判断是否为叶子节点
    def is_leaf(self):
        return self._children &#x3D;&#x3D; &#123;&#125;
    
    # 判断是否为根节点
    def is_root(self):
        return self._parent is None

# MCTS：Monte Carlo Tree Search 实现了蒙特卡洛树的搜索 
class MCTS(object):
    # policy_value_fn 考虑了棋盘状态，输出一组(action, probability)和分数[-1,1]之间(预计结束时的比分期望)
    # c_puct exploitation和exploration之间的折中系数
    def __init__(self, policy_value_fn, c_puct&#x3D;5, n_playout&#x3D;10000):
        self._root &#x3D; TreeNode(None, 1.0) # 根节点
        self._policy &#x3D; policy_value_fn   # 策略状态，考虑了棋盘状态，输出一组(action, probability)和分数[-1,1]之间
        self._c_puct &#x3D; c_puct # exploitation和exploration之间的折中系数
        self._n_playout &#x3D; n_playout

    # 从根节点到叶节点运行每一个playout，获取叶节点的值（胜负平结果1，-1,0），并通过其父节点将其传播回来
    # 状态是就地修改的，所以需要保存副本
    def _playout(self, state):
        node &#x3D; self._root
        while(1):
            if node.is_leaf():
                break
            # 基于贪心算法 选择下一步
            action, node &#x3D; node.select(self._c_puct)
            state.do_move(action)
        # 对于current player，根据state 得到一组(action, probability)，这里不需要得到得分 _
        action_probs, _ &#x3D; self._policy(state)
        # 检查游戏是否结束
        end, winner &#x3D; state.game_end()
        if not end:
            node.expand(action_probs)
        # 采用快速走子策略，评估叶子结点值（是否获胜）
        leaf_value &#x3D; self._evaluate_rollout(state)
        # 更新本次传播路径（遍历节点）中的（节点值 和 访问次数）
        node.update_recursive(-leaf_value)

    # 使用rollout策略，一直到游戏结束，如果当前选手获胜返回+1，对手获胜返回-1，平局返回0
    def _evaluate_rollout(self, state, limit&#x3D;1000):
        player &#x3D; state.get_current_player()
        for i in range(limit):
            end, winner &#x3D; state.game_end()
            if end:
                break
            # 采用快速走子策略，得到action
            action_probs &#x3D; rollout_policy_fn(state)
            max_action &#x3D; max(action_probs, key&#x3D;itemgetter(1))[0]
            state.do_move(max_action)
        else:
            # 如果没有break for循环，发出警告
            print(&quot;WARNING: rollout reached move limit&quot;)
        if winner &#x3D;&#x3D; -1:  # 平局
            return 0
        else:
            return 1 if winner &#x3D;&#x3D; player else -1
        
    # 顺序执行所有的playouts，输入的state为当前游戏的状态，返回最经常访问的action
    def get_move(self, state):
        for n in range(self._n_playout):
            state_copy &#x3D; copy.deepcopy(state)
            self._playout(state_copy)
        return max(self._root._children.items(),
                   key&#x3D;lambda act_node: act_node[1]._n_visits)[0]

    # 在树中前进一步
    def update_with_move(self, last_move):
        if last_move in self._root._children:
            self._root &#x3D; self._root._children[last_move]
            self._root._parent &#x3D; None
        else:
            self._root &#x3D; TreeNode(None, 1.0)

    def __str__(self):
        return &quot;MCTS&quot;

# 基于MCTS的AI Player
class MCTSPlayer(object):
    def __init__(self, c_puct&#x3D;5, n_playout&#x3D;2000):
        self.mcts &#x3D; MCTS(policy_value_fn, c_puct, n_playout)
        
    # 设置player index
    def set_player_ind(self, p):
        self.player &#x3D; p
        
    # 重置MCTS树
    def reset_player(self):
        self.mcts.update_with_move(-1)
    
    # 获取AI下棋的位置
    def get_action(self, board):
        sensible_moves &#x3D; board.availables
        if len(sensible_moves) &gt; 0:
            move &#x3D; self.mcts.get_move(board)
            self.mcts.update_with_move(-1)
            return move
        else:
            print(&quot;WARNING: the board is full&quot;)

    def __str__(self):
        return &quot;MCTS &#123;&#125;&quot;.format(self.player)
</code></pre>
<h3 id="3-mcts-alphaZero-py"><a href="#3-mcts-alphaZero-py" class="headerlink" title="3.mcts_alphaZero.py"></a><strong>3.mcts_alphaZero.py</strong></h3><pre><code class="hljs python"># 实现AlphaGo Zero的蒙特卡罗树搜索，使用了策略网络来指导树搜索并计算叶节点
import numpy as np
import copy

# 定义SoftMax函数，求概率
def softmax(x):
    probs &#x3D; np.exp(x - np.max(x))
    probs &#x2F;&#x3D; np.sum(probs)
    return probs

# MCTS树节点，每个节点都记录了自己的Q值，先验概率P和 UCT值第二项，即调整后的访问次数u（用于exploration）
class TreeNode(object):
    # 节点初始化
    def __init__(self, parent, prior_p):
        self._parent &#x3D; parent
        self._children &#x3D; &#123;&#125;  # Action到TreeNode的映射map
        self._n_visits &#x3D; 0   # 访问次数
        self._Q &#x3D; 0          # 行动价值
        self._u &#x3D; 0          # UCT值第二项，即调整后的访问次数（exploration）
        self._P &#x3D; prior_p    # 先验概率

    # Expand，展开叶子节点（添加新的孩子节点）
    def expand(self, action_priors):
        for action, prob in action_priors:
            # 如果不是该节点的子节点，那么就expand 添加为子节点
            if action not in self._children:
                # 父亲节点为当前节点self,先验概率为prob
                self._children[action] &#x3D; TreeNode(self, prob)

    # Select步骤，在孩子节点中，选择具有最大行动价值UCT，通过get_value(c_puct)函数得到
    def select(self, c_puct):
        # 每次选择最大UCT值的节点，返回(action, next_node)
        return max(self._children.items(),
                   key&#x3D;lambda act_node: act_node[1].get_value(c_puct))

    # 从叶子评估中，更新节点Q值和访问次数
    def update(self, leaf_value):
        # 节点访问次数+1
        self._n_visits +&#x3D; 1
        # 更新Q值，变化的Q&#x3D;(leaf_value - self._Q) 对于所有访问次数进行平均
        self._Q +&#x3D; 1.0*(leaf_value - self._Q) &#x2F; self._n_visits

    # 递归的更新所有祖先，调用self.update
    def update_recursive(self, leaf_value):
        # 如果不是根节点，就需要先调用父亲节点的更新
        if self._parent:
            self._parent.update_recursive(-leaf_value)
        self.update(leaf_value)

    # 计算节点价值 UCT值 &#x3D; Q值 + 调整后的访问次数（exploitation + exploration）
    def get_value(self, c_puct):
        # 计算调整后的访问次数
        self._u &#x3D; (c_puct * self._P *
                   np.sqrt(self._parent._n_visits) &#x2F; (1 + self._n_visits))
        return self._Q + self._u
    
    # 判断是否为叶子节点
    def is_leaf(self):
        return self._children &#x3D;&#x3D; &#123;&#125;
    
    # 判断是否为根节点
    def is_root(self):
        return self._parent is None

# MCTS：Monte Carlo Tree Search 实现了蒙特卡洛树的搜索 
class MCTS(object):
    # policy_value_fn 考虑了棋盘状态，输出一组(action, probability)和分数[-1,1]之间(预计结束时的比分期望)
    # c_puct exploitation和exploration之间的折中系数
    def __init__(self, policy_value_fn, c_puct&#x3D;5, n_playout&#x3D;10000):
        self._root &#x3D; TreeNode(None, 1.0) # 根节点
        self._policy &#x3D; policy_value_fn   # 策略状态，考虑了棋盘状态，输出一组(action, probability)和分数[-1,1]之间
        self._c_puct &#x3D; c_puct # exploitation和exploration之间的折中系数
        self._n_playout &#x3D; n_playout # 模拟的次数

    # 从根节点到叶节点运行每一个playout，获取叶节点的值（胜负平结果1,-1,0），并通过其父节点将其传播回来
    # 状态是就地修改的，所以需要保存副本
    def _playout(self, state):
        # 设置当前节点
        node &#x3D; self._root
        # 必须要走到叶子节点
        while(1):
            if node.is_leaf():
                break
            # 基于贪心算法 选择下一步
            action, node &#x3D; node.select(self._c_puct)
            state.do_move(action)

        # 对于current player，根据state 得到一组(action, probability) 和分数v [-1,1]之间（比赛结束时的预期结果）
        action_probs, leaf_value &#x3D; self._policy(state)
        # 检查游戏是否结束
        end, winner &#x3D; state.game_end()
        if not end: # 没有结束，就展开节点
            node.expand(action_probs)
        else:
            # 游戏结束，计算leaf_value
            if winner &#x3D;&#x3D; -1:  # 平局
                leaf_value &#x3D; 0.0
            else:
                leaf_value &#x3D; (
                    1.0 if winner &#x3D;&#x3D; state.get_current_player() else -1.0
                )

        # 将子节点的评估值反向传播更新父节点(所有)
        node.update_recursive(-leaf_value)

    # 顺序_n_playout次playout，返回可能的actions和相应的可能性
    # state为当前棋盘状态，temp 温度参数，控制了探索的程度 (0,1]范围
    # 当MCTS搜索完成时，返回局面state下的落子概率π，与N^(1 &#x2F;temp)成正比，其中N是从根状态每次移动的访问计数，temp是控制温度的参数
    def get_move_probs(self, state, temp&#x3D;1e-3):
        # 运行_n_playout次 _playout
        for n in range(self._n_playout):
            # 在进行_playout之前需要保存当前状态的副本，因为状态是就地修改的
            state_copy &#x3D; copy.deepcopy(state)
            self._playout(state_copy)

        # 基于节点的访问次数，计算move probabilities
        act_visits &#x3D; [(act, node._n_visits) for act, node in self._root._children.items()]
        acts, visits &#x3D; zip(*act_visits)
        # 基于节点的访问次数，通过softmax计算概率
        act_probs &#x3D; softmax(1.0&#x2F;temp * np.log(np.array(visits) + 1e-10))

        return acts, act_probs

    # 在树中前进一步
    def update_with_move(self, last_move):
        if last_move in self._root._children:
            self._root &#x3D; self._root._children[last_move]
            self._root._parent &#x3D; None
        else:
            self._root &#x3D; TreeNode(None, 1.0)

    def __str__(self):
        return &quot;MCTS&quot;

# 基于MCTS的AI Player
class MCTSPlayer(object):
    def __init__(self, policy_value_function,
                 c_puct&#x3D;5, n_playout&#x3D;2000, is_selfplay&#x3D;0):
        # 使用MCTS进行搜索
        self.mcts &#x3D; MCTS(policy_value_function, c_puct, n_playout)
        self._is_selfplay &#x3D; is_selfplay
        
    # 设置player index
    def set_player_ind(self, p):
        self.player &#x3D; p

    # 重置MCTS树
    def reset_player(self):
        self.mcts.update_with_move(-1)
        
    # 获取AI下棋的位置
    def get_action(self, board, temp&#x3D;1e-3, return_prob&#x3D;0):
        # 获取所有可能的下棋位置
        sensible_moves &#x3D; board.availables
        # MCTS返回的pi向量，基于alphaGo Zero论文
        move_probs &#x3D; np.zeros(board.width*board.height)
        if len(sensible_moves) &gt; 0:
            acts, probs &#x3D; self.mcts.get_move_probs(board, temp)
            move_probs[list(acts)] &#x3D; probs
            if self._is_selfplay:
                # 为探索添加Dirichlet噪声(需要进行自我训练)
                move &#x3D; np.random.choice(
                    acts,
                    p&#x3D;0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs)))
                )
                # 更新根节点，重新使用搜索树
                self.mcts.update_with_move(move)
            else:
                # 默认temp&#x3D;1e-3, 几乎等同于选择概率最大的那一步
                move &#x3D; np.random.choice(acts, p&#x3D;probs)
                # 重置根节点 reset the root node
                self.mcts.update_with_move(-1)
#                location &#x3D; board.move_to_location(move)
#                print(&quot;AI move: %d,%d\n&quot; % (location[0], location[1]))

            if return_prob:
                return move, move_probs
            else:
                return move
        else:
            print(&quot;WARNING: the board is full&quot;)

    def __str__(self):
        return &quot;MCTS &#123;&#125;&quot;.format(self.player)
</code></pre>
<h3 id="4-policy-value-net-pytorch-py"><a href="#4-policy-value-net-pytorch-py" class="headerlink" title="4.policy_value_net_pytorch.py"></a><strong>4.policy_value_net_pytorch.py</strong></h3><pre><code class="hljs python"># 使用PyTorch实现策略价值网络 PolicyValueNet

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np


# 设置学习率
def set_learning_rate(optimizer, lr):
    for param_group in optimizer.param_groups:
        param_group[&#39;lr&#39;] &#x3D; lr

# 策略网络模型
class Net(nn.Module):
    def __init__(self, board_width, board_height):
        super(Net, self).__init__()

        self.board_width &#x3D; board_width
        self.board_height &#x3D; board_height
        # 通用层 common layers
        self.conv1 &#x3D; nn.Conv2d(4, 32, kernel_size&#x3D;3, padding&#x3D;1)
        self.conv2 &#x3D; nn.Conv2d(32, 64, kernel_size&#x3D;3, padding&#x3D;1)
        self.conv3 &#x3D; nn.Conv2d(64, 128, kernel_size&#x3D;3, padding&#x3D;1)
        # 行动策略层 action policy layers
        self.act_conv1 &#x3D; nn.Conv2d(128, 4, kernel_size&#x3D;1)
        self.act_fc1 &#x3D; nn.Linear(4*board_width*board_height,
                                 board_width*board_height)
        # 状态值层 state value layers
        self.val_conv1 &#x3D; nn.Conv2d(128, 2, kernel_size&#x3D;1)
        self.val_fc1 &#x3D; nn.Linear(2*board_width*board_height, 64)
        self.val_fc2 &#x3D; nn.Linear(64, 1)

    # 得到预测结果，返回行动可能性和状态值
    def forward(self, state_input):
        # 通用层 common layers
        x &#x3D; F.relu(self.conv1(state_input))
        x &#x3D; F.relu(self.conv2(x))
        x &#x3D; F.relu(self.conv3(x))
        # 行动策略层 action policy layers
        x_act &#x3D; F.relu(self.act_conv1(x))
        x_act &#x3D; x_act.view(-1, 4*self.board_width*self.board_height)
        x_act &#x3D; F.log_softmax(self.act_fc1(x_act))
        # 状态值层 state value layers
        x_val &#x3D; F.relu(self.val_conv1(x))
        x_val &#x3D; x_val.view(-1, 2*self.board_width*self.board_height)
        x_val &#x3D; F.relu(self.val_fc1(x_val))
        x_val &#x3D; F.tanh(self.val_fc2(x_val))
        # 输出行动可能性 和 终局的预期状态值
        return x_act, x_val

# 策略价值网络
class PolicyValueNet():
    def __init__(self, board_width, board_height,
                 model_file&#x3D;None, use_gpu&#x3D;False):
        self.use_gpu &#x3D; use_gpu
        self.board_width &#x3D; board_width
        self.board_height &#x3D; board_height
        self.l2_const &#x3D; 1e-4  # L2正则项系数
        # 设置策略网络参数
        if self.use_gpu:
            self.policy_value_net &#x3D; Net(board_width, board_height).cuda()
        else:
            self.policy_value_net &#x3D; Net(board_width, board_height)
        self.optimizer &#x3D; optim.Adam(self.policy_value_net.parameters(), weight_decay&#x3D;self.l2_const)

        if model_file:
            net_params &#x3D; torch.load(model_file)
            self.policy_value_net.load_state_dict(net_params)

    # 输入状态，得到行动的可能性和状态值，按照batch进行输入
    def policy_value(self, state_batch):
        if self.use_gpu:
            state_batch &#x3D; Variable(torch.FloatTensor(state_batch).cuda())
            log_act_probs, value &#x3D; self.policy_value_net(state_batch)
            # 通过e的次幂，求得action probabilities
            act_probs &#x3D; np.exp(log_act_probs.data.cpu().numpy())
            return act_probs, value.data.cpu().numpy()
        else:
            state_batch &#x3D; Variable(torch.FloatTensor(state_batch))
            log_act_probs, value &#x3D; self.policy_value_net(state_batch)
            act_probs &#x3D; np.exp(log_act_probs.data.numpy())
            return act_probs, value.data.numpy()

    # 相比于policy_value多了一个action位置，输入棋盘，输出所有可能的(action, probability)，以及棋盘状态分数[-1,1]
    def policy_value_fn(self, board):
        # 得到所有可以下棋的位置
        legal_positions &#x3D; board.availables
        # ascontiguousarray函数将一个内存不连续存储的数组转换为内存连续存储的数组，使得运行速度更快
        current_state &#x3D; np.ascontiguousarray(board.current_state().reshape(-1, 4, self.board_width, self.board_height))
        if self.use_gpu:
            # 前向传播，直接输入数据即可
            log_act_probs, value &#x3D; self.policy_value_net(Variable(torch.from_numpy(current_state)).cuda().float())
            act_probs &#x3D; np.exp(log_act_probs.data.cpu().numpy().flatten())
        else:
            log_act_probs, value &#x3D; self.policy_value_net(Variable(torch.from_numpy(current_state)).float())
            act_probs &#x3D; np.exp(log_act_probs.data.numpy().flatten())
        act_probs &#x3D; zip(legal_positions, act_probs[legal_positions])
        value &#x3D; value.data[0][0]
        # 返回 act_probs:所有可能的(action, probability)，value: 棋盘状态分数
        return act_probs, value

    # 训练一步
    def train_step(self, state_batch, mcts_probs, winner_batch, lr):
        # 包装变量
        if self.use_gpu:
            state_batch &#x3D; Variable(torch.FloatTensor(state_batch).cuda())
            mcts_probs &#x3D; Variable(torch.FloatTensor(mcts_probs).cuda())
            winner_batch &#x3D; Variable(torch.FloatTensor(winner_batch).cuda())
        else:
            state_batch &#x3D; Variable(torch.FloatTensor(state_batch))
            mcts_probs &#x3D; Variable(torch.FloatTensor(mcts_probs))
            winner_batch &#x3D; Variable(torch.FloatTensor(winner_batch))
        # 清空模型中参数的梯度，即梯度置为0
        self.optimizer.zero_grad()
        # 设置学习率
        set_learning_rate(self.optimizer, lr)
        # 前向传播
        log_act_probs, value &#x3D; self.policy_value_net(state_batch)
        # 定义 loss &#x3D; (z - v)^2 - pi^T * log(p) + c||theta||^2
        value_loss &#x3D; F.mse_loss(value.view(-1), winner_batch)
        policy_loss &#x3D; -torch.mean(torch.sum(mcts_probs*log_act_probs, 1))
        loss &#x3D; value_loss + policy_loss
        # 反向传播，优化参数
        loss.backward()
        self.optimizer.step()
        # 计算Policy信息熵
        entropy &#x3D; -torch.mean(torch.sum(torch.exp(log_act_probs) * log_act_probs, 1))
        # 返回loss和entropy
        return loss.item(), entropy.item()

    # 获得模型的参数，即state_dict
    def get_policy_param(self):
        net_params &#x3D; self.policy_value_net.state_dict()
        return net_params

    # 保存模型文件
    def save_model(self, model_file):
        # 保存模型的参数
        net_params &#x3D; self.get_policy_param()
        torch.save(net_params, model_file)
</code></pre>
<h3 id="5-train-py"><a href="#5-train-py" class="headerlink" title="5.train.py"></a><strong>5.train.py</strong></h3><pre><code class="hljs python"># 训练五子棋AI

from __future__ import print_function
import random
import numpy as np
# deque 是一个双端队列
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure # 随机走子策略的AI
from mcts_alphaZero import MCTSPlayer # AlphaGo方式的AI
from policy_value_net_pytorch import PolicyValueNet  # Pytorch

class TrainPipeline():
    def __init__(self, init_model&#x3D;None):
        # 设置棋盘和游戏的参数
        self.board_width &#x3D; 10
        self.board_height &#x3D; 10
        self.n_in_row &#x3D; 4
        self.board &#x3D; Board(width&#x3D;self.board_width,
                           height&#x3D;self.board_height,
                           n_in_row&#x3D;self.n_in_row)
        self.game &#x3D; Game(self.board)
        # 设置训练参数
        self.learn_rate &#x3D; 2e-3 # 基准学习率
        self.lr_multiplier &#x3D; 1.0  # 基于KL自动调整学习倍速
        self.temp &#x3D; 1.0  # 温度参数
        self.n_playout &#x3D; 400  # 每下一步棋，模拟的步骤数
        self.c_puct &#x3D; 5 # exploitation和exploration之间的折中系数
        self.buffer_size &#x3D; 10000
        self.batch_size &#x3D; 512  # mini-batch size for training
        self.data_buffer &#x3D; deque(maxlen&#x3D;self.buffer_size) #使用 deque 创建一个双端队列
        self.play_batch_size &#x3D; 1
        self.epochs &#x3D; 5  # num of train_steps for each update
        self.kl_targ &#x3D; 0.02 # 早停检查
        self.check_freq &#x3D; 50 # 每50次检查一次，策略价值网络是否更新
        self.game_batch_num &#x3D; 500 # 训练多少个epoch
        self.best_win_ratio &#x3D; 0.0 # 当前最佳胜率，用他来判断是否有更好的模型
        # 弱AI（纯MCTS）模拟步数，用于给训练的策略AI提供对手
        self.pure_mcts_playout_num &#x3D; 1000
        if init_model:
            # 通过init_model设置策略网络
            self.policy_value_net &#x3D; PolicyValueNet(self.board_width, self.board_height, model_file&#x3D;init_model)
        else:
            # 训练一个新的策略网络
            self.policy_value_net &#x3D; PolicyValueNet(self.board_width, self.board_height)
        # AI Player，设置is_selfplay&#x3D;1 自我对弈，因为是在进行训练
        self.mcts_player &#x3D; MCTSPlayer(self.policy_value_net.policy_value_fn,
                                      c_puct&#x3D;self.c_puct,
                                      n_playout&#x3D;self.n_playout,
                                      is_selfplay&#x3D;1)
        
    # 通过旋转和翻转增加数据集, play_data: [(state, mcts_prob, winner_z), ..., ...]
    def get_equi_data(self, play_data):
        extend_data &#x3D; []
        for state, mcts_porb, winner in play_data:
            # 在4个方向上进行expand，每个方向都进行旋转，水平翻转
            for i in [1, 2, 3, 4]:
                # 逆时针旋转
                equi_state &#x3D; np.array([np.rot90(s, i) for s in state])
                equi_mcts_prob &#x3D; np.rot90(np.flipud(mcts_porb.reshape(self.board_height, self.board_width)), i)
                extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))
                # 水平翻转
                equi_state &#x3D; np.array([np.fliplr(s) for s in equi_state])
                equi_mcts_prob &#x3D; np.fliplr(equi_mcts_prob)
                extend_data.append((equi_state, np.flipud(equi_mcts_prob).flatten(), winner))
        return extend_data

    # 收集自我对弈数据，用于训练
    def collect_selfplay_data(self, n_games&#x3D;1):
        for i in range(n_games):
            # 与MCTS Player进行对弈
            winner, play_data &#x3D; self.game.start_self_play(self.mcts_player, temp&#x3D;self.temp)
            play_data &#x3D; list(play_data)[:]
            # 保存下了多少步
            self.episode_len &#x3D; len(play_data)
            # 增加数据 play_data
            play_data &#x3D; self.get_equi_data(play_data)
            self.data_buffer.extend(play_data)
            
    # 更新策略网络
    def policy_update(self):
        mini_batch &#x3D; random.sample(self.data_buffer, self.batch_size)
        state_batch &#x3D; [data[0] for data in mini_batch]
        mcts_probs_batch &#x3D; [data[1] for data in mini_batch]
        winner_batch &#x3D; [data[2] for data in mini_batch]
        # 保存更新前的old_probs, old_v
        old_probs, old_v &#x3D; self.policy_value_net.policy_value(state_batch)
        for i in range(self.epochs):
            # 每次训练，调整参数，返回loss和entropy
            loss, entropy &#x3D; self.policy_value_net.train_step(
                    state_batch,
                    mcts_probs_batch,
                    winner_batch,
                    self.learn_rate*self.lr_multiplier)
            # 输入状态，得到行动的可能性和状态值，按照batch进行输入
            new_probs, new_v &#x3D; self.policy_value_net.policy_value(state_batch)
            # 计算更新前后两次的loss差
            kl &#x3D; np.mean(np.sum(old_probs * (
                    np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
                    axis&#x3D;1)
            )
            if kl &gt; self.kl_targ * 4:  # early stopping if D_KL diverges badly
                break
        # 动态调整学习倍率 lr_multiplier
        if kl &gt; self.kl_targ * 2 and self.lr_multiplier &gt; 0.1:
            self.lr_multiplier &#x2F;&#x3D; 1.5
        elif kl &lt; self.kl_targ &#x2F; 2 and self.lr_multiplier &lt; 10:
            self.lr_multiplier *&#x3D; 1.5

        explained_var_old &#x3D; (1 -
                             np.var(np.array(winner_batch) - old_v.flatten()) &#x2F;
                             np.var(np.array(winner_batch)))
        explained_var_new &#x3D; (1 -
                             np.var(np.array(winner_batch) - new_v.flatten()) &#x2F;
                             np.var(np.array(winner_batch)))
        print((&quot;kl:&#123;:.5f&#125;,&quot;
               &quot;lr_multiplier:&#123;:.3f&#125;,&quot;
               &quot;loss:&#123;&#125;,&quot;
               &quot;entropy:&#123;&#125;,&quot;
               &quot;explained_var_old:&#123;:.3f&#125;,&quot;
               &quot;explained_var_new:&#123;:.3f&#125;&quot;
               ).format(kl,
                        self.lr_multiplier,
                        loss,
                        entropy,
                        explained_var_old,
                        explained_var_new))
        return loss, entropy

    # 用于评估训练网络的质量，评估一共10场play，返回比赛胜率（赢1分、输0分、平0.5分）
    def policy_evaluate(self, n_games&#x3D;10):
        current_mcts_player &#x3D; MCTSPlayer(self.policy_value_net.policy_value_fn,
                                         c_puct&#x3D;self.c_puct, n_playout&#x3D;self.n_playout)
        pure_mcts_player &#x3D; MCTS_Pure(c_puct&#x3D;5, n_playout&#x3D;self.pure_mcts_playout_num)
        win_cnt &#x3D; defaultdict(int)
        for i in range(n_games):
            # AI和弱AI（纯MCTS）对弈，不需要可视化 is_shown&#x3D;0，双方轮流执黑 start_player&#x3D;i % 2
            winner &#x3D; self.game.start_play(current_mcts_player, pure_mcts_player, start_player&#x3D;i % 2, is_shown&#x3D;0)
            win_cnt[winner] +&#x3D; 1
        # 计算胜率，平手计为0.5分
        win_ratio &#x3D; 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) &#x2F; n_games
        print(&quot;num_playouts:&#123;&#125;, win: &#123;&#125;, lose: &#123;&#125;, tie:&#123;&#125;&quot;.format(
                self.pure_mcts_playout_num,
                win_cnt[1], win_cnt[2], win_cnt[-1]))
        return win_ratio

    def run(self):
        # 开始训练
        try:
            # 训练game_batch_num次，每个batch比赛play_batch_size场
            for i in range(self.game_batch_num):
                # 收集自我对弈数据
                self.collect_selfplay_data(self.play_batch_size)
                print(&quot;batch i:&#123;&#125;, episode_len:&#123;&#125;&quot;.format(i+1, self.episode_len))
                if len(self.data_buffer) &gt; self.batch_size:
                    loss, entropy &#x3D; self.policy_update()
                # 判断当前模型的表现，保存最优模型
                if (i+1) % self.check_freq &#x3D;&#x3D; 0:
                    print(&quot;current self-play batch: &#123;&#125;&quot;.format(i+1))
                    win_ratio &#x3D; self.policy_evaluate()
                    # 保存当前策略
                    self.policy_value_net.save_model(&#39;.&#x2F;current_policy.model&#39;)
                    if win_ratio &gt; self.best_win_ratio:
                        print(&quot;发现新的最优策略，进行策略更新&quot;)
                        self.best_win_ratio &#x3D; win_ratio
                        # 更新最优策略
                        self.policy_value_net.save_model(&#39;.&#x2F;best_policy_10_10_5.model&#39;)
                        if (self.best_win_ratio &#x3D;&#x3D; 1.0 and
                                self.pure_mcts_playout_num &lt; 5000):
                            self.pure_mcts_playout_num +&#x3D; 1000
                            self.best_win_ratio &#x3D; 0.0
        except KeyboardInterrupt:
            print(&#39;\n\rquit&#39;)


if __name__ &#x3D;&#x3D; &#39;__main__&#39;:
    training_pipeline &#x3D; TrainPipeline()
    training_pipeline.run()
</code></pre>
<p>如果用Google的Cloab GPU训练需要挂载云盘<br></p>
<pre><code class="hljs python"># 挂载云盘
# Load the Drive helper and mount
from google.colab import drive
# This will prompt for authorization.
drive.mount(&#39;&#x2F;content&#x2F;drive&#39;)

#　加载模块
from drive.MyDrive.AlphaZero.game import Board, Game
from drive.MyDrive.AlphaZero.mcts_pure import MCTSPlayer as MCTS_Pure # 随机走子策略的AI
from drive.MyDrive.AlphaZero.mcts_alphaZero import MCTSPlayer # AlphaGo方式的AI
from drive.MyDrive.AlphaZero.policy_value_net_pytorch import PolicyValueNet  # Pytorch

# 其他的不变</code></pre>
<h3 id="6-human-play-py"><a href="#6-human-play-py" class="headerlink" title="6.human_play.py"></a><strong>6.human_play.py</strong></h3><pre><code class="hljs python"># 调用AI与人下五子棋
from __future__ import print_function
import pickle
from game import Board, Game #定义了棋盘Board
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
from policy_value_net_pytorch import PolicyValueNet  # Pytorch
#from policy_value_net import PolicyValueNet  # Pytorch


# 由人来输入下棋的位置
class Human(object):
    def __init__(self):
        self.player &#x3D; None

    def set_player_ind(self, p):
        self.player &#x3D; p

    # 通过input交互，得到用户的下棋位置 move
    def get_action(self, board):
        try:
            location &#x3D; input(&quot;输入你下棋的位置 x,y: &quot;)
            print(location)
            if isinstance(location, str):  # for python3
                location &#x3D; [int(n, 10) for n in location.split(&quot;,&quot;)]
            move &#x3D; board.location_to_move(location)
        except Exception as e:
            move &#x3D; -1
        if move &#x3D;&#x3D; -1 or move not in board.availables:
            print(&quot;输入位置非法&quot;)
            move &#x3D; self.get_action(board)
        return move

    def __str__(self):
        return &quot;Human &#123;&#125;&quot;.format(self.player)


# GoBang主程序
def run():
    n &#x3D; 5
    # 这里可以修改棋盘的大小，需要和AI Model的棋盘大小相等
    width, height &#x3D; 10, 10
    # 调用AI模型
    model_file &#x3D; &#39;best_policy_10_10_5.model&#39;
    try:
        # 初始化棋盘
        board &#x3D; Board(width&#x3D;width, height&#x3D;height, n_in_row&#x3D;n)
        game &#x3D; Game(board)

        # ############### human VS AI ###################
        # 加载AI Model
        best_policy &#x3D; PolicyValueNet(width, height, model_file &#x3D; model_file, use_gpu&#x3D;False)
        # 设置n_playout越大，效果越好，不需要设置is_selfplay，因为不需要进行AI训练
        mcts_player &#x3D; MCTSPlayer(best_policy.policy_value_fn, c_puct&#x3D;5, n_playout&#x3D;400)  

        # 也可以使用MCTS_Pure进行对弈，但是它太弱了
        # mcts_player &#x3D; MCTS_Pure(c_puct&#x3D;5, n_playout&#x3D;1000)

        # 创建人类player, 输入下棋位置比如 3,3
        human &#x3D; Human()

        # start_player&#x3D;1表示电脑先手，0表示人先手
        game.start_play(human, mcts_player, start_player&#x3D;1, is_shown&#x3D;1)
    except KeyboardInterrupt:
        print(&#39;\n\rquit&#39;)


if __name__ &#x3D;&#x3D; &#39;__main__&#39;:
    run()
</code></pre>

<h2 id="参考资料"><a href="#参考资料" class="headerlink" title="参考资料"></a>参考资料</h2><p><a href="https://link.zhihu.com/?target=https://github.com/junxiaosong/AlphaZero_Gomoku">https://link.zhihu.com/?target=https%3A//github.com/junxiaosong/AlphaZero_Gomoku</a></p>

            </div>
            <hr>
            <div>
              <div class="post-metas mb-3">
                
                  <div class="post-meta mr-3">
                    <i class="iconfont icon-category"></i>
                    
                      <a class="hover-with-bg" href="/walker_sue/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/">机器学习</a>
                    
                      <a class="hover-with-bg" href="/walker_sue/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/">强化学习</a>
                    
                  </div>
                
                
                  <div class="post-meta">
                    <i class="iconfont icon-tags"></i>
                    
                      <a class="hover-with-bg" href="/walker_sue/tags/BI/">BI</a>
                    
                      <a class="hover-with-bg" href="/walker_sue/tags/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0/">机器学习</a>
                    
                  </div>
                
              </div>
              
                <p class="note note-warning">本博客所有文章除特别声明外，均采用 <a target="_blank" href="https://creativecommons.org/licenses/by-sa/4.0/deed.zh" rel="nofollow noopener">CC BY-SA 4.0 协议</a> ，转载请注明出处！</p>
              
              
                <div class="post-prevnext row">
                  <article class="post-prev col-6">
                    
                    
                      <a href="/walker_sue/2021/03/17/week8-rfm/">
                        <i class="iconfont icon-arrowleft"></i>
                        <span class="hidden-mobile">数据分析思维</span>
                        <span class="visible-mobile">Vorheriger</span>
                      </a>
                    
                  </article>
                  <article class="post-next col-6">
                    
                    
                      <a href="/walker_sue/2021/03/08/week6-RL/">
                        <span class="hidden-mobile">强化学习与推荐系统</span>
                        <span class="visible-mobile">Nächster</span>
                        <i class="iconfont icon-arrowright"></i>
                      </a>
                    
                  </article>
                </div>
              
            </div>

            
          </article>
        </div>
      </div>
    </div>
    
      <div class="d-none d-lg-block col-lg-2 toc-container" id="toc-ctn">
        <div id="toc">
  <p class="toc-header"><i class="iconfont icon-list"></i>&nbsp;TOC</p>
  <div id="tocbot"></div>
</div>

      </div>
    
  </div>
</div>

<!-- Custom -->


    
  </main>

  
    <a id="scroll-top-button" href="#" role="button">
      <i class="iconfont icon-arrowup" aria-hidden="true"></i>
    </a>
  

  
    <div class="modal fade" id="modalSearch" tabindex="-1" role="dialog" aria-labelledby="ModalLabel"
     aria-hidden="true">
  <div class="modal-dialog modal-dialog-scrollable modal-lg" role="document">
    <div class="modal-content">
      <div class="modal-header text-center">
        <h4 class="modal-title w-100 font-weight-bold">Suchen</h4>
        <button type="button" id="local-search-close" class="close" data-dismiss="modal" aria-label="Close">
          <span aria-hidden="true">&times;</span>
        </button>
      </div>
      <div class="modal-body mx-3">
        <div class="md-form mb-5">
          <input type="text" id="local-search-input" class="form-control validate">
          <label data-error="x" data-success="v"
                 for="local-search-input">Stichwort</label>
        </div>
        <div class="list-group" id="local-search-result"></div>
      </div>
    </div>
  </div>
</div>
  

  

  <footer class="text-center mt-5 py-3">
  <div class="footer-content">
     <a href="https://hexo.io" target="_blank" rel="nofollow noopener"><span>Hexo</span></a> <i class="iconfont icon-love"></i> <a href="https://github.com/fluid-dev/hexo-theme-fluid" target="_blank" rel="nofollow noopener"><span>Fluid</span></a> 
  </div>
  

  

  
</footer>

<!-- SCRIPTS -->
<script  src="https://cdn.staticfile.org/jquery/3.4.1/jquery.min.js" ></script>
<script  src="https://cdn.staticfile.org/twitter-bootstrap/4.4.1/js/bootstrap.min.js" ></script>
<script  src="/walker_sue/js/debouncer.js" ></script>
<script  src="/walker_sue/js/main.js" ></script>

<!-- Plugins -->


  
    <script  src="/walker_sue/js/lazyload.js" ></script>
  



  



  <script defer src="https://cdn.staticfile.org/clipboard.js/2.0.6/clipboard.min.js" ></script>
  <script  src="/walker_sue/js/clipboard-use.js" ></script>







  <script  src="https://cdn.staticfile.org/tocbot/4.11.1/tocbot.min.js" ></script>
  <script>
    $(document).ready(function () {
      // Offset tocbot's active-link math by the top of the post board so
      // highlighting lines up with the fixed navbar.
      // FIX: jQuery's .offset() returns undefined when the selector
      // matches no element, so `.offset().top` would throw a TypeError
      // and abort TOC initialization entirely — fall back to 0 instead.
      var boardCtn = $('#board-ctn');
      var boardTop = boardCtn.length ? boardCtn.offset().top : 0;

      tocbot.init({
        tocSelector: '#tocbot',
        contentSelector: '#post-body',
        headingSelector: 'h1,h2,h3,h4,h5,h6',
        linkClass: 'tocbot-link',
        activeLinkClass: 'tocbot-active-link',
        listClass: 'tocbot-list',
        isCollapsedClass: 'tocbot-is-collapsed',
        collapsibleClass: 'tocbot-is-collapsible',
        collapseDepth: 0,
        scrollSmooth: true,
        headingsOffset: -boardTop
      });
      // Only reveal the TOC container when tocbot actually produced entries.
      if ($('.toc-list-item').length > 0) {
        $('#toc').css('visibility', 'visible');
      }
    });
  </script>



  <script  src="https://cdn.staticfile.org/typed.js/2.0.11/typed.min.js" ></script>
  <script>
    // Animate the page subtitle with a typewriter effect (Typed.js).
    // The instance is created paused, then started on DOM ready so the
    // cursor element can be styled (class "h2") before typing begins.
    function typing(id, title) {
        var options = {
            strings: [
              '  ',
              title + "&nbsp;",
            ],
            cursorChar: "_",
            typeSpeed: 70,
            loop: false,
        };
        var instance = new Typed('#' + id, options);
        instance.stop();
        $(document).ready(function () {
            $(".typed-cursor").addClass("h2");
            instance.start();
        });
    }

    typing("subtitle", "AlphaGo Zero实战")
    
  </script>


  <script  src="https://cdn.staticfile.org/anchor-js/4.2.2/anchor.min.js" ></script>
  <script>
    // Configure AnchorJS: show an anchor link to the right of each
    // heading inside the post body, visible on hover.
    anchors.options = {
      placement: "right",
      visible: "hover"
    };
    var el = "h1,h2,h3,h4,h5,h6".split(",");
    var res = [];
    // FIX: "item" was assigned without a declaration, leaking an
    // implicit global (and a ReferenceError under strict mode).
    for (var item of el) {
      res.push(".markdown-body > " + item)
    }
    anchors.add(res.join(", "))
  </script>



  <script  src="/walker_sue/js/local-search.js" ></script>
  <script>
    // Lazy-load the local search index: the first click on the search
    // input calls searchFunc (which fetches local-search.xml and wires
    // up the result list), then the handler detaches itself so the
    // index is only loaded once.
    var path = "/walker_sue/local-search.xml";
    var inputArea = document.querySelector("#local-search-input");
    function loadSearchIndexOnce() {
      searchFunc(path, 'local-search-input', 'local-search-result');
      inputArea.onclick = null;
    }
    inputArea.onclick = loadSearchIndexOnce;
  </script>



  <script  src="https://cdn.staticfile.org/fancybox/3.5.7/jquery.fancybox.min.js" ></script>
  <link  rel="stylesheet" href="https://cdn.staticfile.org/fancybox/3.5.7/jquery.fancybox.min.css" />

  <script>
    // Wrap each zoomable post image in an <a data-fancybox="images">
    // pointing at the image source, so Fancybox opens it in a gallery.
    // Images marked .no-zoom / [no-zoom] are excluded; [zoom] opts in.
    $('#post img:not(.no-zoom img, img[no-zoom]), img[zoom]').each(function () {
      var $img = $(this);
      var $anchor = $('<a>').attr({
        'data-fancybox': 'images',
        href: $img.attr('src')
      });
      $img.wrap($anchor);
    });
  </script>





  

  
    <!-- MathJax -->
    <script>
      // MathJax v3 configuration (must be defined before tex-svg.js loads).
      MathJax = {
        tex: {
          // Allow $...$ for inline math in addition to the default \(...\).
          inlineMath: [['$', '$'], ['\\(', '\\)']]
        },
        options: {
          renderActions: {
            // Compatibility shim: some Hexo math renderers emit v2-style
            // <script type="math/tex"> nodes, which MathJax v3 no longer
            // reads. Early in the render pipeline (priority 10), convert
            // each such node into a MathItem so v3 typesets it.
            findScript: [10, doc => {
              document.querySelectorAll('script[type^="math/tex"]').forEach(node => {
                // "; mode=display" in the type attribute marks display math.
                const display = !!node.type.match(/; *mode=display/);
                const math = new doc.options.MathItem(node.textContent, doc.inputJax[0], display);
                // Replace the script node with an empty text node that
                // anchors the MathItem's position in the document.
                const text = document.createTextNode('');
                node.parentNode.replaceChild(text, node);
                math.start = { node: text, delim: '', n: 0 };
                math.end = { node: text, delim: '', n: 0 };
                doc.math.push(math);
              });
            }, '', false],
            // After output is inserted (priority 200), tag list items that
            // directly contain rendered math so CSS can adjust their layout.
            insertedScript: [200, () => {
              document.querySelectorAll('mjx-container').forEach(node => {
                let target = node.parentNode;
                if (target.nodeName.toLowerCase() === 'li') {
                  target.parentNode.classList.add('has-jax');
                }
              });
            }, '', false]
          }
        }
      };
    </script>

    <script async src="https://cdn.staticfile.org/mathjax/3.0.5/es5/tex-svg.js" ></script>

  











</body>
</html>
