<!DOCTYPE html>
<html lang="zh-CN">
<head>
  <meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">
<meta name="generator" content="Hexo 5.4.0">
  <link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon-next.png">
  <link rel="icon" type="image/png" sizes="32x32" href="/images/favicon-32x32-next.png">
  <link rel="icon" type="image/png" sizes="16x16" href="/images/favicon-16x16-next.png">
  <link rel="mask-icon" href="/images/logo.svg" color="#222">

<link rel="stylesheet" href="/css/main.css">


<link rel="stylesheet" href="/lib/font-awesome/css/all.min.css">

<script id="hexo-configurations">
    // Theme-wide settings injected by Hexo at build time.
    // NexT: global namespace object for the theme's scripts; reuse it if a
    // previous script on the page already created it.
    var NexT = window.NexT || {};
    // CONFIG: serialized theme options (sidebar, copycode, motion, search,
    // comments, ...) read by the NexT front-end scripts loaded at the bottom
    // of the page (utils.js, motion.js, pisces.js, next-boot.js).
    var CONFIG = {"hostname":"example.com","root":"/","scheme":"Pisces","version":"7.8.0","exturl":false,"sidebar":{"position":"left","display":"post","padding":18,"offset":12,"onmobile":false},"copycode":{"enable":false,"show_result":false,"style":null},"back2top":{"enable":true,"sidebar":false,"scrollpercent":true},"bookmark":{"enable":false,"color":"#222","save":"auto"},"fancybox":false,"mediumzoom":false,"lazyload":false,"pangu":false,"comments":{"style":"tabs","active":null,"storage":true,"lazyload":false,"nav":null},"algolia":{"hits":{"per_page":10},"labels":{"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}},"localsearch":{"enable":false,"trigger":"auto","top_n_per_article":1,"unescape":false,"preload":false},"motion":{"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}}};
  </script>

  <meta name="description" content="此篇为学习强化学习的个人笔记，对应书籍Reinforcement Learning An Introduction，第五章Monte Carlo Methods">
<meta property="og:type" content="article">
<meta property="og:title" content="5-蒙特卡洛方法">
<meta property="og:url" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/index.html">
<meta property="og:site_name" content="祖浩の博客">
<meta property="og:description" content="此篇为学习强化学习的个人笔记，对应书籍Reinforcement Learning An Introduction，第五章Monte Carlo Methods">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211104192901237.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211107155114499.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211104205810722.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211107160347920.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211104221234369.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211106192526416.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211107210544404.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211106193622564.png">
<meta property="og:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211107210810250.png">
<meta property="article:published_time" content="2021-11-04T03:20:30.000Z">
<meta property="article:modified_time" content="2022-02-28T09:12:58.072Z">
<meta property="article:author" content="谢祖浩">
<meta property="article:tag" content="强化学习">
<meta property="article:tag" content="蒙特卡洛方法">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/image-20211104192901237.png">

<link rel="canonical" href="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/">


<script id="page-configurations">
  // Per-page flags consumed by the theme scripts (extends the global CONFIG
  // defined in the hexo-configurations script above).
  // Variable reference: https://hexo.io/docs/variables.html
  CONFIG.page = { sidebar: "", isHome: false, isPost: true, lang: 'zh-CN' };
</script>

  <title>5-蒙特卡洛方法 | 祖浩の博客</title>
  






  <noscript>
  <style>
  .use-motion .brand,
  .use-motion .menu-item,
  .sidebar-inner,
  .use-motion .post-block,
  .use-motion .pagination,
  .use-motion .comments,
  .use-motion .post-header,
  .use-motion .post-body,
  .use-motion .collection-header { opacity: initial; }

  .use-motion .site-title,
  .use-motion .site-subtitle {
    opacity: initial;
    top: initial;
  }

  .use-motion .logo-line-before i { left: initial; }
  .use-motion .logo-line-after i { right: initial; }
  </style>
</noscript>

</head>

<body itemscope itemtype="http://schema.org/WebPage">
  <div class="container use-motion">
    <div class="headband"></div>

    <header class="header" itemscope itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-container">
  <div class="site-nav-toggle">
    <div class="toggle" aria-label="切换导航栏">
      <span class="toggle-line toggle-line-first"></span>
      <span class="toggle-line toggle-line-middle"></span>
      <span class="toggle-line toggle-line-last"></span>
    </div>
  </div>

  <div class="site-meta">

    <a href="/" class="brand" rel="start">
      <span class="logo-line-before"><i></i></span>
      <h1 class="site-title">祖浩の博客</h1>
      <span class="logo-line-after"><i></i></span>
    </a>
  </div>

  <div class="site-nav-right">
    <div class="toggle popup-trigger">
    </div>
  </div>
</div>




<nav class="site-nav">
  <ul id="menu" class="main-menu menu">
        <li class="menu-item menu-item-home">

    <a href="/" rel="section"><i class="fa fa-home fa-fw"></i>首页</a>

  </li>
        <li class="menu-item menu-item-tags">

    <a href="/tags/" rel="section"><i class="fa fa-tags fa-fw"></i>标签</a>

  </li>
        <li class="menu-item menu-item-categories">

    <a href="/categories/" rel="section"><i class="fa fa-th fa-fw"></i>分类</a>

  </li>
        <li class="menu-item menu-item-archives">

    <a href="/archives/" rel="section"><i class="fa fa-archive fa-fw"></i>归档</a>

  </li>
  </ul>
</nav>




</div>
    </header>

    
  <div class="back-to-top">
    <i class="fa fa-arrow-up"></i>
    <span>0%</span>
  </div>


    <main class="main">
      <div class="main-inner">
        <div class="content-wrap">
          

          <div class="content post posts-expand">
            

    
  
  
  <article itemscope itemtype="http://schema.org/Article" class="post-block" lang="zh-CN">
    <link itemprop="mainEntityOfPage" href="http://example.com/2021/11/04/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/5-%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/">

    <span hidden itemprop="author" itemscope itemtype="http://schema.org/Person">
      <meta itemprop="image" content="/images/head.jpeg">
      <meta itemprop="name" content="谢祖浩">
      <meta itemprop="description" content="驽马十驾，功在不舍">
    </span>

    <span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="祖浩の博客">
    </span>
      <header class="post-header">
        <h1 class="post-title" itemprop="name headline">
          5-蒙特卡洛方法
        </h1>

        <div class="post-meta">
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-calendar"></i>
              </span>
              <span class="post-meta-item-text">发表于</span>

              <time title="创建时间：2021-11-04 11:20:30" itemprop="dateCreated datePublished" datetime="2021-11-04T11:20:30+08:00">2021-11-04</time>
            </span>
              <span class="post-meta-item">
                <span class="post-meta-item-icon">
                  <i class="far fa-calendar-check"></i>
                </span>
                <span class="post-meta-item-text">更新于</span>
                <time title="修改时间：2022-02-28 17:12:58" itemprop="dateModified" datetime="2022-02-28T17:12:58+08:00">2022-02-28</time>
              </span>
            <span class="post-meta-item">
              <span class="post-meta-item-icon">
                <i class="far fa-folder"></i>
              </span>
              <span class="post-meta-item-text">分类于</span>
                <span itemprop="about" itemscope itemtype="http://schema.org/Thing">
                  <a href="/categories/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" itemprop="url" rel="index"><span itemprop="name">强化学习</span></a>
                </span>
            </span>

          
            <div class="post-description">此篇为学习强化学习的个人笔记，对应书籍Reinforcement Learning An Introduction，第五章Monte Carlo Methods</div>

        </div>
      </header>

    
    
    
    <div class="post-body" itemprop="articleBody">

      
        <p>本章中考虑第一类实用的估计价值函数，并且寻找最优策略的算法。蒙特卡洛方法通过平均样本的回报来解决强化学习问题。</p>
<h1 id="蒙特卡洛预测">蒙特卡洛预测</h1>
<p>首先第一个要介绍的是：如何利用蒙特卡洛方法预测给定策略下状态的价值函数值。</p>
<p>根据贝尔曼公式我们知道，一个状态的价值取决于其未来回报的期望。 一个简单的想法，就是根据经验估计，对此状态后的所有回报求平均，随着越来越多的回报被观测到，平均值会收敛到期望值。</p>
<p>假设在给定策略<span class="math inline">\(\pi\)</span>下，对一个分幕式任务的状态价值函数<span class="math inline">\(v(s)\)</span>进行估计。在给定的某一幕中，每次状态<span class="math inline">\(s\)</span>的出现被称为对<span class="math inline">\(s\)</span>的一次<font face="仿宋"><strong>访问</strong></font>（<span class="math inline">\(visit\)</span>）；在同一幕中，<span class="math inline">\(s\)</span> 可能会被访问多次，第一次访问<span class="math inline">\(s\)</span>被称为<font face="仿宋"><strong>首次访问</strong></font>（<span class="math inline">\(first-visit\)</span>）。</p>
<p><font face="仿宋"><strong>首次访问型MC算法</strong></font>：用<span class="math inline">\(s\)</span>​的所有首次访问回报的平均值估计<span class="math inline">\(v_{\pi}(s)\)</span>.</p>
<p><font face="仿宋"><strong>每次访问型MC算法</strong></font>：用<span class="math inline">\(s\)</span>的所有访问回报的平均值估计<span class="math inline">\(v_{\pi}(s)\)</span>.</p>
<p><img src="image-20211104192901237.png" alt="image-20211104192901237" style="zoom:33%;" /></p>
<ol type="1">
<li>为每个状态都新建一个列表<span class="math inline">\(Returns(s)\)</span>，用于存储该状态之后的回报。</li>
<li>使用策略生成一个幕。对于此幕中每个状态，把状态第一次出现之后的回报都放入列表中。</li>
<li>对列表求平均，作为这个状态的价值（也可加权平均，引入折扣概念）</li>
<li>无限重复2-3步</li>
</ol>
<p><img src="image-20211107155114499.png" alt="image-20211107155114499" style="zoom:33%;" /></p>
<p><span class="math inline">\(Returns(s)\)</span>中会存放每一幕中的回报，新的幕产生的回报被添加到旧回报的后面。</p>
<h2 id="三个优点">三个优点</h2>
<ol type="1">
<li>可从实际经历中学习</li>
<li>可从模拟经历中学习</li>
<li>计算某一状态的计算开销与状态个数无关，适合特定状态的价值计算。</li>
</ol>
<h1 id="动作价值的蒙特卡洛预测">动作价值的蒙特卡洛预测</h1>
<p>如果无法得到环境的模型，计算动作价值比计算状态价值会更有用。动作价值指“状态-动作”二元组的价值。在没有模型的情况下，仅仅拥有状态价值函数是不够的（<font color= red>WHY?</font>），必须显式地确定每个动作的价值来确定一个策略。</p>
<p>所以目标转化为用蒙特卡洛算法确定<span class="math inline">\(q_*\)</span>​，首先是动作价值函数的策略评估问题。</p>
<p>只需将之前对状态的访问，修改为对“状态-动作”二元组的访问，蒙特卡洛方法就可用几乎和之前完全相同的方法解决这个问题。</p>
<p>但同时我们遇到的问题：某些“状态-动作”二元组可能永远都不会被访问。这是一个保持试探的问题，我们提出一种<strong>假设</strong>：把指定的二元组作为起点出发，同时保证每个二元组都有非零概率被访问，这被称为“<strong>试探性出发</strong>”。</p>
<h1 id="蒙特卡洛控制">蒙特卡洛控制</h1>
<p>首先讨论<strong>经典迭代算法</strong>的蒙特卡洛版本。从任意策略<span class="math inline">\(\pi_0\)</span>​开始，交替进行<strong>完整</strong>的策略评估和策略改进。</p>
<p>做出两条假设来保证每个二元组都会被访问：</p>
<ul>
<li>试探性出发假设</li>
<li>策略评估时，有无穷多幕样本序列</li>
</ul>
<p>如果想要得到实际可用的算法，这两条假设必须去除；这在后面再讨论。</p>
<p>策略改进就是在当前的动作价值函数上使用贪心算法：</p>
<p><span class="math display">\[
\pi(s) \doteq \arg \max _{a} q(s, a)
\]</span></p>
<p>对于蒙特卡洛策略迭代，可以使用幕交替进行评估和改进，每一幕结束后，用观测到的回报进行策略评估，然后在该幕访问到的每一个状态上进行策略的改进，这个算法被称为<font face="仿宋"><strong>基于试探性出发的蒙特卡洛（蒙特卡洛ES）</strong></font></p>
<p><img src="image-20211104205810722.png" alt="image-20211104205810722" style="zoom: 33%;" /></p>
<p>相对于之前首次访问型MC算法，这个算法只有两个变化：</p>
<ul>
<li>加入了试探性假设，保证每个二元组都有概率被选中</li>
<li>由对价值<span class="math inline">\(v_{\pi}\)</span>​的评估，变为了对最优策略<span class="math inline">\(\pi_{*}\)</span>​的寻找，加入了对策略的更新：每次更新完二元组价值后，更改策略为——总是选择价值更高的二元组</li>
</ul>
<p><img src="image-20211107160347920.png" alt="image-20211107160347920" style="zoom:33%;" /></p>
<p>关于策略的更新可看下面这段代码:</p>
<figure class="highlight python"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment"># 某个状态-动作二元组由4个元素所确认：[玩家点数,庄家点数,是否有可用ace,要/停牌]</span></span><br><span class="line"><span class="function"><span class="keyword">def</span> <span class="title">behavior_policy</span>(<span class="params">usable_ace, player_sum, dealer_card</span>):</span></span><br><span class="line">    usable_ace = <span class="built_in">int</span>(usable_ace)</span><br><span class="line">    player_sum -= <span class="number">12</span></span><br><span class="line">    dealer_card -= <span class="number">1</span></span><br><span class="line">    <span class="comment"># 对当前的价值进行评估</span></span><br><span class="line">    values_ = state_action_values[player_sum, dealer_card, usable_ace, :] / \</span><br><span class="line">              state_action_pair_count[player_sum, dealer_card, usable_ace, :]</span><br><span class="line">    <span class="comment"># 选择当前状态下价值更高的动作，如果价值相同则随机选择</span></span><br><span class="line">    <span class="keyword">return</span> np.random.choice([action_ <span class="keyword">for</span> action_, value_ <span class="keyword">in</span> <span class="built_in">enumerate</span>(values_) <span class="keyword">if</span> value_ == np.<span class="built_in">max</span>(values_)])</span><br><span class="line">    <span class="comment"># 这句return和下面的注释是一个效果</span></span><br><span class="line">    <span class="comment"># L = []</span></span><br><span class="line">    <span class="comment"># for action_, value_ in enumerate(values_): # action 返回的是索引，但值正好和策略对应，所以返回的也是策略</span></span><br><span class="line">    <span class="comment">#     if value_ == np.max(values_):</span></span><br><span class="line">    <span class="comment">#         L.append(action_)</span></span><br><span class="line">    <span class="comment"># return np.random.choice(L)</span></span><br></pre></td></tr></table></figure>
<p>初始情况下，所有二元组的价值未知，所以是随机选择；一旦我们结束了第一幕，经历了一些二元组，二元组的价值被我们逐渐摸清楚，我们的策略就变为选择价值高的二元组。不断的经历，不断改进我们的策略，每一幕结束后，策略会更新，并且用于下一幕中二元组的选择，这被称为<font face="仿宋"><strong>同轨策略</strong></font>，后面会给出更具体的定义。</p>
<h2 id="两个假设的解决">两个假设的解决</h2>
<p>对于无穷多幕序列的假设，有两个方法解决：（<font color=red>没看懂？</font>）</p>
<ul>
<li>在每次策略评估中，对<span class="math inline">\(q_{\pi_k}\)</span>做出最好的逼近</li>
<li>不要求在策略改进前就完成策略评估</li>
</ul>
<p>第二个假设在下一节中进行解决：</p>
<h1 id="没有试探性出发假设的蒙特卡洛控制">没有试探性出发假设的蒙特卡洛控制</h1>
<p>试探性假设的一般解决方案是智能体能持续不断的选择所有可能。有两种方法可以保证这一点：</p>
<p><font face ="仿宋">同轨策略</font>（<span class="math inline">\(on-policy\)</span>​​）：用于生成采样数据序列的策略和用于评估（改进）的策略相同。（蒙特卡洛ES）</p>
<p><font face ="仿宋">离轨策略</font>（<span class="math inline">\(off-policy\)</span>​）：用于生成采样数据序列的策略和用于评估（改进）的策略不同。</p>
<p>前面，我们具体解释了同轨策略：</p>
<blockquote>
<p>不断的经历，不断改进我们的策略，每一幕结束后，策略会更新，并且用于下一幕中二元组的选择</p>
</blockquote>
<p>那么，离轨策略就是：</p>
<blockquote>
<p>不断的经历，不断改进我们的<strong>目标策略</strong>，每一幕结束后，<strong>目标策略</strong>会更新，但不用于下一幕中二元组的选择，下一幕中二元组的选择遵循<strong>行动策略</strong></p>
</blockquote>
<p>本节中，先介绍如何设计一个同轨策略的蒙特卡洛控制方法。</p>
<p>在同轨策略中，策略一般是“软性”的：对于任意的状态<span class="math inline">\(s\)</span>和动作<span class="math inline">\(a\)</span>，有<span class="math inline">\(\pi(a|s)&gt;0\)</span>，但它会逼近一个确定策略。可以使用的方法有：<span class="math inline">\(\epsilon\)</span>-贪心策略，大部分情况选择最优动作，以<span class="math inline">\(\epsilon\)</span>的概率去等概率选择动作。<span class="math inline">\(\epsilon\)</span>-贪心策略是<span class="math inline">\(\epsilon\)</span>-软性策略的一个例子。所有非贪心动作有<span class="math inline">\(\frac{\epsilon}{|\mathcal{A}(s)|}\)</span> 的概率被选中。在所有<span class="math inline">\(\epsilon\)</span>-软性策略中，<span class="math inline">\(\epsilon\)</span>-贪心策略是最接近贪心策略的。</p>
<p><img src="image-20211104221234369.png" alt="image-20211104221234369" style="zoom: 33%;" /></p>
<p>相对于之前的蒙特卡洛ES，这里取消了试探性假设，并且在更新策略时，使用的是<span class="math inline">\(\epsilon\)</span>​-贪心策略，保证每个动作都有非零概率被选中。</p>
<h1 id="基于重要度采样的离轨策略">基于重要度采样的离轨策略</h1>
<p>离轨策略使用两个策略：一个用来学习并且最终成为最优策略；另一个更加有试探性质，用来产生智能体的行动样本。用于学习的策略叫做<font face="仿宋"><strong>目标策略</strong></font>（<span class="math inline">\(target~policy\)</span>），用于产生行动样本的策略被称为<font face="仿宋"><strong>行动策略</strong></font>（<span class="math inline">\(behavior~policy\)</span>）。</p>
<p>本节中，通过讨论预测问题来对离轨策略方法学习，目标策略和行动策略固定，希望预测<span class="math inline">\(v_{\pi}\)</span>​和<span class="math inline">\(q_{\pi}\)</span>​，<span class="math inline">\(\pi\)</span>​为目标策略，<span class="math inline">\(b\)</span>​为行动策略，样本为策略<span class="math inline">\(b\)</span>产生的若干幕。</p>
<p>为了使用从<span class="math inline">\(b\)</span>得到的多幕样本序列去预测<span class="math inline">\(\pi\)</span>，要求在<span class="math inline">\(\pi\)</span>下发生的每个动作都至少偶尔能在<span class="math inline">\(b\)</span>下发生，这被称为覆盖假设。</p>
<h2 id="重要度采样比">重要度采样比</h2>
<p>首先我们引入<font face="仿宋"><strong>重要度采样比</strong></font>（<span class="math inline">\(importance-sampling~ratio\)</span>​​）的概念:</p>
<p>轨迹：指一条马尔科夫链，从初始状态到幕结束的一条由动作和状态组成的链</p>
<p>给定起始状态<span class="math inline">\(S_t\)</span>，结束时刻为<span class="math inline">\(T\)</span>，后续的状态-动作轨迹<span class="math inline">\(A_t,S_{t+1}...S_T\)</span>在策略<span class="math inline">\(\pi\)</span>​下发生的概率是：</p>
<p><span class="math display">\[
\begin{aligned}
\operatorname{Pr}\{&amp;\left.A_{t}, S_{t+1}, A_{t+1}, \ldots, S_{T} \mid S_{t}, A_{t: T-1} \sim \pi\right\} \\
&amp;=\pi\left(A_{t} \mid S_{t}\right) p\left(S_{t+1} \mid S_{t}, A_{t}\right) \pi\left(A_{t+1} \mid S_{t+1}\right) \cdots p\left(S_{T} \mid S_{T-1}, A_{T-1}\right) \\
&amp;=\prod_{k=t}^{T-1} \pi\left(A_{k} \mid S_{k}\right) p\left(S_{k+1} \mid S_{k}, A_{k}\right),
\end{aligned}
\]</span></p>
<p>重要度采样比定义为<strong>某一轨迹在目标策略下发生和在行动策略下发生概率之比</strong>：</p>
<p><span class="math display">\[
\rho_{t: T-1} \doteq \frac{\prod_{k=t}^{T-1} \pi\left(A_{k} \mid S_{k}\right) p\left(S_{k+1} \mid S_{k}, A_{k}\right)}{\prod_{k=t}^{T-1} b\left(A_{k} \mid S_{k}\right) p\left(S_{k+1} \mid S_{k}, A_{k}\right)}=\prod_{k=t}^{T-1} \frac{\pi\left(A_{k} \mid S_{k}\right)}{b\left(A_{k} \mid S_{k}\right)}
\]</span></p>
<p>可见，虽然轨迹的发生概率依赖于状态转移概率，但重要度采样比只依赖于具体的策略，而和MDP的动态特性（状态转移概率）无关。注意此处下标：<span class="math inline">\(t\)</span>表示从<span class="math inline">\(t\)</span>时刻开始，<span class="math inline">\(T-1\)</span>表示结束时刻为<span class="math inline">\(T\)</span>.</p>
<h2 id="重要度采样">重要度采样</h2>
<p>我们希望估计目标策略下的回报，但现在我们只有行动策略下的回报<span class="math inline">\(G_t\)</span>，使用重要度采样比来定义目标策略回报的期望：</p>
<p><span class="math display">\[
E[\rho_{t:T-1} \cdot G_t|S_t=s] = v_{\pi}(s)
\]</span></p>
<p>可以理解为：根据轨迹在目标策略和行动策略中出现的相对概率，对回报值进行加权</p>
<p>下面介绍：通过观察由行动策略<span class="math inline">\(b\)</span>​产生的多幕序列的回报，来预测目标策略<span class="math inline">\(\pi\)</span>​的价值函数<span class="math inline">\(v_{\pi}\)</span>​的蒙特卡洛算法：</p>
<p>为方便，对时刻的编号是连续的，即使有了幕的变化，时刻也一直顺序编号。</p>
<p>定义：</p>
<p><span class="math inline">\(\mathcal{T}(s)\)</span>：所有访问过状态<span class="math inline">\(s\)</span>的时刻集合（对于首次访问，只包含首次访问<span class="math inline">\(s\)</span>的时刻；对于每次访问，包含每一次）</p>
<p><span class="math inline">\(T(t)\)</span>：时刻<span class="math inline">\(t\)</span>后的首次终止</p>
<p><span class="math inline">\(G_t\)</span>：在<span class="math inline">\(t\)</span>之后到达<span class="math inline">\(T(t)\)</span>时的回报值</p>
<p><span class="math inline">\(\left\{G_t\right\}_{t \in \mathcal{T}(s)}\)</span>即为状态<span class="math inline">\(s\)</span>的回报，<span class="math inline">\(\left\{\rho_{t:T(t)-1} \right\}_{t \in \mathcal{T}(s)}\)</span>为其对应的重要度采样比，普通重要度采样（<span class="math inline">\(ordinary~ importance~sampling\)</span>）公式如下：</p>
<p><span class="math display">\[
V(s) \doteq \frac{\sum_{t \in \mathcal{T}(s)} \rho_{t: T(t)-1} G_{t}}{|\mathcal{T}(s)|}
\]</span></p>
<p>另一种是加权重要度采样（<span class="math inline">\(weighted~importance~sampling\)</span>）：</p>
<p><span class="math display">\[
\begin{equation} \label{weighted importance sampling}
V(s) \doteq \frac{\sum_{t \in \mathcal{T}(s)} \rho_{t: T(t)-1} G_{t}}{\sum_{t \in \mathcal{T}(s)} \rho_{t: T(t)-1}}
\end{equation}
\]</span></p>
<p>定义式<span class="math inline">\(\eqref{weighted importance sampling}\)</span>​​分母为0时，值为0.</p>
<h2 id="增量式实现">增量式实现</h2>
<p>讨论加权重要度采样，回报序列为<span class="math inline">\(G_{n}\)</span>​，权重为<span class="math inline">\(W_i\)</span>​（如：<span class="math inline">\(W_i = \rho_{t:T(t)-1}\)</span>），改写公式为：</p>
<p><span class="math display">\[
V_{n} \doteq \frac{\sum_{k=1}^{n-1} W_{k} G_{k}}{\sum_{k=1}^{n-1} W_{k}}
\]</span></p>
<p>增量公式如下：</p>
<p><span class="math display">\[
\begin{aligned}
&amp;V_{n+1} \doteq V_{n}+\frac{W_{n}}{C_{n}}\left[G_{n}-V_{n}\right], \quad n \geq 1 \\
&amp;C_{n+1} \doteq C_{n}+W_{n+1}
\end{aligned}
\]</span></p>
<p>定义<span class="math inline">\(C_0 = 0\)</span>.</p>
<p><img src="image-20211106192526416.png" alt="image-20211106192526416" style="zoom:33%;" /></p>
<p>实际上离轨的预测和蒙特卡洛预测是完全一样的，只是价值的更新方式不同：蒙特卡洛预测对价值的更新使用的是平均值，而离轨预测使用的是重要度采样的方法。</p>
<p><img src="image-20211107210544404.png" alt="image-20211107210544404" style="zoom:33%;" /></p>
<h2 id="离轨策略蒙特卡洛控制">离轨策略蒙特卡洛控制</h2>
<p>离轨策略蒙特卡洛控制遵循行动策略并对目标策略进行学习和改进。我们需要满足覆盖假设，要求行动策略是软性的，保证目标策略可能做出的动作都有非零的概率被选中。</p>
<p><img src="image-20211106193622564.png" alt="image-20211106193622564" style="zoom:33%;" /></p>
<p>相对预测，当然是加入了对目标策略的更新；使用的更新策略就是贪心策略；编程时需要注意，因为目标策略更新，新的一幕中不同的二元组对应的<span class="math inline">\(\rho\)</span>也要更新。</p>
<p><img src="image-20211107210810250.png" alt="image-20211107210810250" style="zoom:50%;" /></p>
<p>在程序中，行动策略是随机的，并且进行非常多幕的探索，这样才能保证二元组的价值收敛于正确值。</p>
<h1 id="本章小结">本章小结</h1>
<p>本章实质上就介绍了4个算法：蒙特卡洛预测，蒙特卡洛控制，离轨预测，离轨控制。</p>
<p>其他算法都是为了解决两个假设：一个是试探性假设，使用软性策略解决（就是重新分配概率，使得都有可能）。一个是无限幕的序列，使用类似蒙特卡洛ES的方法进行，每幕结束后就评估加改进。</p>

    </div>

    
    
    

      <footer class="post-footer">
          <div class="post-tags">
              <a href="/tags/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/" rel="tag"># 强化学习</a>
              <a href="/tags/%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%96%B9%E6%B3%95/" rel="tag"># 蒙特卡洛方法</a>
          </div>

        


        
    <div class="post-nav">
      <div class="post-nav-item">
    <a href="/2021/10/30/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/4-%E5%8A%A8%E6%80%81%E8%A7%84%E5%88%92/" rel="prev" title="4-动态规划">
      <i class="fa fa-chevron-left"></i> 4-动态规划
    </a></div>
      <div class="post-nav-item">
    <a href="/2022/02/28/%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A0/6-%E6%B7%B1%E5%BA%A6%E5%BC%BA%E5%8C%96%E5%AD%A6%E4%B9%A01/" rel="next" title="6-深度强化学习1">
      6-深度强化学习1 <i class="fa fa-chevron-right"></i>
    </a></div>
    </div>
      </footer>
    
  </article>
  
  
  



          </div>
          

<script>
  // Restore the reader's last-active comment tab once the theme's tabs
  // component announces itself; fall back to the configured default tab.
  window.addEventListener('tabs:register', () => {
    let active = CONFIG.comments.activeClass;
    if (CONFIG.comments.storage) {
      active = localStorage.getItem('comments_active') || active;
    }
    if (!active) return;
    const tab = document.querySelector(`a[href="#comment-${active}"]`);
    if (tab) tab.click();
  });
  if (CONFIG.comments.storage) {
    // Persist the clicked comment tab so it can be restored on the next visit.
    window.addEventListener('tabs:click', event => {
      if (!event.target.matches('.tabs-comment .tab-content .tab-pane')) return;
      localStorage.setItem('comments_active', event.target.classList[1]);
    });
  }
</script>

        </div>
          
  
  <div class="toggle sidebar-toggle">
    <span class="toggle-line toggle-line-first"></span>
    <span class="toggle-line toggle-line-middle"></span>
    <span class="toggle-line toggle-line-last"></span>
  </div>

  <aside class="sidebar">
    <div class="sidebar-inner">

      <ul class="sidebar-nav motion-element">
        <li class="sidebar-nav-toc">
          文章目录
        </li>
        <li class="sidebar-nav-overview">
          站点概览
        </li>
      </ul>

      <!--noindex-->
      <div class="post-toc-wrap sidebar-panel">
          <div class="post-toc motion-element"><ol class="nav"><li class="nav-item nav-level-1"><a class="nav-link" href="#%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E9%A2%84%E6%B5%8B"><span class="nav-number">1.</span> <span class="nav-text">蒙特卡洛预测</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#%E4%B8%89%E4%B8%AA%E4%BC%98%E7%82%B9"><span class="nav-number">1.1.</span> <span class="nav-text">三个优点</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#%E5%8A%A8%E4%BD%9C%E4%BB%B7%E5%80%BC%E7%9A%84%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E9%A2%84%E6%B5%8B"><span class="nav-number">2.</span> <span class="nav-text">动作价值的蒙特卡洛预测</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%8E%A7%E5%88%B6"><span class="nav-number">3.</span> <span class="nav-text">蒙特卡洛控制</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#%E4%B8%A4%E4%B8%AA%E5%81%87%E8%AE%BE%E7%9A%84%E8%A7%A3%E5%86%B3"><span class="nav-number">3.1.</span> <span class="nav-text">两个假设的解决</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#%E6%B2%A1%E6%9C%89%E8%AF%95%E6%8E%A2%E6%80%A7%E5%87%BA%E5%8F%91%E5%81%87%E8%AE%BE%E7%9A%84%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%8E%A7%E5%88%B6"><span class="nav-number">4.</span> <span class="nav-text">没有试探性出发假设的蒙特卡洛控制</span></a></li><li class="nav-item nav-level-1"><a class="nav-link" href="#%E5%9F%BA%E4%BA%8E%E9%87%8D%E8%A6%81%E5%BA%A6%E9%87%87%E6%A0%B7%E7%9A%84%E7%A6%BB%E8%BD%A8%E7%AD%96%E7%95%A5"><span class="nav-number">5.</span> <span class="nav-text">基于重要度采样的离轨策略</span></a><ol class="nav-child"><li class="nav-item nav-level-2"><a class="nav-link" href="#%E9%87%8D%E8%A6%81%E5%BA%A6%E9%87%87%E6%A0%B7%E6%AF%94"><span class="nav-number">5.1.</span> <span class="nav-text">重要度采样比</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" 
href="#%E9%87%8D%E8%A6%81%E5%BA%A6%E9%87%87%E6%A0%B7"><span class="nav-number">5.2.</span> <span class="nav-text">重要度采样</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#%E5%A2%9E%E9%87%8F%E5%BC%8F%E5%AE%9E%E7%8E%B0"><span class="nav-number">5.3.</span> <span class="nav-text">增量式实现</span></a></li><li class="nav-item nav-level-2"><a class="nav-link" href="#%E7%A6%BB%E8%BD%A8%E7%AD%96%E7%95%A5%E8%92%99%E7%89%B9%E5%8D%A1%E6%B4%9B%E6%8E%A7%E5%88%B6"><span class="nav-number">5.4.</span> <span class="nav-text">离轨策略蒙特卡洛控制</span></a></li></ol></li><li class="nav-item nav-level-1"><a class="nav-link" href="#%E6%9C%AC%E7%AB%A0%E5%B0%8F%E7%BB%93"><span class="nav-number">6.</span> <span class="nav-text">本章小结</span></a></li></ol></div>
      </div>
      <!--/noindex-->

      <div class="site-overview-wrap sidebar-panel">
        <div class="site-author motion-element" itemprop="author" itemscope itemtype="http://schema.org/Person">
    <img class="site-author-image" itemprop="image" alt="谢祖浩"
      src="/images/head.jpeg">
  <p class="site-author-name" itemprop="name">谢祖浩</p>
  <div class="site-description" itemprop="description">驽马十驾，功在不舍</div>
</div>
<div class="site-state-wrap motion-element">
  <nav class="site-state">
      <div class="site-state-item site-state-posts">
          <a href="/archives/">
        
          <span class="site-state-item-count">9</span>
          <span class="site-state-item-name">日志</span>
        </a>
      </div>
      <div class="site-state-item site-state-categories">
            <a href="/categories/">
          
        <span class="site-state-item-count">2</span>
        <span class="site-state-item-name">分类</span></a>
      </div>
      <div class="site-state-item site-state-tags">
            <a href="/tags/">
          
        <span class="site-state-item-count">13</span>
        <span class="site-state-item-name">标签</span></a>
      </div>
  </nav>
</div>



      </div>

    </div>
  </aside>
  <div id="sidebar-dimmer"></div>


      </div>
    </main>

    <footer class="footer">
      <div class="footer-inner">
        

        

<div class="copyright">
  
  &copy; 
  <span itemprop="copyrightYear">2022</span>
  <span class="with-love">
    <i class="fa fa-heart"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">谢祖浩</span>
</div>
  <div class="powered-by">由 <a href="https://hexo.io/" class="theme-link" rel="noopener" target="_blank">Hexo</a> & <a href="https://pisces.theme-next.org/" class="theme-link" rel="noopener" target="_blank">NexT.Pisces</a> 强力驱动
  </div>

        








      </div>
    </footer>
  </div>

  
  <script src="/lib/anime.min.js"></script>
  <script src="/lib/velocity/velocity.min.js"></script>
  <script src="/lib/velocity/velocity.ui.min.js"></script>

<script src="/js/utils.js"></script>

<script src="/js/motion.js"></script>


<script src="/js/schemes/pisces.js"></script>


<script src="/js/next-boot.js"></script>




  















  

  
      

<script>
  // Configure and lazily load MathJax 3 on first use; if MathJax is already
  // present (e.g. after an in-page navigation), just re-typeset the document.
  if (typeof MathJax === 'undefined') {
    window.MathJax = {
      loader: {
        source: {
          // Map legacy amsCd/AMScd package spellings to the v3 'amscd' extension.
          '[tex]/amsCd': '[tex]/amscd',
          '[tex]/AMScd': '[tex]/amscd'
        }
      },
      tex: {
        // Additionally accept single-dollar inline math; number equations AMS-style.
        inlineMath: {'[+]': [['$', '$']]},
        tags: 'ams'
      },
      options: {
        renderActions: {
          // Collect <script type="math/tex"> nodes emitted by the Markdown
          // renderer and queue their contents as MathJax math items, replacing
          // each script node with an empty text node as the render anchor.
          findScript: [10, doc => {
            document.querySelectorAll('script[type^="math/tex"]').forEach(node => {
              const display = !!node.type.match(/; *mode=display/);
              const math = new doc.options.MathItem(node.textContent, doc.inputJax[0], display);
              const text = document.createTextNode('');
              node.parentNode.replaceChild(text, node);
              math.start = {node: text, delim: '', n: 0};
              math.end = {node: text, delim: '', n: 0};
              doc.math.push(math);
            });
          }, '', false],
          // After rendering, mark lists whose items contain math so the
          // theme's CSS can adjust their layout.
          insertedScript: [200, () => {
            document.querySelectorAll('mjx-container').forEach(node => {
              let target = node.parentNode;
              if (target.nodeName.toLowerCase() === 'li') {
                target.parentNode.classList.add('has-jax');
              }
            });
          }, '', false]
        }
      }
    };
    (function () {
      var script = document.createElement('script');
      // Explicit https: scheme — a protocol-relative URL ('//cdn...') resolves
      // against the page's scheme and breaks when opened via file://.
      script.src = 'https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js';
      script.defer = true;
      document.head.appendChild(script);
    })();
  } else {
    MathJax.startup.document.state(0);
    MathJax.texReset();
    MathJax.typeset();
  }
</script>

    

  

</body>
</html>
