<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">


<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
  <head>
    <!-- Sphinx-generated page head; the file's XHTML 1.0 Transitional
         serialization (self-closing void elements, explicit type attrs)
         is preserved throughout. -->
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
    <!-- Viewport meta added: it was missing, so mobile browsers rendered the
         page at a desktop layout width. Zoom is intentionally left enabled. -->
    <meta name="viewport" content="width=device-width, initial-scale=1" />

    <title>pgmpy.inference.ExactInference &#8212; pgmpy 0.1.2 documentation</title>

    <link rel="stylesheet" href="../../../_static/sphinxdoc.css" type="text/css" />
    <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" />

    <!-- Configuration consumed by doctools.js / searchtools; paths are
         relative because this page sits three levels below the docs root. -->
    <script type="text/javascript">
      var DOCUMENTATION_OPTIONS = {
        URL_ROOT:    '../../../',
        VERSION:     '0.1.2',
        COLLAPSE_INDEX: false,
        FILE_SUFFIX: '.html',
        HAS_SOURCE:  true,
        SOURCELINK_SUFFIX: '.txt'
      };
    </script>
    <script type="text/javascript" src="../../../_static/jquery.js"></script>
    <script type="text/javascript" src="../../../_static/underscore.js"></script>
    <script type="text/javascript" src="../../../_static/doctools.js"></script>
    <link rel="index" title="Index" href="../../../genindex.html" />
    <link rel="search" title="Search" href="../../../search.html" />
  </head>
  <body role="document">
    <!-- Top "related" bar (Sphinx sphinxdoc theme): index/module quick links
         floated right, breadcrumb trail on the left. Inline whitespace between
         the <a>/<li> tags is rendering-significant, so the generated layout is
         kept byte-for-byte. -->
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <!-- Keyboard shortcuts: accesskey I jumps to the general index,
             accesskey U (below) goes up to the module-code listing. -->
        <li class="right" style="margin-right: 10px">
          <a href="../../../genindex.html" title="General Index"
             accesskey="I">index</a></li>
        <li class="right" >
          <a href="../../../py-modindex.html" title="Python Module Index"
             >modules</a> |</li>
        <!-- Breadcrumb: docs root &#187; Module code (&#187; renders as ») -->
        <li class="nav-item nav-item-0"><a href="../../../index.html">pgmpy 0.1.2 documentation</a> &#187;</li>
          <li class="nav-item nav-item-1"><a href="../../index.html" accesskey="U">Module code</a> &#187;</li> 
      </ul>
    </div>
      <!-- Sidebar: project logo plus the quick-search form. -->
      <div class="sphinxsidebar" role="navigation" aria-label="main navigation">
        <div class="sphinxsidebarwrapper">
            <p class="logo"><a href="../../../index.html">
              <img class="logo" src="../../../_static/logo.png" alt="Logo"/>
            </a></p>
<!-- Hidden by default and revealed by the script below once jQuery has run,
     so non-JS users never see a search box that cannot work. -->
<div id="searchbox" style="display: none" role="search">
  <h3>Quick search</h3>
    <form class="search" action="../../../search.html" method="get">
      <!-- aria-label added: the query field had no associated label, so
           assistive technology announced an unnamed text field. (aria-* is
           not in the XHTML 1.0 DTD but is honored by browsers/AT.) -->
      <div><input type="text" name="q" aria-label="Quick search" /></div>
      <div><input type="submit" value="Go" /></div>
      <input type="hidden" name="check_keywords" value="yes" />
      <input type="hidden" name="area" value="default" />
    </form>
</div>
<!-- type="text/javascript" added for consistency with every other script
     tag in this document. -->
<script type="text/javascript">$('#searchbox').show(0);</script>
        </div>
      </div>

    <div class="document">
      <div class="documentwrapper">
        <div class="bodywrapper">
          <div class="body" role="main">
            
  <h1>Source code for pgmpy.inference.ExactInference</h1><div class="highlight"><pre>
<span></span><span class="ch">#!/usr/bin/env python3</span>
<span class="kn">import</span> <span class="nn">copy</span>
<span class="kn">import</span> <span class="nn">itertools</span>

<span class="kn">import</span> <span class="nn">networkx</span> <span class="k">as</span> <span class="nn">nx</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">from</span> <span class="nn">pgmpy.extern.six.moves</span> <span class="k">import</span> <span class="nb">filter</span><span class="p">,</span> <span class="nb">range</span>

<span class="kn">from</span> <span class="nn">pgmpy.extern.six</span> <span class="k">import</span> <span class="n">string_types</span>
<span class="kn">from</span> <span class="nn">pgmpy.factors</span> <span class="k">import</span> <span class="n">factor_product</span>
<span class="kn">from</span> <span class="nn">pgmpy.inference</span> <span class="k">import</span> <span class="n">Inference</span>
<span class="kn">from</span> <span class="nn">pgmpy.models</span> <span class="k">import</span> <span class="n">JunctionTree</span>
<span class="kn">from</span> <span class="nn">pgmpy.utils</span> <span class="k">import</span> <span class="n">StateNameDecorator</span>


<div class="viewcode-block" id="VariableElimination"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination">[docs]</a><span class="k">class</span> <span class="nc">VariableElimination</span><span class="p">(</span><span class="n">Inference</span><span class="p">):</span>

    <span class="nd">@StateNameDecorator</span><span class="p">(</span><span class="n">argument</span><span class="o">=</span><span class="s1">&#39;evidence&#39;</span><span class="p">,</span> <span class="n">return_val</span><span class="o">=</span><span class="kc">None</span><span class="p">)</span>
    <span class="k">def</span> <span class="nf">_variable_elimination</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="p">,</span> <span class="n">operation</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">elimination_order</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Implementation of a generalized variable elimination.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list, array-like</span>
<span class="sd">            variables that are not to be eliminated.</span>
<span class="sd">        operation: str (&#39;marginalize&#39; | &#39;maximize&#39;)</span>
<span class="sd">            The operation to do for eliminating the variable.</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>
<span class="sd">        elimination_order: list, array-like</span>
<span class="sd">            list of variables representing the order in which they</span>
<span class="sd">            are to be eliminated. If None order is computed automatically.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">variables</span><span class="p">,</span> <span class="n">string_types</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;variables must be a list of strings&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">evidence</span><span class="p">,</span> <span class="n">string_types</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="s2">&quot;evidence must be a list of strings&quot;</span><span class="p">)</span>

        <span class="c1"># Dealing with the case when variables is not provided.</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">variables</span><span class="p">:</span>
            <span class="n">all_factors</span> <span class="o">=</span> <span class="p">[]</span>
            <span class="k">for</span> <span class="n">factor_li</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">factors</span><span class="o">.</span><span class="n">values</span><span class="p">():</span>
                <span class="n">all_factors</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="n">factor_li</span><span class="p">)</span>
            <span class="k">return</span> <span class="nb">set</span><span class="p">(</span><span class="n">all_factors</span><span class="p">)</span>

        <span class="n">eliminated_variables</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="n">working_factors</span> <span class="o">=</span> <span class="p">{</span><span class="n">node</span><span class="p">:</span> <span class="p">{</span><span class="n">factor</span> <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">factors</span><span class="p">[</span><span class="n">node</span><span class="p">]}</span>
                           <span class="k">for</span> <span class="n">node</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">factors</span><span class="p">}</span>

        <span class="c1"># Dealing with evidence. Reducing factors over it before VE is run.</span>
        <span class="k">if</span> <span class="n">evidence</span><span class="p">:</span>
            <span class="k">for</span> <span class="n">evidence_var</span> <span class="ow">in</span> <span class="n">evidence</span><span class="p">:</span>
                <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">evidence_var</span><span class="p">]:</span>
                    <span class="n">factor_reduced</span> <span class="o">=</span> <span class="n">factor</span><span class="o">.</span><span class="n">reduce</span><span class="p">([(</span><span class="n">evidence_var</span><span class="p">,</span> <span class="n">evidence</span><span class="p">[</span><span class="n">evidence_var</span><span class="p">])],</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
                    <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">factor_reduced</span><span class="o">.</span><span class="n">scope</span><span class="p">():</span>
                        <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span><span class="o">.</span><span class="n">remove</span><span class="p">(</span><span class="n">factor</span><span class="p">)</span>
                        <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">factor_reduced</span><span class="p">)</span>
                <span class="k">del</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">evidence_var</span><span class="p">]</span>

        <span class="c1"># TODO: Modify it to find the optimal elimination order</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">elimination_order</span><span class="p">:</span>
            <span class="n">elimination_order</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">set</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">variables</span><span class="p">)</span> <span class="o">-</span>
                                     <span class="nb">set</span><span class="p">(</span><span class="n">variables</span><span class="p">)</span> <span class="o">-</span>
                                     <span class="nb">set</span><span class="p">(</span><span class="n">evidence</span><span class="o">.</span><span class="n">keys</span><span class="p">()</span> <span class="k">if</span> <span class="n">evidence</span> <span class="k">else</span> <span class="p">[]))</span>

        <span class="k">elif</span> <span class="nb">any</span><span class="p">(</span><span class="n">var</span> <span class="ow">in</span> <span class="n">elimination_order</span> <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span>
                 <span class="nb">set</span><span class="p">(</span><span class="n">variables</span><span class="p">)</span><span class="o">.</span><span class="n">union</span><span class="p">(</span><span class="nb">set</span><span class="p">(</span><span class="n">evidence</span><span class="o">.</span><span class="n">keys</span><span class="p">()</span> <span class="k">if</span> <span class="n">evidence</span> <span class="k">else</span> <span class="p">[]))):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;Elimination order contains variables which are in&quot;</span>
                             <span class="s2">&quot; variables or evidence args&quot;</span><span class="p">)</span>

        <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">elimination_order</span><span class="p">:</span>
            <span class="c1"># Removing all the factors containing the variables which are</span>
            <span class="c1"># eliminated (as all the factors should be considered only once)</span>
            <span class="n">factors</span> <span class="o">=</span> <span class="p">[</span><span class="n">factor</span> <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span>
                       <span class="k">if</span> <span class="ow">not</span> <span class="nb">set</span><span class="p">(</span><span class="n">factor</span><span class="o">.</span><span class="n">variables</span><span class="p">)</span><span class="o">.</span><span class="n">intersection</span><span class="p">(</span><span class="n">eliminated_variables</span><span class="p">)]</span>
            <span class="n">phi</span> <span class="o">=</span> <span class="n">factor_product</span><span class="p">(</span><span class="o">*</span><span class="n">factors</span><span class="p">)</span>
            <span class="n">phi</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">phi</span><span class="p">,</span> <span class="n">operation</span><span class="p">)([</span><span class="n">var</span><span class="p">],</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
            <span class="k">del</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span>
            <span class="k">for</span> <span class="n">variable</span> <span class="ow">in</span> <span class="n">phi</span><span class="o">.</span><span class="n">variables</span><span class="p">:</span>
                <span class="n">working_factors</span><span class="p">[</span><span class="n">variable</span><span class="p">]</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">phi</span><span class="p">)</span>
            <span class="n">eliminated_variables</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">var</span><span class="p">)</span>

        <span class="n">final_distribution</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="k">for</span> <span class="n">node</span> <span class="ow">in</span> <span class="n">working_factors</span><span class="p">:</span>
            <span class="n">factors</span> <span class="o">=</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">node</span><span class="p">]</span>
            <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="n">factors</span><span class="p">:</span>
                <span class="k">if</span> <span class="ow">not</span> <span class="nb">set</span><span class="p">(</span><span class="n">factor</span><span class="o">.</span><span class="n">variables</span><span class="p">)</span><span class="o">.</span><span class="n">intersection</span><span class="p">(</span><span class="n">eliminated_variables</span><span class="p">):</span>
                    <span class="n">final_distribution</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">factor</span><span class="p">)</span>

        <span class="n">query_var_factor</span> <span class="o">=</span> <span class="p">{}</span>
        <span class="k">for</span> <span class="n">query_var</span> <span class="ow">in</span> <span class="n">variables</span><span class="p">:</span>
            <span class="n">phi</span> <span class="o">=</span> <span class="n">factor_product</span><span class="p">(</span><span class="o">*</span><span class="n">final_distribution</span><span class="p">)</span>
            <span class="n">query_var_factor</span><span class="p">[</span><span class="n">query_var</span><span class="p">]</span> <span class="o">=</span> <span class="n">phi</span><span class="o">.</span><span class="n">marginalize</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="nb">set</span><span class="p">(</span><span class="n">variables</span><span class="p">)</span> <span class="o">-</span>
                                                               <span class="nb">set</span><span class="p">([</span><span class="n">query_var</span><span class="p">])),</span>
                                                          <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span><span class="o">.</span><span class="n">normalize</span><span class="p">(</span><span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">query_var_factor</span>

<div class="viewcode-block" id="VariableElimination.query"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination.query">[docs]</a>    <span class="k">def</span> <span class="nf">query</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">elimination_order</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables for which you want to compute the probability</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>
<span class="sd">        elimination_order: list</span>
<span class="sd">            order of variable eliminations (if nothing is provided) order is</span>
<span class="sd">            computed automatically</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import VariableElimination</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = VariableElimination(model)</span>
<span class="sd">        &gt;&gt;&gt; phi_query = inference.query([&#39;A&#39;, &#39;B&#39;])</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_variable_elimination</span><span class="p">(</span><span class="n">variables</span><span class="p">,</span> <span class="s1">&#39;marginalize&#39;</span><span class="p">,</span>
                                          <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">,</span> <span class="n">elimination_order</span><span class="o">=</span><span class="n">elimination_order</span><span class="p">)</span></div>

<div class="viewcode-block" id="VariableElimination.max_marginal"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination.max_marginal">[docs]</a>    <span class="k">def</span> <span class="nf">max_marginal</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">elimination_order</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Computes the max-marginal over the variables given the evidence.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables over which we want to compute the max-marginal.</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>
<span class="sd">        elimination_order: list</span>
<span class="sd">            order of variable eliminations (if nothing is provided) order is</span>
<span class="sd">            computed automatically</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import VariableElimination</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = VariableElimination(model)</span>
<span class="sd">        &gt;&gt;&gt; phi_query = inference.max_marginal([&#39;A&#39;, &#39;B&#39;])</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">variables</span><span class="p">:</span>
            <span class="n">variables</span> <span class="o">=</span> <span class="p">[]</span>
        <span class="n">final_distribution</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_variable_elimination</span><span class="p">(</span><span class="n">variables</span><span class="p">,</span> <span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
                                                        <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">,</span>
                                                        <span class="n">elimination_order</span><span class="o">=</span><span class="n">elimination_order</span><span class="p">)</span>

        <span class="c1"># To handle the case when no argument is passed then</span>
        <span class="c1"># _variable_elimination returns a dict.</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">final_distribution</span><span class="p">,</span> <span class="nb">dict</span><span class="p">):</span>
            <span class="n">final_distribution</span> <span class="o">=</span> <span class="n">final_distribution</span><span class="o">.</span><span class="n">values</span><span class="p">()</span>
        <span class="k">return</span> <span class="n">np</span><span class="o">.</span><span class="n">max</span><span class="p">(</span><span class="n">factor_product</span><span class="p">(</span><span class="o">*</span><span class="n">final_distribution</span><span class="p">)</span><span class="o">.</span><span class="n">values</span><span class="p">)</span></div>

    <span class="nd">@StateNameDecorator</span><span class="p">(</span><span class="n">argument</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">return_val</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<div class="viewcode-block" id="VariableElimination.map_query"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination.map_query">[docs]</a>    <span class="k">def</span> <span class="nf">map_query</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">elimination_order</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Computes the MAP Query over the variables given the evidence.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables over which we want to compute the MAP query.</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>
<span class="sd">        elimination_order: list</span>
<span class="sd">            order of variable eliminations (if nothing is provided) order is</span>
<span class="sd">            computed automatically</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import VariableElimination</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = VariableElimination(model)</span>
<span class="sd">        &gt;&gt;&gt; phi_query = inference.map_query([&#39;A&#39;, &#39;B&#39;])</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">elimination_variables</span> <span class="o">=</span> <span class="nb">set</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">variables</span><span class="p">)</span> <span class="o">-</span> <span class="nb">set</span><span class="p">(</span><span class="n">evidence</span><span class="o">.</span><span class="n">keys</span><span class="p">())</span> <span class="k">if</span> <span class="n">evidence</span> <span class="k">else</span> <span class="nb">set</span><span class="p">()</span>
        <span class="n">final_distribution</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_variable_elimination</span><span class="p">(</span><span class="n">elimination_variables</span><span class="p">,</span> <span class="s1">&#39;maximize&#39;</span><span class="p">,</span>
                                                        <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">,</span>
                                                        <span class="n">elimination_order</span><span class="o">=</span><span class="n">elimination_order</span><span class="p">)</span>
        <span class="c1"># To handle the case when no argument is passed then</span>
        <span class="c1"># _variable_elimination returns a dict.</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">final_distribution</span><span class="p">,</span> <span class="nb">dict</span><span class="p">):</span>
            <span class="n">final_distribution</span> <span class="o">=</span> <span class="n">final_distribution</span><span class="o">.</span><span class="n">values</span><span class="p">()</span>
        <span class="n">distribution</span> <span class="o">=</span> <span class="n">factor_product</span><span class="p">(</span><span class="o">*</span><span class="n">final_distribution</span><span class="p">)</span>
        <span class="n">argmax</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">argmax</span><span class="p">(</span><span class="n">distribution</span><span class="o">.</span><span class="n">values</span><span class="p">)</span>
        <span class="n">assignment</span> <span class="o">=</span> <span class="n">distribution</span><span class="o">.</span><span class="n">assignment</span><span class="p">([</span><span class="n">argmax</span><span class="p">])[</span><span class="mi">0</span><span class="p">]</span>

        <span class="n">map_query_results</span> <span class="o">=</span> <span class="p">{}</span>
        <span class="k">for</span> <span class="n">var_assignment</span> <span class="ow">in</span> <span class="n">assignment</span><span class="p">:</span>
            <span class="n">var</span><span class="p">,</span> <span class="n">value</span> <span class="o">=</span> <span class="n">var_assignment</span>
            <span class="n">map_query_results</span><span class="p">[</span><span class="n">var</span><span class="p">]</span> <span class="o">=</span> <span class="n">value</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="n">variables</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">map_query_results</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">return_dict</span> <span class="o">=</span> <span class="p">{}</span>
            <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">variables</span><span class="p">:</span>
                <span class="n">return_dict</span><span class="p">[</span><span class="n">var</span><span class="p">]</span> <span class="o">=</span> <span class="n">map_query_results</span><span class="p">[</span><span class="n">var</span><span class="p">]</span>
            <span class="k">return</span> <span class="n">return_dict</span></div>

<div class="viewcode-block" id="VariableElimination.induced_graph"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination.induced_graph">[docs]</a>    <span class="k">def</span> <span class="nf">induced_graph</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">elimination_order</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Returns the induced graph formed by running Variable Elimination on the network.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        elimination_order: list, array like</span>
<span class="sd">            List of variables in the order in which they are to be eliminated.</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import VariableElimination</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = VariableElimination(model)</span>
<span class="sd">        &gt;&gt;&gt; inference.induced_graph([&#39;C&#39;, &#39;D&#39;, &#39;A&#39;, &#39;B&#39;, &#39;E&#39;])</span>
<span class="sd">        &lt;networkx.classes.graph.Graph at 0x7f34ac8c5160&gt;</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="c1"># If the elimination order does not contain the same variables as the model</span>
        <span class="k">if</span> <span class="nb">set</span><span class="p">(</span><span class="n">elimination_order</span><span class="p">)</span> <span class="o">!=</span> <span class="nb">set</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">variables</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;Set of variables in elimination order&quot;</span>
                             <span class="s2">&quot; different from variables in model&quot;</span><span class="p">)</span>

        <span class="n">eliminated_variables</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="n">working_factors</span> <span class="o">=</span> <span class="p">{</span><span class="n">node</span><span class="p">:</span> <span class="p">[</span><span class="n">factor</span><span class="o">.</span><span class="n">scope</span><span class="p">()</span> <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">factors</span><span class="p">[</span><span class="n">node</span><span class="p">]]</span>
                           <span class="k">for</span> <span class="n">node</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">factors</span><span class="p">}</span>

        <span class="c1"># The set of cliques that should be in the induced graph</span>
        <span class="n">cliques</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="k">for</span> <span class="n">factors</span> <span class="ow">in</span> <span class="n">working_factors</span><span class="o">.</span><span class="n">values</span><span class="p">():</span>
            <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="n">factors</span><span class="p">:</span>
                <span class="n">cliques</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">factor</span><span class="p">))</span>

        <span class="c1"># Removing all the factors containing the variables which are</span>
        <span class="c1"># eliminated (as all the factors should be considered only once)</span>
        <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">elimination_order</span><span class="p">:</span>
            <span class="n">factors</span> <span class="o">=</span> <span class="p">[</span><span class="n">factor</span> <span class="k">for</span> <span class="n">factor</span> <span class="ow">in</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span>
                       <span class="k">if</span> <span class="ow">not</span> <span class="nb">set</span><span class="p">(</span><span class="n">factor</span><span class="p">)</span><span class="o">.</span><span class="n">intersection</span><span class="p">(</span><span class="n">eliminated_variables</span><span class="p">)]</span>
            <span class="n">phi</span> <span class="o">=</span> <span class="nb">set</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">factors</span><span class="p">))</span><span class="o">.</span><span class="n">difference</span><span class="p">({</span><span class="n">var</span><span class="p">})</span>
            <span class="n">cliques</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="nb">tuple</span><span class="p">(</span><span class="n">phi</span><span class="p">))</span>
            <span class="k">del</span> <span class="n">working_factors</span><span class="p">[</span><span class="n">var</span><span class="p">]</span>
            <span class="k">for</span> <span class="n">variable</span> <span class="ow">in</span> <span class="n">phi</span><span class="p">:</span>
                <span class="n">working_factors</span><span class="p">[</span><span class="n">variable</span><span class="p">]</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="nb">list</span><span class="p">(</span><span class="n">phi</span><span class="p">))</span>
            <span class="n">eliminated_variables</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">var</span><span class="p">)</span>

        <span class="n">edges_comb</span> <span class="o">=</span> <span class="p">[</span><span class="n">itertools</span><span class="o">.</span><span class="n">combinations</span><span class="p">(</span><span class="n">c</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
                      <span class="k">for</span> <span class="n">c</span> <span class="ow">in</span> <span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">x</span><span class="p">)</span> <span class="o">&gt;</span> <span class="mi">1</span><span class="p">,</span> <span class="n">cliques</span><span class="p">)]</span>
        <span class="k">return</span> <span class="n">nx</span><span class="o">.</span><span class="n">Graph</span><span class="p">(</span><span class="n">itertools</span><span class="o">.</span><span class="n">chain</span><span class="p">(</span><span class="o">*</span><span class="n">edges_comb</span><span class="p">))</span></div>

<div class="viewcode-block" id="VariableElimination.induced_width"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.VariableElimination.induced_width">[docs]</a>    <span class="k">def</span> <span class="nf">induced_width</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">elimination_order</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Returns the width (integer) of the induced graph formed by running Variable Elimination on the network.</span>
<span class="sd">        The width is the defined as the number of nodes in the largest clique in the graph minus 1.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        elimination_order: list, array like</span>
<span class="sd">            List of variables in the order in which they are to be eliminated.</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import VariableElimination</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = VariableElimination(model)</span>
<span class="sd">        &gt;&gt;&gt; inference.induced_width([&#39;C&#39;, &#39;D&#39;, &#39;A&#39;, &#39;B&#39;, &#39;E&#39;])</span>
<span class="sd">        3</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">induced_graph</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">induced_graph</span><span class="p">(</span><span class="n">elimination_order</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">nx</span><span class="o">.</span><span class="n">graph_clique_number</span><span class="p">(</span><span class="n">induced_graph</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span></div></div>


<div class="viewcode-block" id="BeliefPropagation"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation">[docs]</a><span class="k">class</span> <span class="nc">BeliefPropagation</span><span class="p">(</span><span class="n">Inference</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Class for performing inference using Belief Propagation method.</span>

<span class="sd">    Creates a Junction Tree or Clique Tree (JunctionTree class) for the input</span>
<span class="sd">    probabilistic graphical model and performs calibration of the junction tree</span>
<span class="sd">    so formed using belief propagation.</span>

<span class="sd">    Parameters</span>
<span class="sd">    ----------</span>
<span class="sd">    model: BayesianModel, MarkovModel, FactorGraph, JunctionTree</span>
<span class="sd">        model for which inference is to performed</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">model</span><span class="p">):</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">BeliefPropagation</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="n">__init__</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">JunctionTree</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span> <span class="o">=</span> <span class="n">model</span><span class="o">.</span><span class="n">to_junction_tree</span><span class="p">()</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span> <span class="o">=</span> <span class="p">{}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span> <span class="o">=</span> <span class="p">{}</span>

<div class="viewcode-block" id="BeliefPropagation.get_cliques"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.get_cliques">[docs]</a>    <span class="k">def</span> <span class="nf">get_cliques</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Returns cliques used for belief propagation.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">nodes</span><span class="p">()</span></div>

<div class="viewcode-block" id="BeliefPropagation.get_clique_beliefs"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.get_clique_beliefs">[docs]</a>    <span class="k">def</span> <span class="nf">get_clique_beliefs</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Returns clique beliefs. Should be called after the clique tree (or</span>
<span class="sd">        junction tree) is calibrated.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span></div>

<div class="viewcode-block" id="BeliefPropagation.get_sepset_beliefs"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.get_sepset_beliefs">[docs]</a>    <span class="k">def</span> <span class="nf">get_sepset_beliefs</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Returns sepset beliefs. Should be called after clique tree (or junction</span>
<span class="sd">        tree) is calibrated.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span></div>

    <span class="k">def</span> <span class="nf">_update_beliefs</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">sending_clique</span><span class="p">,</span> <span class="n">recieving_clique</span><span class="p">,</span> <span class="n">operation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        This is belief-update method.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        sending_clique: node (as the operation is on junction tree, node should be a tuple)</span>
<span class="sd">            Node sending the message</span>
<span class="sd">        recieving_clique: node (as the operation is on junction tree, node should be a tuple)</span>
<span class="sd">            Node recieving the message</span>
<span class="sd">        operation: str (&#39;marginalize&#39; | &#39;maximize&#39;)</span>
<span class="sd">            The operation to do for passing messages between nodes.</span>

<span class="sd">        Takes belief of one clique and uses it to update the belief of the</span>
<span class="sd">        neighboring ones.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">sepset</span> <span class="o">=</span> <span class="nb">frozenset</span><span class="p">(</span><span class="n">sending_clique</span><span class="p">)</span><span class="o">.</span><span class="n">intersection</span><span class="p">(</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">recieving_clique</span><span class="p">))</span>
        <span class="n">sepset_key</span> <span class="o">=</span> <span class="nb">frozenset</span><span class="p">((</span><span class="n">sending_clique</span><span class="p">,</span> <span class="n">recieving_clique</span><span class="p">))</span>

        <span class="c1"># \sigma_{i \rightarrow j} = \sum_{C_i - S_{i, j}} \beta_i</span>
        <span class="c1"># marginalize the clique over the sepset</span>
        <span class="n">sigma</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">sending_clique</span><span class="p">],</span> <span class="n">operation</span><span class="p">)(</span><span class="nb">list</span><span class="p">(</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">sending_clique</span><span class="p">)</span> <span class="o">-</span> <span class="n">sepset</span><span class="p">),</span>
                                                                        <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

        <span class="c1"># \beta_j = \beta_j * \frac{\sigma_{i \rightarrow j}}{\mu_{i, j}}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">recieving_clique</span><span class="p">]</span> <span class="o">*=</span> <span class="p">(</span><span class="n">sigma</span> <span class="o">/</span> <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">[</span><span class="n">sepset_key</span><span class="p">]</span>
                                                  <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">[</span><span class="n">sepset_key</span><span class="p">]</span> <span class="k">else</span> <span class="n">sigma</span><span class="p">)</span>

        <span class="c1"># \mu_{i, j} = \sigma_{i \rightarrow j}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">[</span><span class="n">sepset_key</span><span class="p">]</span> <span class="o">=</span> <span class="n">sigma</span>

    <span class="k">def</span> <span class="nf">_is_converged</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">operation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Checks whether the calibration has converged or not. At convergence</span>
<span class="sd">        the sepset belief would be precisely the sepset marginal.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        operation: str (&#39;marginalize&#39; | &#39;maximize&#39;)</span>
<span class="sd">            The operation to do for passing messages between nodes.</span>
<span class="sd">            if operation == marginalize, it checks whether the junction tree is calibrated or not</span>
<span class="sd">            else if operation == maximize, it checks whether the juction tree is max calibrated or not</span>

<span class="sd">        Formally, at convergence or at calibration this condition would be satisified for</span>

<span class="sd">        .. math:: \sum_{C_i - S_{i, j}} \beta_i = \sum_{C_j - S_{i, j}} \beta_j = \mu_{i, j}</span>

<span class="sd">        and at max calibration this condition would be satisfied</span>

<span class="sd">        .. math:: \max_{C_i - S_{i, j}} \beta_i = \max_{C_j - S_{i, j}} \beta_j = \mu_{i, j}</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="c1"># If no clique belief, then the clique tree is not calibrated</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">:</span>
            <span class="k">return</span> <span class="kc">False</span>

        <span class="k">for</span> <span class="n">edge</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">edges</span><span class="p">():</span>
            <span class="n">sepset</span> <span class="o">=</span> <span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span><span class="o">.</span><span class="n">intersection</span><span class="p">(</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">1</span><span class="p">]))</span>
            <span class="n">sepset_key</span> <span class="o">=</span> <span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">)</span>
            <span class="k">if</span> <span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span> <span class="ow">or</span> <span class="n">edge</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span> <span class="ow">or</span>
                    <span class="n">sepset_key</span> <span class="ow">not</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">):</span>
                <span class="k">return</span> <span class="kc">False</span>

            <span class="n">marginal_1</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">edge</span><span class="p">[</span><span class="mi">0</span><span class="p">]],</span> <span class="n">operation</span><span class="p">)(</span><span class="nb">list</span><span class="p">(</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span> <span class="o">-</span> <span class="n">sepset</span><span class="p">),</span>
                                                                          <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
            <span class="n">marginal_2</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">edge</span><span class="p">[</span><span class="mi">1</span><span class="p">]],</span> <span class="n">operation</span><span class="p">)(</span><span class="nb">list</span><span class="p">(</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span> <span class="o">-</span> <span class="n">sepset</span><span class="p">),</span>
                                                                          <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
            <span class="k">if</span> <span class="n">marginal_1</span> <span class="o">!=</span> <span class="n">marginal_2</span> <span class="ow">or</span> <span class="n">marginal_1</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">[</span><span class="n">sepset_key</span><span class="p">]:</span>
                <span class="k">return</span> <span class="kc">False</span>
        <span class="k">return</span> <span class="kc">True</span>

    <span class="k">def</span> <span class="nf">_calibrate_junction_tree</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">operation</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Generalized calibration of junction tree or clique using belief propagation. This method can be used for both</span>
<span class="sd">        calibrating as well as max-calibrating.</span>
<span class="sd">        Uses Lauritzen-Spiegelhalter algorithm or belief-update message passing.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        operation: str (&#39;marginalize&#39; | &#39;maximize&#39;)</span>
<span class="sd">            The operation to do for passing messages between nodes.</span>

<span class="sd">        Reference</span>
<span class="sd">        ---------</span>
<span class="sd">        Algorithm 10.3 Calibration using belief propagation in clique tree</span>
<span class="sd">        Probabilistic Graphical Models: Principles and Techniques</span>
<span class="sd">        Daphne Koller and Nir Friedman.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="c1"># Initialize clique beliefs as well as sepset beliefs</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span> <span class="o">=</span> <span class="p">{</span><span class="n">clique</span><span class="p">:</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">get_factors</span><span class="p">(</span><span class="n">clique</span><span class="p">)</span>
                               <span class="k">for</span> <span class="n">clique</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">nodes</span><span class="p">()}</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span> <span class="o">=</span> <span class="p">{</span><span class="nb">frozenset</span><span class="p">(</span><span class="n">edge</span><span class="p">):</span> <span class="kc">None</span> <span class="k">for</span> <span class="n">edge</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">edges</span><span class="p">()}</span>

        <span class="k">for</span> <span class="n">clique</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">nodes</span><span class="p">():</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">_is_converged</span><span class="p">(</span><span class="n">operation</span><span class="o">=</span><span class="n">operation</span><span class="p">):</span>
                <span class="n">neighbors</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">neighbors</span><span class="p">(</span><span class="n">clique</span><span class="p">)</span>
                <span class="c1"># update root&#39;s belief using nieighbor clique&#39;s beliefs</span>
                <span class="c1"># upward pass</span>
                <span class="k">for</span> <span class="n">neighbor_clique</span> <span class="ow">in</span> <span class="n">neighbors</span><span class="p">:</span>
                    <span class="bp">self</span><span class="o">.</span><span class="n">_update_beliefs</span><span class="p">(</span><span class="n">neighbor_clique</span><span class="p">,</span> <span class="n">clique</span><span class="p">,</span> <span class="n">operation</span><span class="o">=</span><span class="n">operation</span><span class="p">)</span>
                <span class="n">bfs_edges</span> <span class="o">=</span> <span class="n">nx</span><span class="o">.</span><span class="n">algorithms</span><span class="o">.</span><span class="n">breadth_first_search</span><span class="o">.</span><span class="n">bfs_edges</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="p">,</span> <span class="n">clique</span><span class="p">)</span>
                <span class="c1"># update the beliefs of all the nodes starting from the root to leaves using root&#39;s belief</span>
                <span class="c1"># downward pass</span>
                <span class="k">for</span> <span class="n">edge</span> <span class="ow">in</span> <span class="n">bfs_edges</span><span class="p">:</span>
                    <span class="bp">self</span><span class="o">.</span><span class="n">_update_beliefs</span><span class="p">(</span><span class="n">edge</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">edge</span><span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">operation</span><span class="o">=</span><span class="n">operation</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="k">break</span>

<div class="viewcode-block" id="BeliefPropagation.calibrate"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.calibrate">[docs]</a>    <span class="k">def</span> <span class="nf">calibrate</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Calibration using belief propagation in junction tree or clique tree.</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.factors.discrete import TabularCPD</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import BeliefPropagation</span>
<span class="sd">        &gt;&gt;&gt; G = BayesianModel([(&#39;diff&#39;, &#39;grade&#39;), (&#39;intel&#39;, &#39;grade&#39;),</span>
<span class="sd">        ...                    (&#39;intel&#39;, &#39;SAT&#39;), (&#39;grade&#39;, &#39;letter&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; diff_cpd = TabularCPD(&#39;diff&#39;, 2, [[0.2], [0.8]])</span>
<span class="sd">        &gt;&gt;&gt; intel_cpd = TabularCPD(&#39;intel&#39;, 3, [[0.5], [0.3], [0.2]])</span>
<span class="sd">        &gt;&gt;&gt; grade_cpd = TabularCPD(&#39;grade&#39;, 3,</span>
<span class="sd">        ...                        [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],</span>
<span class="sd">        ...                         [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],</span>
<span class="sd">        ...                         [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],</span>
<span class="sd">        ...                        evidence=[&#39;diff&#39;, &#39;intel&#39;],</span>
<span class="sd">        ...                        evidence_card=[2, 3])</span>
<span class="sd">        &gt;&gt;&gt; sat_cpd = TabularCPD(&#39;SAT&#39;, 2,</span>
<span class="sd">        ...                      [[0.1, 0.2, 0.7],</span>
<span class="sd">        ...                       [0.9, 0.8, 0.3]],</span>
<span class="sd">        ...                      evidence=[&#39;intel&#39;], evidence_card=[3])</span>
<span class="sd">        &gt;&gt;&gt; letter_cpd = TabularCPD(&#39;letter&#39;, 2,</span>
<span class="sd">        ...                         [[0.1, 0.4, 0.8],</span>
<span class="sd">        ...                          [0.9, 0.6, 0.2]],</span>
<span class="sd">        ...                         evidence=[&#39;grade&#39;], evidence_card=[3])</span>
<span class="sd">        &gt;&gt;&gt; G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)</span>
<span class="sd">        &gt;&gt;&gt; bp = BeliefPropagation(G)</span>
<span class="sd">        &gt;&gt;&gt; bp.calibrate()</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_calibrate_junction_tree</span><span class="p">(</span><span class="n">operation</span><span class="o">=</span><span class="s1">&#39;marginalize&#39;</span><span class="p">)</span></div>

<div class="viewcode-block" id="BeliefPropagation.max_calibrate"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.max_calibrate">[docs]</a>    <span class="k">def</span> <span class="nf">max_calibrate</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Max-calibration of the junction tree using belief propagation.</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.factors.discrete import TabularCPD</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import BeliefPropagation</span>
<span class="sd">        &gt;&gt;&gt; G = BayesianModel([(&#39;diff&#39;, &#39;grade&#39;), (&#39;intel&#39;, &#39;grade&#39;),</span>
<span class="sd">        ...                    (&#39;intel&#39;, &#39;SAT&#39;), (&#39;grade&#39;, &#39;letter&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; diff_cpd = TabularCPD(&#39;diff&#39;, 2, [[0.2], [0.8]])</span>
<span class="sd">        &gt;&gt;&gt; intel_cpd = TabularCPD(&#39;intel&#39;, 3, [[0.5], [0.3], [0.2]])</span>
<span class="sd">        &gt;&gt;&gt; grade_cpd = TabularCPD(&#39;grade&#39;, 3,</span>
<span class="sd">        ...                        [[0.1, 0.1, 0.1, 0.1, 0.1, 0.1],</span>
<span class="sd">        ...                         [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],</span>
<span class="sd">        ...                         [0.8, 0.8, 0.8, 0.8, 0.8, 0.8]],</span>
<span class="sd">        ...                        evidence=[&#39;diff&#39;, &#39;intel&#39;],</span>
<span class="sd">        ...                        evidence_card=[2, 3])</span>
<span class="sd">        &gt;&gt;&gt; sat_cpd = TabularCPD(&#39;SAT&#39;, 2,</span>
<span class="sd">        ...                      [[0.1, 0.2, 0.7],</span>
<span class="sd">        ...                       [0.9, 0.8, 0.3]],</span>
<span class="sd">        ...                      evidence=[&#39;intel&#39;], evidence_card=[3])</span>
<span class="sd">        &gt;&gt;&gt; letter_cpd = TabularCPD(&#39;letter&#39;, 2,</span>
<span class="sd">        ...                         [[0.1, 0.4, 0.8],</span>
<span class="sd">        ...                          [0.9, 0.6, 0.2]],</span>
<span class="sd">        ...                         evidence=[&#39;grade&#39;], evidence_card=[3])</span>
<span class="sd">        &gt;&gt;&gt; G.add_cpds(diff_cpd, intel_cpd, grade_cpd, sat_cpd, letter_cpd)</span>
<span class="sd">        &gt;&gt;&gt; bp = BeliefPropagation(G)</span>
<span class="sd">        &gt;&gt;&gt; bp.max_calibrate()</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_calibrate_junction_tree</span><span class="p">(</span><span class="n">operation</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">)</span></div>

    <span class="k">def</span> <span class="nf">_query</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="p">,</span> <span class="n">operation</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        This is a generalized query method that can be used for both query and map query.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables for which you want to compute the probability</span>
<span class="sd">        operation: str (&#39;marginalize&#39; | &#39;maximize&#39;)</span>
<span class="sd">            The operation to do for passing messages between nodes.</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import BeliefPropagation</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; import pandas as pd</span>
<span class="sd">        &gt;&gt;&gt; values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 5)),</span>
<span class="sd">        ...                       columns=[&#39;A&#39;, &#39;B&#39;, &#39;C&#39;, &#39;D&#39;, &#39;E&#39;])</span>
<span class="sd">        &gt;&gt;&gt; model = BayesianModel([(&#39;A&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;B&#39;), (&#39;C&#39;, &#39;D&#39;), (&#39;B&#39;, &#39;E&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; model.fit(values)</span>
<span class="sd">        &gt;&gt;&gt; inference = BeliefPropagation(model)</span>
<span class="sd">        &gt;&gt;&gt; phi_query = inference.query([&#39;A&#39;, &#39;B&#39;])</span>

<span class="sd">        References</span>
<span class="sd">        ----------</span>
<span class="sd">        Algorithm 10.4 Out-of-clique inference in clique tree</span>
<span class="sd">        Probabilistic Graphical Models: Principles and Techniques Daphne Koller and Nir Friedman.</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="n">is_calibrated</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_is_converged</span><span class="p">(</span><span class="n">operation</span><span class="o">=</span><span class="n">operation</span><span class="p">)</span>
        <span class="c1"># Calibrate the junction tree if not calibrated</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">is_calibrated</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">calibrate</span><span class="p">()</span>

        <span class="k">if</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">variables</span><span class="p">,</span> <span class="p">(</span><span class="nb">list</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">,</span> <span class="nb">set</span><span class="p">)):</span>
            <span class="n">query_variables</span> <span class="o">=</span> <span class="p">[</span><span class="n">variables</span><span class="p">]</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">query_variables</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="n">variables</span><span class="p">)</span>
        <span class="n">query_variables</span><span class="o">.</span><span class="n">extend</span><span class="p">(</span><span class="n">evidence</span><span class="o">.</span><span class="n">keys</span><span class="p">()</span> <span class="k">if</span> <span class="n">evidence</span> <span class="k">else</span> <span class="p">[])</span>

        <span class="c1"># Find a tree T&#39; such that query_variables are a subset of scope(T&#39;)</span>
        <span class="n">nodes_with_query_variables</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="k">for</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">query_variables</span><span class="p">:</span>
            <span class="n">nodes_with_query_variables</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="n">var</span> <span class="ow">in</span> <span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">nodes</span><span class="p">()))</span>
        <span class="n">subtree_nodes</span> <span class="o">=</span> <span class="n">nodes_with_query_variables</span>

        <span class="c1"># Conversion of set to tuple just for indexing</span>
        <span class="n">nodes_with_query_variables</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="n">nodes_with_query_variables</span><span class="p">)</span>
        <span class="c1"># As the junction tree is a tree, there is exactly one path between any two nodes in it,</span>
        <span class="c1"># thus we can just take the path between any two nodes, no matter what their order is</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">nodes_with_query_variables</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span><span class="p">):</span>
            <span class="n">subtree_nodes</span><span class="o">.</span><span class="n">update</span><span class="p">(</span><span class="n">nx</span><span class="o">.</span><span class="n">shortest_path</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="p">,</span> <span class="n">nodes_with_query_variables</span><span class="p">[</span><span class="n">i</span><span class="p">],</span>
                                                  <span class="n">nodes_with_query_variables</span><span class="p">[</span><span class="n">i</span> <span class="o">+</span> <span class="mi">1</span><span class="p">]))</span>
        <span class="n">subtree_undirected_graph</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">junction_tree</span><span class="o">.</span><span class="n">subgraph</span><span class="p">(</span><span class="n">subtree_nodes</span><span class="p">)</span>
        <span class="c1"># Converting subtree into a junction tree</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">subtree_nodes</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
            <span class="n">subtree</span> <span class="o">=</span> <span class="n">JunctionTree</span><span class="p">()</span>
            <span class="n">subtree</span><span class="o">.</span><span class="n">add_node</span><span class="p">(</span><span class="n">subtree_nodes</span><span class="o">.</span><span class="n">pop</span><span class="p">())</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">subtree</span> <span class="o">=</span> <span class="n">JunctionTree</span><span class="p">(</span><span class="n">subtree_undirected_graph</span><span class="o">.</span><span class="n">edges</span><span class="p">())</span>

        <span class="c1"># Selecting a node as the root node. The root node would have only one neighbor</span>
        <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">subtree</span><span class="o">.</span><span class="n">nodes</span><span class="p">())</span> <span class="o">==</span> <span class="mi">1</span><span class="p">:</span>
            <span class="n">root_node</span> <span class="o">=</span> <span class="n">subtree</span><span class="o">.</span><span class="n">nodes</span><span class="p">()[</span><span class="mi">0</span><span class="p">]</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">root_node</span> <span class="o">=</span> <span class="nb">tuple</span><span class="p">(</span><span class="nb">filter</span><span class="p">(</span><span class="k">lambda</span> <span class="n">x</span><span class="p">:</span> <span class="nb">len</span><span class="p">(</span><span class="n">subtree</span><span class="o">.</span><span class="n">neighbors</span><span class="p">(</span><span class="n">x</span><span class="p">))</span> <span class="o">==</span> <span class="mi">1</span><span class="p">,</span> <span class="n">subtree</span><span class="o">.</span><span class="n">nodes</span><span class="p">()))[</span><span class="mi">0</span><span class="p">]</span>
        <span class="n">clique_potential_list</span> <span class="o">=</span> <span class="p">[</span><span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">root_node</span><span class="p">]]</span>

        <span class="c1"># For other nodes in the subtree compute the clique potentials as follows</span>
        <span class="c1"># As all the nodes are nothing but tuples, simple set(root_node) won&#39;t work as it would update the set with</span>
        <span class="c1"># all the elements of the tuple; instead use set([root_node]) as it would include only the tuple not the</span>
        <span class="c1"># internal elements within it.</span>
        <span class="n">parent_nodes</span> <span class="o">=</span> <span class="nb">set</span><span class="p">([</span><span class="n">root_node</span><span class="p">])</span>
        <span class="n">nodes_traversed</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span>
        <span class="k">while</span> <span class="n">parent_nodes</span><span class="p">:</span>
            <span class="n">parent_node</span> <span class="o">=</span> <span class="n">parent_nodes</span><span class="o">.</span><span class="n">pop</span><span class="p">()</span>
            <span class="k">for</span> <span class="n">child_node</span> <span class="ow">in</span> <span class="nb">set</span><span class="p">(</span><span class="n">subtree</span><span class="o">.</span><span class="n">neighbors</span><span class="p">(</span><span class="n">parent_node</span><span class="p">))</span> <span class="o">-</span> <span class="n">nodes_traversed</span><span class="p">:</span>
                <span class="n">clique_potential_list</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">clique_beliefs</span><span class="p">[</span><span class="n">child_node</span><span class="p">]</span> <span class="o">/</span>
                                             <span class="bp">self</span><span class="o">.</span><span class="n">sepset_beliefs</span><span class="p">[</span><span class="nb">frozenset</span><span class="p">([</span><span class="n">parent_node</span><span class="p">,</span> <span class="n">child_node</span><span class="p">])])</span>
                <span class="n">parent_nodes</span><span class="o">.</span><span class="n">update</span><span class="p">([</span><span class="n">child_node</span><span class="p">])</span>
            <span class="n">nodes_traversed</span><span class="o">.</span><span class="n">update</span><span class="p">([</span><span class="n">parent_node</span><span class="p">])</span>

        <span class="c1"># Add factors to the corresponding junction tree</span>
        <span class="n">subtree</span><span class="o">.</span><span class="n">add_factors</span><span class="p">(</span><span class="o">*</span><span class="n">clique_potential_list</span><span class="p">)</span>

        <span class="c1"># Sum product variable elimination on the subtree</span>
        <span class="n">variable_elimination</span> <span class="o">=</span> <span class="n">VariableElimination</span><span class="p">(</span><span class="n">subtree</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">operation</span> <span class="o">==</span> <span class="s1">&#39;marginalize&#39;</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">variable_elimination</span><span class="o">.</span><span class="n">query</span><span class="p">(</span><span class="n">variables</span><span class="o">=</span><span class="n">variables</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">operation</span> <span class="o">==</span> <span class="s1">&#39;maximize&#39;</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">variable_elimination</span><span class="o">.</span><span class="n">map_query</span><span class="p">(</span><span class="n">variables</span><span class="o">=</span><span class="n">variables</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">)</span>

<div class="viewcode-block" id="BeliefPropagation.query"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.query">[docs]</a>    <span class="k">def</span> <span class="nf">query</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Query method using belief propagation.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables for which you want to compute the probability</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.factors.discrete import TabularCPD</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import BeliefPropagation</span>
<span class="sd">        &gt;&gt;&gt; bayesian_model = BayesianModel([(&#39;A&#39;, &#39;J&#39;), (&#39;R&#39;, &#39;J&#39;), (&#39;J&#39;, &#39;Q&#39;),</span>
<span class="sd">        ...                                 (&#39;J&#39;, &#39;L&#39;), (&#39;G&#39;, &#39;L&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; cpd_a = TabularCPD(&#39;A&#39;, 2, [[0.2], [0.8]])</span>
<span class="sd">        &gt;&gt;&gt; cpd_r = TabularCPD(&#39;R&#39;, 2, [[0.4], [0.6]])</span>
<span class="sd">        &gt;&gt;&gt; cpd_j = TabularCPD(&#39;J&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.6, 0.7, 0.1],</span>
<span class="sd">        ...                     [0.1, 0.4, 0.3, 0.9]],</span>
<span class="sd">        ...                    [&#39;R&#39;, &#39;A&#39;], [2, 2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_q = TabularCPD(&#39;Q&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.2],</span>
<span class="sd">        ...                     [0.1, 0.8]],</span>
<span class="sd">        ...                    [&#39;J&#39;], [2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_l = TabularCPD(&#39;L&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.45, 0.8, 0.1],</span>
<span class="sd">        ...                     [0.1, 0.55, 0.2, 0.9]],</span>
<span class="sd">        ...                    [&#39;G&#39;, &#39;J&#39;], [2, 2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_g = TabularCPD(&#39;G&#39;, 2, [[0.6], [0.4]])</span>
<span class="sd">        &gt;&gt;&gt; bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)</span>
<span class="sd">        &gt;&gt;&gt; belief_propagation = BeliefPropagation(bayesian_model)</span>
<span class="sd">        &gt;&gt;&gt; belief_propagation.query(variables=[&#39;J&#39;, &#39;Q&#39;],</span>
<span class="sd">        ...                          evidence={&#39;A&#39;: 0, &#39;R&#39;: 0, &#39;G&#39;: 0, &#39;L&#39;: 1})</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_query</span><span class="p">(</span><span class="n">variables</span><span class="o">=</span><span class="n">variables</span><span class="p">,</span> <span class="n">operation</span><span class="o">=</span><span class="s1">&#39;marginalize&#39;</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">)</span></div>

<div class="viewcode-block" id="BeliefPropagation.map_query"><a class="viewcode-back" href="../../../inference.html#pgmpy.inference.ExactInference.BeliefPropagation.map_query">[docs]</a>    <span class="k">def</span> <span class="nf">map_query</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">variables</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        MAP Query method using belief propagation.</span>

<span class="sd">        Parameters</span>
<span class="sd">        ----------</span>
<span class="sd">        variables: list</span>
<span class="sd">            list of variables for which you want to compute the probability</span>
<span class="sd">        evidence: dict</span>
<span class="sd">            a dict key, value pair as {var: state_of_var_observed}</span>
<span class="sd">            None if no evidence</span>

<span class="sd">        Examples</span>
<span class="sd">        --------</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.factors.discrete import TabularCPD</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.models import BayesianModel</span>
<span class="sd">        &gt;&gt;&gt; from pgmpy.inference import BeliefPropagation</span>
<span class="sd">        &gt;&gt;&gt; bayesian_model = BayesianModel([(&#39;A&#39;, &#39;J&#39;), (&#39;R&#39;, &#39;J&#39;), (&#39;J&#39;, &#39;Q&#39;),</span>
<span class="sd">        ...                                 (&#39;J&#39;, &#39;L&#39;), (&#39;G&#39;, &#39;L&#39;)])</span>
<span class="sd">        &gt;&gt;&gt; cpd_a = TabularCPD(&#39;A&#39;, 2, [[0.2], [0.8]])</span>
<span class="sd">        &gt;&gt;&gt; cpd_r = TabularCPD(&#39;R&#39;, 2, [[0.4], [0.6]])</span>
<span class="sd">        &gt;&gt;&gt; cpd_j = TabularCPD(&#39;J&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.6, 0.7, 0.1],</span>
<span class="sd">        ...                     [0.1, 0.4, 0.3, 0.9]],</span>
<span class="sd">        ...                    [&#39;R&#39;, &#39;A&#39;], [2, 2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_q = TabularCPD(&#39;Q&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.2],</span>
<span class="sd">        ...                     [0.1, 0.8]],</span>
<span class="sd">        ...                    [&#39;J&#39;], [2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_l = TabularCPD(&#39;L&#39;, 2,</span>
<span class="sd">        ...                    [[0.9, 0.45, 0.8, 0.1],</span>
<span class="sd">        ...                     [0.1, 0.55, 0.2, 0.9]],</span>
<span class="sd">        ...                    [&#39;G&#39;, &#39;J&#39;], [2, 2])</span>
<span class="sd">        &gt;&gt;&gt; cpd_g = TabularCPD(&#39;G&#39;, 2, [[0.6], [0.4]])</span>
<span class="sd">        &gt;&gt;&gt; bayesian_model.add_cpds(cpd_a, cpd_r, cpd_j, cpd_q, cpd_l, cpd_g)</span>
<span class="sd">        &gt;&gt;&gt; belief_propagation = BeliefPropagation(bayesian_model)</span>
<span class="sd">        &gt;&gt;&gt; belief_propagation.map_query(variables=[&#39;J&#39;, &#39;Q&#39;],</span>
<span class="sd">        ...                              evidence={&#39;A&#39;: 0, &#39;R&#39;: 0, &#39;G&#39;: 0, &#39;L&#39;: 1})</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="c1"># If no variables are specified then run the MAP query for all the variables present in the model</span>
        <span class="k">if</span> <span class="n">variables</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
            <span class="n">variables</span> <span class="o">=</span> <span class="nb">set</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">variables</span><span class="p">)</span>

        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">_query</span><span class="p">(</span><span class="n">variables</span><span class="o">=</span><span class="n">variables</span><span class="p">,</span> <span class="n">operation</span><span class="o">=</span><span class="s1">&#39;maximize&#39;</span><span class="p">,</span> <span class="n">evidence</span><span class="o">=</span><span class="n">evidence</span><span class="p">)</span></div></div>
</pre></div>

          </div>
        </div>
      </div>
      <div class="clearer"></div>
    </div>
    <div class="related" role="navigation" aria-label="related navigation">
      <h3>Navigation</h3>
      <ul>
        <li class="right" style="margin-right: 10px">
          <a href="../../../genindex.html" title="General Index"
             >index</a></li>
        <li class="right" >
          <a href="../../../py-modindex.html" title="Python Module Index"
             >modules</a> |</li>
        <li class="nav-item nav-item-0"><a href="../../../index.html">pgmpy 0.1.2 documentation</a> &#187;</li>
          <li class="nav-item nav-item-1"><a href="../../index.html" >Module code</a> &#187;</li> 
      </ul>
    </div>
    <div class="footer" role="contentinfo">
        &#169; Copyright 2016, Ankur Ankan.
      Created using <a href="https://sphinx-doc.org/">Sphinx</a> 1.5.1.
    </div>
  </body>
</html>