\section{MAP Inference}
%\section{Inference: Blocked Gibbs Sampling}

\label{s:inf}

We note that computing $\argmax_{\mathbf{x}}
P(\mathbf{X}=\mathbf{x})$ is challenging:
\begin{enumerate}
\item There exist thousands of grounded predicates, making exact
inference intractable.
\item The dependencies represented by our rules break the joint
distribution into islands of high-probability states with no
paths between them.
\end{enumerate}

For tractability we turn to approximate inference, and in particular
to Gibbs sampling. In its basic version, Gibbs sampling generates
a value for each variable in turn, conditioned on the other variables.
In general, the samples then approximate the joint distribution.

In our scenario, however, the basic approach fails due to
strong dependencies between variables. For example,
$\mtb{e} \wedge \notmatch{\tg{e}}{\bg{e}^\prime}$
and $\match{\tg{e}}{\bg{e}^\prime} \wedge \notmatch{\tg{e}}{\bg{e}}$
may both be possible states, but due to Rule~\ref{rule:mutualexclusion} (mutual exclusion)
Gibbs sampling cannot reach one state from the other.

We thus apply Blocked Gibbs sampling, where we directly sample from
the true joint distribution of a subset of interdependent variables. For each
entity $\tg{e}$ in the target ontology, we create a block that contains
predicates $\match{\tg{e}}{\bg{e}^\prime}$ for all entity mapping
candidates ${\bg{e}^\prime}$ of $\tg{e}$.
We create similar blocks for each type $\tg{t}$ and each relation $\tg{r}$.

Since we are only interested in determining the MAP assignment to
$\mathbf{X}$, but not its probability, we ignore the partition
function $Z$ and score samples by the unnormalized objective,
keeping track of the best sample found so far. For details see
Algorithm~\ref{alg1}.

%In Gibbs sampling, one typically keeps track of sampling frequencies
%to determine the MAP solution.
%Since we are not

%Suppose $X$ is the set of predicates, the probability distribution
%over possible assignment $X=x$ specified by the ground Markov Logic
%Network \cite{richardson-domingos06} is given by

%\[P(X=x) = \frac{1}{Z} \exp \left(\sum_i w_i n_i(x)\right)\]

%where $w_i$ is the weight of the rule and $n_i(x)$ is the number of
%true grounds of that rule in assignment $x$. The optimal assignment
%is an assignment maximizing $P(X=x)$, i.e. the total weight of all
%satisfied rules.

%\comment{One can cast the problem above into Weighted MaxSat
%problem. But...}

%Gibbs sampling is a standard way to do statistic inference,
%including estimating the distribution of Markov Logic. Gibbs
%sampling is a kind of Markov chain Monte Carlo algorithm. The point
%of Gibbs sampling is that it is much simpler to sample the
%conditional distribution (i.e. fix the value of some variables and
%then estimate the others) than the joint distribution. Then the
%samples approximate the joint distribution of all variables.

%In our ontology mapping problem, suppose target relation $\tg{r}$
%has $k$ candidates $\{\bg{r}_1, \ldots \bg{r}_k\}$, then there are
%$k$ predicates in $X$, we denote the subset as $X_r$. $X_r$ has
%$2^k$ different assignment. \sys\ has special interests on them
%because each assignment means a different union strategy from
%$\tg{r}$. For example, if all predicates in $X_r$ are true, $r$ will
%map to the union of all candidates. When $k$ is relatively small, we
%can do accurate inference conditioned on other predicates. That is
%to say, for each of $2^k$ assignments, we explicitly calculate

%\begin{equation}
%\label{eq_gibbssampling}
%P(X_r=x_r|x_{-r}^*) = \frac{\exp(\sum_i w_i n_i(x_r,x_{-r}^*))}{\sum_{x_r^\prime}\exp(\sum_i w_i n_i(x_r^\prime,x_{-r}^*))}
%\end{equation}

%where $x_r$ is an assignment of $X_r$, $x_{-r}^*$ is the most recent
%sampled value of all other predicates. $n_i(x_r,x_{-r}$ is the
%number of true groundings of the rule $i$ in assignment $x_r \cup
%x_{-r}^*$.

%The process we applied is \emph{blocked Gibbs
%sampling}~\cite{Ishwaran_gibbssampling}: group several predicates
%together and samples from their joint distribution conditioned on
%all other variables. We describe it in Algorithm \ref{alg1}.

%\renewcommand{\algorithmicrequire}{\textbf{Input:}}
%\renewcommand{\algorithmicensure}{\textbf{Output:}}

%\begin{algorithm}
%\caption{} \label{alg1}
%\begin{algorithmic}
%\REQUIRE $\mathbf{X}$: variables for all predicates $\mtb{r}$, $\mtb{e}$ and $\mtb{t}$, \\
%    $w_i$: weight for rule $i$, \\
%    $n_i(\mathbf{x})$: number of true groundings of rule $i$ under assignment $\mathbf{x}$, \\
%    $\mathbf{x}^{(0)}$: initial assignment, \\
%    $T$: number of samples
%\ENSURE $\mathbf{x}_{best} \approx \argmax P(\mathbf{X}=\mathbf{x})$%a world $x$ with highest probability $P(X=x)$
%\STATE $\mathbf{x}_{best} \leftarrow \mathbf{x}^{(0)}$
%\FOR{$j = 1 \ldots T$}
%    \STATE $\mathbf{x}^{(j)} \leftarrow \mathbf{x}^{(j-1)}$
%    \FORALL{objects $\tg{o}$ in target ontology}
%        \STATE %$\{o_1\ldots o_k\}$ denote $\tg{o}$'s mapping candidates, and \\
%               $\mathbf{X}_o \leftarrow \{ \ldots, \tg{o} \cong o_i, \ldots \}$ where $o_i$ all candidates for $\tg{o}$ \\
%    \STATE $\mathbf{X}_{-o} \leftarrow \mathbf{X} \setminus \mathbf{X}_o$
%    \STATE $\mathbf{x}_{-o} \leftarrow$ assignment to variables $\mathbf{X}_{-o}$ under $\mathbf{x}^{(j)}$
%%                 $\tg{o}$'s mapping candidates $\{o_1\ldots o_k\}$\\
%%        \STATE get $o$'s mapping candidate $\{o_b^1\ldots o_b^k\}$ in the background ontology, denote the corresponding variables as $X_o$;
%%        \STATE $\mathbf{x}_{-o} \leftarrow \mathbf{x}^{(j)} \setminus \mathbf{X}_o$, i.e. the current assignment to other variables than $\mathbf{X}_o$
%%denote current assignment to $\mathbf{x}^{(j)} \setminus \mathbf{X}_o$
%        %\STATE set $x_{-o}^*$ from the assignment $X^{(i)}$;
%        \FORALL{assignments $\mathbf{x}_o$ to $\mathbf{X}_o$}
%            %\STATE get $p(x_o|x_{-o}^*)$ by Equation \ref{eq_gibbssampling};
%            \STATE $p(\mathbf{x}_o|\mathbf{x}_{-o}) \leftarrow \frac{\exp(\sum_i w_i n_i(\mathbf{x}_o,\mathbf{x}_{-o}))}{\sum_{\mathbf{x}_o^\prime}\exp(\sum_i w_i n_i(\mathbf{x}_o^\prime,\mathbf{x}_{-o}))}$
%        \ENDFOR
%        \STATE $\mathbf{x}_o^* \leftarrow$ sample from distribution $p(\mathbf{X}_o|\mathbf{x}_{-o})$;
%        \STATE $\mathbf{x}^{(j)}\leftarrow (\mathbf{x}_o^*,\mathbf{x}_{-o})$
%        \IF{$\sum_i w_i n_i(\mathbf{x}_{best}) < \sum_i w_i n_i(\mathbf{x}^{(j)})$}
%            \STATE $\mathbf{x}_{best} \leftarrow \mathbf{x}^{(j)}$;
%        \ENDIF
%    \ENDFOR
%\ENDFOR
%\RETURN $\mathbf{x}_{best}$
%\end{algorithmic}
%\end{algorithm}

The initial sample $\mathbf{x}^{(0)}$ is obtained greedily by
considering only rules having a single predicate, considering only
atomic background relations (no views), and mapping each target
object to only a single object in the background ontology.


%Gibbs sampling often requires very big $T$ to estimate the joint
%distribution. Since \sys\ is more interested in the ontology
%mapping, it could start the iteration with a greedy $x^{(0)}$ to
%speed up convergence. In this paper, the greedy $x^{(0)}$ is
%obtained by (1) only considering rules having one predicates; (2)
%only mapping the target object to one best object. Since there is no
%joint inference in this greedy version, the result is deterministic
%and come out very quick.
