\documentclass[11pt,a4paper]{uebung}

\usepackage[british]{babel}
\usepackage{epsfig}
\usepackage{rotate}
\usepackage{amsmath,amsthm,amssymb}
\usepackage{color}
\makeatletter\let\@amsfonts=P\makeatother
\usepackage{graphicx}
\usepackage{typearea}
\usepackage{multicol}
\usepackage{amsfonts}
\usepackage[nounderscore]{syntax}
\usepackage{enumitem}
\newcommand{\comment}[1]{\marginpar{\small{\bf Comment:} #1}}

\usepackage{tikz}
\usetikzlibrary{shapes,arrows,backgrounds,%
matrix,patterns,arrows,decorations.pathmorphing,decorations.pathreplacing,%
positioning,fit,calc,decorations.text,shadows%
}

\newcommand{\solution}[1]{\par {\bf Solution:}\\#1}



%put your Matrikelnummer here instead of the XXXXXXXX
% if your group has less than 3 members, just delete the remaining XXXXXXXX
\newcommand\matrikelnummerA[0]{0627595}
\newcommand\matrikelnummerB[0]{0826407}
\newcommand\matrikelnummerC[0]{0803543}
%put your Matrikelnummer here instead of the XXXXXXXX



\def\cT{\mathcal{T}}


\begin{document}
\newcommand{\Vorlesung}{Formal Methods in Computer Science}
\newcommand{\Semester}{SS 2012}
\newcommand{\Prof}{Uwe Egly}
\newcommand{\AssisA}{Antonius Weinzierl}
\newcommand{\AssisB}{}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\Uebungsblatt{2 (10 points)}{
  \begin{tabular}{rl}
   Matrikelnummer(n): &\matrikelnummerA \\
   &\matrikelnummerB \\
   &\matrikelnummerC
  \end{tabular}
}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\Aufgabe[Tseitin Transformation \hfill \bf (0.5 + 1 + 1.5 points)]

\begin{enumerate}
\item Extend Tseitin's transformation for the connectives $\leftrightarrow$
  (equivalence) and $\oplus$ (XOR). Find the necessary clauses for the new schemes
  $l_i \leftrightarrow (l_{i'} \leftrightarrow l_{i''})$ and $l_k
  \leftrightarrow (l_{k'} \oplus l_{k''})$.
  
  \solution{
  
  \textbf{Associated clauses for the equivalence scheme $l_i \leftrightarrow (l_{i'} \leftrightarrow l_{i''})$:}
  
  We find the associated clauses by transforming the above equivalence scheme into CNF which results into:
  $(\neg l_i \vee \neg l_{i'} \vee l_{i''}) \wedge (\neg l_i \vee l_{i'} \vee \neg l_{i''}) \wedge (l_i \vee \neg l_{i'} \vee \neg l_{i''}) \wedge (l_i \vee l_{i'} \vee l_{i''})$
  
  Therefore we get the 4 clauses:\\
  \indent \quad $C_1 = \neg l_i \vee \neg l_{i'} \vee l_{i''}$\\
  \indent \quad $C_2 = \neg l_i \vee l_{i'} \vee \neg l_{i''}$\\
  \indent \quad $C_3 = l_i \vee \neg l_{i'} \vee \neg l_{i''}$\\
  \indent \quad $C_4 = l_i \vee l_{i'} \vee l_{i''}$\\
  
  \textbf{Associated clauses for the equivalence scheme $l_k\leftrightarrow (l_{k'} \oplus l_{k''})$:}
  
  We find the associated clauses by transforming the above equivalence scheme into CNF which results into:
  $(\neg l_k \vee \neg l_{k'} \vee \neg l_{k''}) \wedge (\neg l_k \vee l_{k'} \vee l_{k''}) \wedge (l_k \vee \neg l_{k'} \vee l_{k''}) \wedge (l_k \vee l_{k'} \vee \neg l_{k''})$
  
  Therefore we get the 4 clauses:\\
  \indent \quad $C_1 = \neg l_k \vee \neg l_{k'} \vee \neg l_{k''}$\\
  \indent \quad $C_2 = \neg l_k \vee l_{k'} \vee l_{k''}$\\
  \indent \quad $C_3 = l_k \vee \neg l_{k'} \vee l_{k''}$\\
  \indent \quad $C_4 = l_k \vee l_{k'} \vee \neg l_{k''}$\\

\textit{Detailed deductions from the equivalences to the clauses are attached in Figure~\ref{fig:deductions1a}.}
  
  \textbf{Extension of Tseitin's transformation}:
  
  To extend Tseitin's transformation we simply add the two new equivalence schemes with their associated clauses to our existing equivalence schemes.
  Both of them need 4 clauses in CNF, therefore we have to add a default clause $C_4$ which equals $\top$ for all equivalence schemes which need less than 4 clauses in CNF.
  }

\item Apply Tseitin's transformation to the following formula $\psi$: $a \rightarrow
  \big( b \lor \neg (a \leftrightarrow c)\big)$.
  
  Hint: You do not need to introduce labels for propositions $a,b,$ and $c$.
  \solution{
  
  \textbf{Labeled formula tree:}
  
  The formula tree and the assigned labels for $\psi$ are given in Figure
 \ref{fig:tseitin1}.

 \begin{figure}[ht]
   \centering
   \begin{tikzpicture}
     \node[label={[red]above:$l_4$}] {$\rightarrow$}
     child {
       node{$a$}
     }
     child {
       node[label={[red]right:$l_3$}] {$\vee$}
       child {
         node{$b$}
       }
       child {
         node[label={[red]right:$l_2$}] {$\neg$}
         child {
           node[label={[red]right:$l_1$}] {$\leftrightarrow$}
           child {
              node{$a$}
         	  }
	  child {
              node{$c$}
         	  }
         }
       }
     }
     ;
   \end{tikzpicture}
   \caption{Formula tree for $\psi$ and assigned labels in red.}
   \label{fig:tseitin1}
 \end{figure}
 
 \textbf{The resulting equivalences are:}
 \begin{align*}
   D_1: l_1 &\leftrightarrow (a \leftrightarrow c)\\
   D_2: l_2 &\leftrightarrow \neg l_1\\
   D_3: l_3 &\leftrightarrow (b \vee l_2)\\
   D_4: l_4 &\leftrightarrow (a \rightarrow l_3)\\
 \end{align*}
 
 \textbf{Transforming those to CNF yields:}
 \begin{align*}
   &\mathbf{Equivalences} &&\mathbf{C_1} && \mathbf{C_2} && \mathbf{C_3} && \mathbf{C_4}\\
    &\mathbf{l_1 \leftrightarrow (a \leftrightarrow c)} && \neg l_1 \vee \neg a \vee c && \neg l_1 \vee a \vee \neg c && l_1 \vee \neg a \vee \neg c && l_1 \vee a \vee c\\
   &\mathbf{l_2 \leftrightarrow \neg l_1} && \neg l_2 \vee \neg l_1 && l_2 \vee l_1 && \top && \top\\
   &\mathbf{l_3 \leftrightarrow (b \vee l_2)} && \neg l_3 \vee b \vee l_2 && l_3 \vee \neg b && l_3 \vee \neg l_2 && \top\\
   &\mathbf{l_4 \leftrightarrow (a \rightarrow l_3)} && \neg l_4 \vee \neg a \vee l_3 && l_4 \vee a && l_4 \vee \neg l_3 && \top\\
 \end{align*}
 
\textit{Detailed deductions from the equivalences to the clauses are attached in Figure~\ref{fig:deductions1b}.}

 \textbf{For $i = 1, 2, \ldots, 4$:}
 
 $D_i$ is built by conjunction of the 4 associated clauses in the corresponding line from the table above.
 $\psi$ is valid iff $(\bigwedge D_i) \rightarrow l_4$ is valid iff $\bigwedge D_i \wedge \neg l_4$ is unsatisfiable.\\
 \textbf{$\psi$ is satisfiable iff $(\bigwedge D_i) \wedge l_4$ is satisfiable.}\\

  }
\newpage

\item Let $\psi$ be a propositional formula and $D^\psi$ the set of clauses
  resulting from Tseitin's transformation on $\psi$. Prove that the following
  holds:
  
  \centerline{If $\psi$ is satisfiable then $D^\psi$ is satisfiable.}

  You only need to prove this for the connectives $\land$ and $\neg$.
  %\lor,\neg, \rightarrow$.
  Use the below clause schemes, which introduce a new label for every boolean
  variable.
  \begin{align*}
    L_a \leftrightarrow a && (\neg L_a \lor a)&& (L_a \lor \neg a)\\
    L_\phi \leftrightarrow (L_1 \land L_2) && (\neg L_\phi \lor L_1)&& (\neg
    L_\phi \lor L_2)&& (L_\phi \lor \neg L_1 \lor \neg L_2)\\
    L_\phi \leftrightarrow \neg L_1 && (\neg L_\phi \lor \neg L_1)&& (L_\phi
    \lor L_1)
  \end{align*}
  
    \solution{
  We distinguish between three cases. Every formula can be built from subformulas having the same structure as these cases. By proving these three basic cases we therefore prove the original statement.
  
  \textbf{Case 1: $\psi = a$:}
  
  Let $a$ be an atom. Therefore $D^\psi = L_a \leftrightarrow a = (\neg L_a \lor a) \land (L_a \lor \neg a)$.
  
  To show: If $\psi$ is satisfiable then $D^\psi$ is also satisfiable.
  
  Left-side: $I(\psi) = 1$ iff $I(a) = 1$\\
  Right-side: $I(D^\psi) = 1$ iff $I(\neg L_a \lor a) = 1$ and $I(L_a \lor \neg a) = 1$. $I(\neg L_a \lor a) = 1$ because $I(a) = 1$. $I(L_a \lor \neg a) = 1$ because $L_a \leftrightarrow a$.
  
  So we have shown that both the left-side and the right-side are satisfiable.
  
  \textbf{Case 2: $\psi =  \neg L_\phi$:}
  
  Let $L_\phi$ be an arbitrary labeled formula. Therefore $D^\psi  = L_\psi \leftrightarrow \neg L_\phi = (\neg L_\psi \lor \neg L_\phi) \land (L_\psi \lor L_\phi)$
  
  To show: If $\psi$ is satisfiable then $D^\psi$ is also satisfiable.
  
  Left-side: $I(\psi) = 1$ iff $I(L_\phi) = 0$\\
  Right-side: $I(D^\psi) = 1$ iff $I(\neg L_\psi \lor \neg L_\phi) = 1$ and $I(L_\psi \lor L_\phi) = 1$. $I(\neg L_\psi \lor \neg L_\phi) = 1$ because $I(L_\phi) = 0$. $I(L_\psi \lor L_\phi) = 1$ because $L_\psi \leftrightarrow \neg L_\phi$.
  
  So we have shown that both the left-side and the right-side are satisfiable.
  
  \textbf{Case 3: $\psi = L_\phi \land L_\eta$:}
  
  Let $L_\phi$ and $L_\eta$ be arbitrary labeled formulas. Therefore $D^\psi = L_\psi \leftrightarrow (L_\phi \land L_\eta) = (\neg L_\psi \lor L_\phi) \land (\neg L_\psi \lor L_\eta) \land (L_\psi \lor \neg L_\phi \lor \neg L_\eta)$

  To show: If $\psi$ is satisfiable then $D^\psi$ is also satisfiable.

  Left-side: $I(\psi) = 1$ iff $I(L_\phi) = 1$ and $I(L_\eta) = 1$\\
  Right-side: $I(D^\psi) = 1$ iff $I(\neg L_\psi \lor L_\phi) = 1$, $I(\neg L_\psi \lor L_\eta) = 1$ and $I(L_\psi \lor \neg L_\phi \lor \neg L_\eta) = 1$.
  $I(\neg L_\psi \lor L_\phi) = 1$ because $I(L_\phi) = 1$. $I(\neg L_\psi \lor L_\eta) = 1$ because $I(L_\eta) = 1$. $I(L_\psi \lor \neg L_\phi \lor \neg L_\eta) = 1$ because $L_\psi \leftrightarrow (L_\phi \land L_\eta)$.
  
  So we have shown that both the left-side and the right-side are satisfiable.
}
\end{enumerate}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newpage
\Aufgabe[Implication Graphs \hfill \bf (2+1+1.5 points)]
\begin{enumerate}
\item Let $\mathcal{D}$ be the following set of clauses:
  \begin{align*}
    c_1:& (A \lor B)\\
    c_2:& (A \lor G \lor H)\\
    c_3:& (\neg B \lor \neg D \lor E)\\
    c_4:& (E \lor F)\\
    c_5:& (\neg F \lor \neg G \lor D)\\
    c_6:& (\neg C \lor G \lor J)\\
    c_7:& (\neg J \lor \neg H)
  \end{align*}
  Draw the implication graph resulting from $\mathcal{D}$ with decisions
  $A=0@1$, $C=1@2$, $E=0@3$. Find the first UIP, and learn a new clause using
  the first-UIP scheme (use resolution).

  \solution{
  
	The resulting implication graph is shown in Figure \ref{fig:implication_graph}.
	
  \begin{figure}[h]
    \centering
    \begin{tikzpicture}
    
    	\coordinate (a) at (0,4);
    	\coordinate (b) at (-2,2);
    	\coordinate (c) at (0,-4);
    	\coordinate (d) at (-4,0);
    	\coordinate (e) at (-6,-2);
    	\coordinate (f) at (-2,-2);
    	\coordinate (g) at (0,0);
    	\coordinate (h) at (2,2);
    	\coordinate (j) at (2,-2);
    	\coordinate (k) at (4,0); % k == conflict node \kappa
    	
    	\node[fill,circle,label=left:{$A=0@1$}] (A) at (a) {};
    	\node[fill,circle,label=left:{$B=1@1$}] (B) at (b) {};
    	\node[fill,circle,label=left:{$C=1@2$}] (C) at (c) {};
    	\node[fill,circle,label=left:{$D=0@3$}] (D) at (d) {};
    	\node[fill,circle,label=left:{$E=0@3$}] (E) at (e) {};
    	\node[fill,circle,label=below:{$F=1@3$}] (F) at (f) {};
    	\node[fill,circle,label=right:{$G=0@3$}] (G) at (g) {};
    	\node[fill,circle,label=right:{$H=1@3$}] (H) at (h) {};
    	\node[fill,circle,label=right:{$J=1@3$}] (J) at (j) {};
    	\node[fill,circle,label=right:{$\kappa$}] (K) at (k) {};
    	
    	\draw[->,thick] (A) --node[label=above:$c_1$] {} (B);
    	
    	\draw[->,thick] (B) --node[label=above:$c_3$] {} (D);
    	\draw[->,thick] (E) --node[label=above:$c_3$] {} (D);
    	
    	\draw[->,thick] (A) --node[label=above:$c_2$] {} (H);
    	\draw[->,thick] (G) --node[label=above:$c_2$] {} (H);    
    	
    	\draw[->,thick] (E) --node[label=above:$c_4$] {} (F);
    	
    	\draw[->,thick] (D) --node[label=above:$c_5$] {} (G);
    	\draw[->,thick] (F) --node[label=above:$c_5$] {} (G);
    	
    	\draw[->,thick] (C) --node[label=above:$c_6$] {} (J);
    	\draw[->,thick] (G) --node[label=above:$c_6$] {} (J);
    	
    	\draw[->,thick] (J) --node[label=above:$c_7$] {} (K);
    	\draw[->,thick] (H) --node[label=above:$c_7$] {} (K);

    \end{tikzpicture}
    \caption{Implication Graph}
    \label{fig:implication_graph}  
  \end{figure}
  
  Variable assignment order: A, B, C, E, D, F, G, H, J, leading to $\kappa$.\\
  E and G are UIPs, but since G is closer to $\kappa$, G is the first UIP.\\
  A new clause is learned (using resolution) as follows:\\
	\begin{center}
	  $r_1 = res(c_7,J,c_6) = (\neg H \vee \neg C \vee G)$\\
	  $r_2 = fac(res(r_1,H,c_2)) = (A \vee G \vee \neg C)$ (= conflict clause)
	\end{center}

	}

\newpage
\item Prove that in a conflict graph the first UIP is uniquely defined, i.e.,
  prove that there is exactly one node in the graph which is a first UIP.

  \solution{
  We will use the technique of ``Proof by contradiction''.
  
  We know that each conflict graph has at least one first UIP. This is obvious as in the simplest case the current decision node itself is the first UIP. We denote the first UIP as node $u$.
  
  \textbf{Assumption:} 
  
	\begin{center}
		\textit{There is a node $v$ in the conflict graph that is also a 1st UIP (in addition to $u$).}
	\end{center}
  
  Since both $u$ and $v$ are first UIPs, they have the same distance to the conflict node $\kappa$. Thus, 
   
	\begin{center}
		$distance(u,\kappa) = distance(v,\kappa)$
	\end{center}
  
  From the definition of a UIP we know that each path starting at the current decision node $s$ must contain all UIPs. Therefore, there must be either a path $p_{v,u} = v, x_1, x_2, \ldots, x_l, u$ or a path $p_{u,v} = u, x_1, x_2, \ldots, x_l, v$.
  
  Since every path between two distinct nodes has at least length 1, it holds that: 
  
	\begin{center}
		$distance(u,v) \geq 1$.
	\end{center}
  
  Now, combining the results, each path from $s$ to $\kappa$ must be of the following form (where $k$, $l$ and $n$ are bounded, but may vary for each path):
  
	\begin{center}
		$(1): p_i = \textbf{s}, x_1, x_2, \ldots, x_k, \textbf{v}, x_{k+1}, \ldots, x_l, \textbf{u}, x_{l+1}, \ldots, x_n, \boldsymbol{\kappa}$
		
		or (depending on the order of $u$ and $v$):
		
		$(2): p_i = \textbf{s}, x_1, x_2, \ldots, x_k, \textbf{u}, x_{k+1}, \ldots, x_l, \textbf{v}, x_{l+1}, \ldots, x_n, \boldsymbol{\kappa}$	
	\end{center}	Now we observe that the following holds: 
	
	for $(1)$: $distance(v,\kappa) = distance(v,u) + distance(u,\kappa) > distance(u,\kappa)$, meaning that the distance from $v$ to $\kappa$ is greater than the distance from $u$ to $\kappa$.
	
	for $(2)$: $distance(u,\kappa) = distance(u,v) + distance(v,\kappa) > distance(v,\kappa)$, meaning that the distance from $u$ to $\kappa$ is greater than the distance from $v$ to $\kappa$.
	
	Thus, either way we found a contradiction to our initial assumption, which proves the assumption wrong and shows that there can indeed be no more than one first UIP in a conflict graph.
    
  }

\newpage 
\item Let $\mathcal{C}$ be a set of clauses and $G$ a conflict graph with
  respect to $\mathcal{C}$. Prove: if a clause $C_l$ is learned following the
  first-UIP scheme, then $C_l$ is a consequence of $\mathcal{C}$.

  \solution{
  
  We have to show that $\mathcal{C} \models C_l$.
  
  According to the first-UIP scheme the clause $C_l$ is computed out of the set of clauses $\mathcal{C}$ by applying the \textbf{propositional binary resolution rule} and the \textbf{propositional factoring rule} to a specific subset $\mathcal{C}_s$ of $\mathcal{C}$.
  %There are no other clauses involved in the computation of $C_l$ than clauses from $C_s$.
  
  The application of the \textbf{propositional binary resolution rule} to two arbitrary clauses $C_1$ and $C_2$ always leads to a clause $C_3$ for which holds: $C_1, C_2 \models C_3$.
  Also the application of the \textbf{propositional factoring rule} to an arbitrary clause $C_4$ always leads to a clause $C_5$ for which holds: $C_4 \models C_5$.
  
  Therefore we can conclude that, if according to the first-UIP scheme the clause $C_l$ is learned from $\mathcal{C}_s$, it holds that $\mathcal{C}_s \models C_l$.
  
   Because of the \textbf{monotonicity of propositional logic} we know that if $\mathcal{C}_s \models C_l$ it follows that $\mathcal{C}_s  \cup \{\phi\} \models C_l$ for an arbitrary propositional formula $\phi$.
  
  Because $\mathcal{C}_s \models C_l$ and $\mathcal{C}_s \subseteq \mathcal{C}$ it follows from \textbf{monotonicity of propositional logic} that: $\mathcal{C} \models C_l$.
  
  }
\end{enumerate}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newpage
\Aufgabe[Sparse Method \hfill \bf (1.5 points)]
Apply the Sparse Method including preprocessing on the formula $\varphi^E$
below to obtain a propositional formula.
\begin{displaymath}
  (x_1 \neq x_2 \lor x_2=x_3 ) \land \big[ (x_2 \neq x_4 \land x_3=x_4
  \land x_4=x_5)
  \lor (x_6 \neq x_5 \land x_6=x_7 \land x_7=x_3)\big]
\end{displaymath}

  \solution{

Let $\varphi^E$ be the above formula.\\
\textit{Remark: $\varphi^E$ does not contain any constants or boolean variables which could have been replaced by introducing new variables.}\\
The equality graph $G^E(\varphi^E)$ is given in Figure \ref{fig:eg1}. Dashed lines represent equality edges while solid lines represent disequality edges.

  \begin{figure}[h]
    \centering
    \begin{tikzpicture}
	\node(x1){$x_1$};
	\node[right=of x1](x2){$x_2$};
	\node[right=of x2](x3){$x_3$};
	\node[below=of x2](x4){$x_4$};
	\node[right=of x4](x5){$x_5$};
	\node[right=of x5](x6){$x_6$};
	\node[right=of x3](x7){$x_7$};

	\draw[] (x1) -- (x2);
	\draw[dashed] (x2) -- (x3);
	\draw[] (x2) -- (x4);
	\draw[dashed] (x3) -- (x4);
	\draw[dashed] (x4) -- (x5);
	\draw[] (x5) -- (x6);
	\draw[dashed] (x6) -- (x7);
	\draw[dashed] (x3) -- (x7);

    \end{tikzpicture}
    \caption{$G^E(\varphi^E)$, dashed lines represent equality, solid lines disequality.}
    \label{fig:eg1}
  \end{figure}

Edge $(x_1,x_2)$ is not part of a simple contradictory cycle (i.e. a cycle with exactly one disequality edge where only the start vertex is repeated), therefore we set this edge to $true$ in $\varphi^E$  and obtain $\varphi^E_2$ as:
\begin{displaymath}
  (true \lor x_2=x_3 ) \land \big[ (x_2 \neq x_4 \land x_3=x_4
  \land x_4=x_5)
  \lor (x_6 \neq x_5 \land x_6=x_7 \land x_7=x_3)\big]
\end{displaymath}

Propositional simplification of $\varphi^E_2$ leads to $\varphi^E_3$ as:
\begin{gather*}
  (x_2 \neq x_4 \land x_3=x_4
  \land x_4=x_5)
  \lor (x_6 \neq x_5 \land x_6=x_7 \land x_7=x_3)
\end{gather*}

We draw the equality graph $G^E(\varphi^E_3)$, given in Figure \ref{fig:eg2}.

  \begin{figure}[h]
    \centering
    \begin{tikzpicture}
	\node(x2){$x_2$};
	\node[right=of x2](x3){$x_3$};
	\node[below=of x2](x4){$x_4$};
	\node[right=of x4](x5){$x_5$};
	\node[right=of x5](x6){$x_6$};
	\node[right=of x3](x7){$x_7$};

	\draw[] (x2) -- (x4);
	\draw[dashed] (x3) -- (x4);
	\draw[dashed] (x4) -- (x5);
	\draw[] (x5) -- (x6);
	\draw[dashed] (x6) -- (x7);
	\draw[dashed] (x3) -- (x7);

    \end{tikzpicture}
    \caption{$G^E(\varphi^E_3)$, dashed lines represent equality, solid lines disequality.}
    \label{fig:eg2}
  \end{figure}

Now $x_2 \neq x_4$ is not contained in any simple contradictory cycle and we set it to $true$ and apply propositional simplification. The result is $\varphi^E_4$ as:
\begin{gather*}
  (x_3=x_4
  \land x_4=x_5)
  \lor (x_6 \neq x_5 \land x_6=x_7 \land x_7=x_3)
\end{gather*}

As there are no more literals not occurring in a simple contradictory cycle, we stop preprocessing and build the propositional skeleton $e(\varphi^E_4)$ and the corresponding transitivity constraints $B_t$.\\
\textit{Remark: $\varphi^E$ is satisfiable iff $e(\varphi^E)$ and $B_t$ are satisfiable.}\\

$e(\varphi^E_4)$ is built by replacing equality literals $x_i=x_j$ with $e_{i,j}$, so
\begin{gather*}
  e(\varphi^E_4) = (e_{3,4}
  \land e_{4,5})
  \lor (\neg e_{6,5} \land e_{6,7} \land e_{7,3})
\end{gather*}

To derive a (small) set of transitivity constraints ($B_t$), we take the nonpolar equality graph $G^E_{NP}(\varphi^E_4)$ (i.e. neglecting the negation signs) and make it chordal. $G^E_{NP}$ is given in Figure \ref{fig:npeg1}.

  \begin{figure}[h]
    \centering
    \begin{tikzpicture}
	\node(x3){$x_3$};
	\node[below left=of x3](x4){$x_4$};
	\node[right=of x4](x5){$x_5$};
	\node[right=of x5](x6){$x_6$};
	\node[right=of x3](x7){$x_7$};

	\draw[] (x3) -- (x4);
	\draw[] (x4) -- (x5);
	\draw[] (x5) -- (x6);
	\draw[] (x6) -- (x7);
	\draw[] (x3) -- (x7);

    \end{tikzpicture}
    \caption{$G^E_{NP}$}
    \label{fig:npeg1}
  \end{figure}

To make it chordal, we have several alternatives, i.e.\ either to add $(x_3,x_5)$, $(x_3,x_6)$ or to add $(x_3,x_5)$, $(x_5,x_7)$ (other alternatives are possible as well---depending on the chosen order of the vertices). We add the edges $(x_3,x_5)$ and $(x_3,x_6)$. This leads to a graph where no simple cycle has length greater than $3$, i.e., the graph in Figure~\ref{fig:cg1} is ``triangular'' and thus chordal.

  \begin{figure}[h]
    \centering
    \begin{tikzpicture}
	\node(x3){$x_3$};
	\node[below left=of x3](x4){$x_4$};
	\node[right=of x4](x5){$x_5$};
	\node[right=of x5](x6){$x_6$};
	\node[right=of x3](x7){$x_7$};

	\draw[] (x3) -- (x4);
	\draw[] (x3) -- (x5);
	\draw[] (x3) -- (x6);
	\draw[] (x4) -- (x5);
	\draw[] (x5) -- (x6);
	\draw[] (x6) -- (x7);
	\draw[] (x3) -- (x7);

    \end{tikzpicture}
    \caption{$G^E_{NP}$ made chordal by adding edges $(x_3,x_5)$ and $(x_3,x_6)$.}
    \label{fig:cg1}
  \end{figure}

For each triangle in this graph, we derive the according transitivity constraints to obtain $B_t$.
  \begin{eqnarray*}
    B_t:= &(e_{3,4} \land e_{4,5} \rightarrow e_{3,5}) \land
    (e_{4,5} \land e_{3,5} \rightarrow e_{3,4}) \land
    (e_{3,5} \land e_{3,4} \rightarrow e_{4,5}) \land & \quad \quad \text{for }\triangle
    (x_3,x_4,x_5)\\
    %
    &(e_{3,5} \land e_{5,6} \rightarrow e_{3,6}) \land
    (e_{5,6} \land e_{3,6} \rightarrow e_{3,5}) \land
    (e_{3,6} \land e_{3,5} \rightarrow e_{5,6}) \land & \quad \quad \text{for
    } \triangle (x_3,x_5,x_6)\\
    %
    &(e_{3,6} \land e_{6,7} \rightarrow e_{3,7}) \land
    (e_{6,7} \land e_{3,7} \rightarrow e_{3,6}) \land
    (e_{3,7} \land e_{3,6} \rightarrow e_{6,7}) &\quad \quad\text{for }  \triangle (x_3,x_6,x_7)
  \end{eqnarray*}

  The final formula then is $e(\varphi^E_4) \land B_t$.

}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newpage
\Aufgabe[Ackermann's Reduction \hfill \bf (1 point)]
Apply Ackermann's reduction on the following EUF-formula $\varphi$ to obtain
an EU formula:
\begin{displaymath}
  f\left(f\left(g\left(a\right),b\right),a\right) = f(g(a),b) \rightarrow \big[ f(x,y) = g(f(g(a),b)) \land g(f(a,y))=d \big]
\end{displaymath}


\solution{

For representational reasons we replace each lower case UF symbol in $\varphi$ with the corresponding upper case symbol, i.e.\ $f$ is replaced by $F$ and $g$ is replaced by $G$, and get $\varphi'$:
\begin{displaymath}
  F(F(G(a),b),a) = F(G(a),b) \rightarrow \big[ F(x,y) = G(F(G(a),b)) \land G(F(a,y))=d \big]
\end{displaymath}

Since $\varphi$ and $\varphi'$ are equal, we continue to apply Ackermann's reduction on $\varphi'$.\\

  We first number the instances of the UFs inwards-to-outwards, left-to-right:
  \begin{gather*}
  F_2(F_1(G_1(a),b),a) = F_1(G_1(a),b) \rightarrow \big[ F_3(x,y) = G_2(F_1(G_1(a),b)) \land G_3(F_4(a,y))=d \big]
  \end{gather*}

We now associate to each function instance $F_i$ resp. $G_i$ a new term variable $f_i$ resp. $g_i$. This already gives $\cT$ for the numbered instances. For example:
  \begin{align*}
    \cT (F_1(G_1(a),b)) &= f_1\\
    \cT (F_2(F_1(G_1(a),b),a)) &= f_2\\
    \cT (F_3(x,y)) &= f_3 \\
    \cT (F_4(a,y)) &= f_4 \\
    \cT (G_1(a)) &= g_1 \\
    \cT (G_2(F_1(G_1(a),b))) &= g_2\\
    \cT (G_3(F_4(a,y))) &= g_3
  \end{align*}

  So $flat^E:= f_2 = f_1 \rightarrow (f_3 = g_2 \land g_3 = d)$.\\

  Based on $\cT$ we re-introduce the effects of functional constraints by constructing $FC^E :=$
  \def\impl{\rightarrow}
  \begin{align*}
    (( g_1 = f_1 \land b = a) \impl& f_1 = f_2) \land \\
    (( g_1 = x \land b = y) \impl& f_1 = f_3) \land \\
    (( g_1 = a \land b = y) \impl& f_1 = f_4) \land \\
    (( f_1 = x \land a = y) \impl& f_2 = f_3) \land \\
    (( f_1 = a \land a = y) \impl& f_2 = f_4) \land \\
    (( x = a \land y = y) \impl& f_3 = f_4) \land \\
    (a = f_1 \impl& g_1 = g_2) \land \\
    (a = f_4 \impl& g_1 = g_3) \land \\
    (f_1 = f_4 \impl& g_2 = g_3)
  \end{align*}

  Finally $\varphi'^E := FC^E \impl flat^E$.

}

\newpage

\begin{figure}[h]
\centerline{\fbox{\includegraphics[scale=.25]{1a}}}
\caption{Deductions from the equivalences to the clauses from exercise 1a.}
\label{fig:deductions1a}
\end{figure}

\begin{figure}[h]
\centerline{\fbox{\includegraphics[scale=.3]{1b}}}
\caption{Deductions from the equivalences to the clauses from exercise 1b.}
\label{fig:deductions1b}
\end{figure}


\end{document}
