% This is "sig-alternate.tex" V2.0 May 2012
% This file should be compiled with V2.5 of "sig-alternate.cls" May 2012
%
% This example file demonstrates the use of the 'sig-alternate.cls'
% V2.5 LaTeX2e document class file. It is for those submitting
% articles to ACM Conference Proceedings WHO DO NOT WISH TO
% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE.
% The 'sig-alternate.cls' file will produce a similar-looking,
% albeit, 'tighter' paper resulting in, invariably, fewer pages.
%
% ----------------------------------------------------------------------------------------------------------------
% This .tex file (and associated .cls V2.5) produces:
%       1) The Permission Statement
%       2) The Conference (location) Info information
%       3) The Copyright Line with ACM data
%       4) NO page numbers
%
% as against the acm_proc_article-sp.cls file which
% DOES NOT produce 1) thru' 3) above.
%
% Using 'sig-alternate.cls' you have control, however, from within
% the source .tex file, over both the CopyrightYear
% (defaulted to 200X) and the ACM Copyright Data
% (defaulted to X-XXXXX-XX-X/XX/XX).
% e.g.
% \CopyrightYear{2007} will cause 2007 to appear in the copyright line.
% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the copyright line.
%
% ---------------------------------------------------------------------------------------------------------------
% This .tex source is an example which *does* use
% the .bib file (from which the .bbl file % is produced).
% REMEMBER HOWEVER: After having produced the .bbl file,
% and prior to final submission, you *NEED* to 'insert'
% your .bbl file into your source .tex file so as to provide
% ONE 'self-contained' source file.
%
% ================= IF YOU HAVE QUESTIONS =======================
% Questions regarding the SIGS styles, SIGS policies and
% procedures, Conferences etc. should be sent to
% Adrienne Griscti (griscti@acm.org)
%
% Technical questions _only_ to
% Gerald Murray (murray@hq.acm.org)
% ===============================================================
%
% For tracking purposes - this is V2.0 - May 2012

\documentclass{sig-alternate}

\usepackage[utf8]{inputenc}
\input{DeclareUnicodeCharacter}
\usepackage{color}
\definecolor{shadecolor}{rgb}{.9,.9,1} 
\usepackage{url}

\usepackage{tikz}
\usetikzlibrary{matrix,arrows}
\usepackage{pgfplots}
\usetikzlibrary{plotmarks}
%\usetikzlibrary{positioning}
%\usetikzlibrary{shapes,shadows,arrows}
\usetikzlibrary{calc}
%%\pgfplotsset{compat=1.3}
%\pgfdeclarelayer{background}
%\pgfdeclarelayer{foreground}
%\pgfsetlayers{background,main,foreground}

\newcommand{\LPBs}{\mathit{LPBs}}
\newcommand{\sd}[1]{\colorbox{shadecolor}{#1}}
\newcommand{\ld}[1]{\raisebox{-1ex}[0cm][0cm]{\sd{#1}}}
\newcommand{\ud}[1]{\raisebox{1ex}[0cm][0cm]{\sd{#1}}}
\newcommand{\ub}[1]{\raisebox{1ex}[0cm][0cm]{#1}}
\newcommand{\repeatlabel}[1]{\label{#1}}
\newcommand{\defining}[1]{\textbf{#1}}
\newcommand{\bool}{\mathit{Bool}}
\newcommand{\real}{\mathbb{R}}
\newcommand{\false}{\mathit{false}}
\newcommand{\true}{\mathit{true}}
\newcommand{\nat}{\mathbb{N}}
\newcommand{\integ}{\mathbb{Z}}
\newcommand{\onetom}{{[1..m]}}
\newcommand{\onetol}{{[1..l]}} 
\newcommand{\zerotol}{{[0..l]}}   
\newcommand{\iso}{\mathfrak{I}^\leq}
\newcommand{\isop}{\mathfrak{I}^=}
\newcommand{\ineq}{\mathit{Ineq}^\leq}
\newcommand{\ineqp}{\mathit{Ineqp}^=}
\newcommand{\mono}{\mathfrak{M}^\leq}
\newcommand{\monop}{\mathfrak{M}^=}
\newcommand{\coeffSum}[2]{\sum_{i=#1}^{#2}a_ix_i}
\newcommand{\coeffSumPrime}[2]{\sum_{i=#1}^{#2}a'_ix_i}
\newcommand{\ms}[1]{\{\hspace{-0.2em}[#1]\hspace{-0.2em}\}}
\newcommand{\OP}{\mathit{OP}}
\newcommand{\Cut}[3]{\mathit{S}(#1,#2,#3)}
\newcommand{\CutT}[2]{\mathit{S}(\cdot,#1,#2)}
\newcommand{\mint}{s}
\newcommand{\maxt}{b}
\newcommand{\threshold}[2]{(#1,#2]}
\newcommand{\LPBT}[2]{}%\mathit{I}(#1,#2)\equiv}

%For pseudocode
\newcommand{\uc}{\mathit{uc}}
\newcommand{\lc}{\mathit{lc}}
\newcommand{\uak}{\mathit{u}_{ak2}}
\newcommand{\lak}{\mathit{l}_{ak2}}
\newcommand{\uPlus}{u_+}
\newcommand{\lPlus}{l_+}
\newcommand{\uminus}{u_-}
\newcommand{\lminus}{l_-}
\newcommand{\parent}{\mathit{parent}}
\newcommand{\col}{\mathit{col}}
\newcommand{\pat}{\mathit{path}}
\newcommand{\MU}{\mathit{MU}}
\newcommand{\ML}{\mathit{ML}}
\newcommand{\NU}{\mathit{NU}}
\newcommand{\NL}{\mathit{NL}}
\newcommand{\MUplus}{\mathit{MU_+}}
\newcommand{\MLplus}{\mathit{ML_+}}
\newcommand{\NUplus}{\mathit{NU_+}}
\newcommand{\NLplus}{\mathit{NL_+}}
\newcommand{\MUminus}{\mathit{MU_-}}
\newcommand{\MLminus}{\mathit{ML_-}}
\newcommand{\NUminus}{\mathit{NU_-}}
\newcommand{\NLminus}{\mathit{NL_-}}
\newcommand{\MUak}{\mathit{MU}_{ak2}}
\newcommand{\MLak}{\mathit{ML}_{ak2}}
\newcommand{\NUak}{\mathit{NU}_{ak2}}
\newcommand{\NLak}{\mathit{NL}_{ak2}}
\newcommand{\NtwoU}{\mathit{N2U}}
\newcommand{\NtwoL}{\mathit{N2L}}
\newcommand{\MUs}{\mathit{MUs}}
\newcommand{\MLb}{\mathit{MLb}}
\newcommand{\NUb}{\mathit{NUb}}
\newcommand{\NLs}{\mathit{NLs}}
\newcommand{\MUU}{\mathit{MUU}}
\newcommand{\MLU}{\mathit{MLU}}
\newcommand{\NUU}{\mathit{NUU}}
\newcommand{\NLU}{\mathit{NLU}}
\newcommand{\MUL}{\mathit{MUL}}
\newcommand{\MLL}{\mathit{MLL}}
\newcommand{\NUL}{\mathit{NUL}}
\newcommand{\NLL}{\mathit{NLL}}
\newcommand{\jump}{\mathit{jump}}
\newcommand{\repair}{\mathrm{repair}}


\newtheorem{theorem}{Theorem}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{example}[theorem]{Example}
\newtheorem{corollary}[theorem]{Corollary}

%\renewcommand{\floatpagefraction}{.9}

\begin{document}
%
% --- Author Metadata here ---
%\CopyrightYear{2007} % Allows default copyright year (20XX) to be over-ridden - IF NEED BE.
%\crdata{0-12345-67-8/90/01}  % Allows default copyright data (0-89791-88-6/97/05) to be over-ridden - IF NEED BE.
% --- End of Author Metadata ---

\title{Implementation of Two Algorithms for Threshold Synthesis}
%\subtitle{[Extended Abstract]
%\titlenote{A full version of this paper is available as
%\textit{Author's Guide to Preparing ACM SIG Proceedings Using
%\LaTeX$2_\epsilon$\ and BibTeX} at
%\texttt{www.acm.org/eaddress.htm}}}
%
% You need the command \numberofauthors to handle the 'placement
% and alignment' of the authors beneath the title.
%
% For aesthetic reasons, we recommend 'three authors at a time'
% i.e. three 'name/affiliation blocks' be placed beneath the title.
%
% NOTE: You are NOT restricted in how many 'rows' of
% "name/affiliations" may appear. We just ask that you restrict
% the number of 'columns' to three.
%
% Because of the available 'opening page real-estate'
% we ask you to refrain from putting more than six authors
% (two rows with three columns) beneath the article title.
% More than six makes the first-page appear very cluttered indeed.
%
% Use the \alignauthor commands to handle the names
% and affiliations for an 'aesthetic maximum' of six authors.
% Add names, affiliations, addresses for
% the seventh etc. author(s) as the argument for the
% \additionalauthors command.
% These 'additional authors' will be output/set for you
% without further effort on your part as the last section in
% the body of your article BEFORE References or any Appendices.

\numberofauthors{3} %  in this sample file, there are a *total*
% of EIGHT authors. SIX appear on the 'first-page' (for formatting
% reasons) and the remaining two appear in the \additionalauthors section.
%
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
\author{
\alignauthor
Christian Schilling\\
       \affaddr{Institut f\"ur Informatik}\\
       \affaddr{Universit\"at Freiburg}\\
       \affaddr{Germany}\\
       \email{schillic@informatik.uni-freiburg.de}
\alignauthor
Jan-Georg Smaus\\
       \affaddr{IRIT}
       \affaddr{Universit\'e de Toulouse}\\
       \affaddr{France}\\
       \email{smaus@irit.fr}
\alignauthor
Fabian Wenzelmann\\
       \affaddr{Institut f\"ur Informatik}\\
       \affaddr{Universit\"at Freiburg}\\
       \affaddr{Germany}\\
       \email{wenzelmf@informatik.uni-freiburg.de}
}
%Authors must remain anonymous for submission


\maketitle


\bibliographystyle{plain}
%\vspace*{-5cm}


\section{Linear Programming Approach}
Two ideas by Fabian Wenzelmann: 

\subsection{CHIP (Email 11.9.12)}

Mir ist gerade etwas eingefallen, was nicht unbedingt mit unserem
Algorithmus zu tun hat - aber vielleicht ein interessanter Ansatz w\"are.
Ich wollte es nur mal schreiben bevor ich es vergesse.

Ich habe vor Kurzem f\"ur die CSP-Klausur gelernt und da ist mir
aufgefallen, dass es die sogenannte CHIP-Constraint-Sprache gibt.

CHIP hat constraints der Art

\[
\begin{array}{rcl}
ax &=& by + c\\
ax &\leq& by + c\\
ax &\geq& by + c  
\end{array}
\]
diese Sprache ist tractable, was so viel hei\ss t wie polynomiell l\"osbar.
Das ganze ist etwas schw\"acher als linear Programming, es fehlt die
Optimierung.

Die Folien von Herrn W\"olfl sagen
``The language is still tractable if we allow for relations expressed by''
$a_1*x_1 + \ldots + a_n*x_n \geq by + c$
f\"ur $a, b, c \in \nat$.
Ich denke mal, das gilt dann auch f\"ur $\leq$, m\"usste man aber wohl nachschauen.

Das Entscheidende:
``\ldots{}that restrict the domains
of variables to a finite set of natural numbers''

Wenn wir nat\"urlich eine obere Grenze angeben k\"onnten, die nicht
exponentiell ist (was wohl trivial w\"are), k\"onnte man das ganze also
damit polynomiell l\"osen.
Also muss man nicht unbedingt einen LP solver einsetzen, ich wei\ss{} aber
nicht, wie genau die Laufzeit ist oder ob das \"uberhaupt etwas bringen
w\"urde, war nur eine Idee.

\subsection{Additional Constraints (Email 23.4.13)}
etwas ganz anderes, was mir gerade noch einf\"allt \ldots
Hat eigentlich mal jemand untersucht, ob das lineare Programm in dem
Algorithmus schneller gel\"ost wird wenn man zus\"atzliche constraints
einf\"uhrt? Also das hier:
\url{http://code.google.com/p/recognition-procedures-for-boolean-functions/wiki/LPSolveConstraints}


\section{Maximising the gap}
\label{gap-sec}
\cite{CoaKirLew62}

{\Huge gap}

{\Huge range}

\cite{CoaKirLew62} talks about how to choose $a_{k+1}$ so that the
gap of the LPBs in the next column is maximal, which clearly
seems desirable. I found this very complicated (some iterative
process) but then I thought about it myself and concluded that
determining the coefficient that will keep the gap maximal is not very
difficult. 

Consider a block of a column in the splitting tree that looks something like
this:

\[
\begin{array}{c}
\threshold{s_0}{b_0}\\
\threshold{s_1}{b_1}\\
\threshold{s_2}{b_2}\\\hline
\end{array}
\]

If instead of choosing one coefficient at a time, we choose $a$ as the
next 2 coefficients in one go (which would make sense if the symmetry
was a global one), we would obtain that the gap 2 columns to the left
would be
\[
\threshold{\max\{s_0,s_1+a,s_2+2a \}}{\min\{b_0,b_1+a,b_2+2a \}} 
\]
Now $s_0,s_1+a,s_2+2a$ are all linear functions in the variable
$a$. Note moreover that $s_0\geq s_1\geq s_2$. Therefore, 
$\max\{s_0,s_1+a,s_2+2a \}$ is a piecewise linear function that may
have up to 2 corners at which it makes a left turn (the function has
nonnegative increasing slope). 

Similarly, $\min\{b_0,b_1+a,b_2+2a \}$ is a piecewise linear function that may
have up to 2 corners at which it makes a right turn (the function has
nonnegative decreasing slope). 

Moreover, $\min\{b_0,b_1+a,b_2+2a \}$ has some points above
$\max\{s_0,s_1+a,s_2+2a \}$. It follows that 
$\min\{b_0,b_1+a,b_2+2a\}-\max\{s_0,s_1+a,s_2+2a \}$ is a piecewise linear function that
increases at first and then decreases. Figuring out this function is
fiddly but definitely doable!

The coefficient $a$ should be chosen as the smallest value such that
the function $\min\{b_0,b_1+a,b_2+2a\}-\max\{s_0,s_1+a,s_2+2a \}$
attains its maximum (in general, the function will have a region where
it is constant maximal).

If we want to consider several blocks, we would have to compute the
piecewise linear function for those blocks and again take the minimum,
which would again be a piecewise linear function although I am not
sure about whether it may have local maxima. 

Now, since our algorithm chooses one coefficient at a time, maybe the
above method should be modified in that it considers any 2
neighbouring nodes as a block. As far as I can see, this would just
maximise the gap of the LPBs in the next column, possibly at the cost of
the gap of later columns. Therefore my guess is that what I proposed
initially is better. 



\section{Completion according to \ldots}
 \cite{CoaLew61}
\begin{figure*}[t]
\begin{center}
\scalebox{1.0}
{
\begin{tikzpicture}[description/.style={fill=white,inner sep=2pt}]
\tikzstyle{treenode} = [anchor=west, minimum width=1.5cm]
\def\rectDist{0.05}

\def\nodesep{0.8}
\def\innersep{10}

\def\yoff{6}

%column indicator
\node[anchor=west] at (1, 4 * \nodesep+\yoff)  {column $k+1$};
\node[anchor=west] at (4, 4 * \nodesep+\yoff)  {column $k+2$};

%M HALf
%first column
\node[treenode] (MUsb) at (1, \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MU) [above of=MUsb, node distance=\innersep] {$\MU.$};
\draw ($(MU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (ML) at (1, -\nodesep+\yoff)  {$\ML.$};
\node[treenode] (MLsb) [below of=ML, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(ML.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(MUsb.north west) edge ($(MUsb.north west)-(1.6,0)$)
(ML.south west) edge ($(ML.south west)-(1.6,0)$);
\path[<->]
($(MUsb.north west)-(1,0)$) edge node[description] {$\sigma$} ($(ML.south west)-(1,0)$);

%the diagonal
\path[dashed]
(MUsb.south west) edge node[description] {$\ldots<\sigma$} (ML.north east);

%second column
\node[treenode] (MUUsb) at (4, 2 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MUU) [above of=MUUsb, node distance=\innersep] {$\MU.\uc.$};
\draw ($(MUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MUL) at (4, 1 * \nodesep+\yoff) {$\MU.\lc.$};
\node[treenode] (MULsb) [below of=MUL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(MUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLUsb) at (4, - 1 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MLU) [above of=MLUsb, node distance=\innersep] {$\ML.\uc.$};
\draw ($(MLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLL) at (4, - 2 * \nodesep+\yoff) {$\ML.\lc.$};
\node[treenode] (MLLsb) [below of=MLL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(MLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (MUsb.north east)--(MUUsb.north west);
\draw[thick] (MU.south east)--(MUL.south west);
\draw[thick] (MLsb.north east)--(MLUsb.north west);
\draw[thick] (ML.south east)--(MLL.south west);


%The constraints
\node (MsigmaU) at (7, 1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(MsigmaU.west) edge (MLUsb.north east)
              edge (MUU.south east);

\node (MsigmaL) at (7, -1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(MsigmaL.west) edge (MUL.south east)
              edge (MLLsb.north east);

\node (MsigmaMinus) at (8, 0+\yoff) {$\ldots<\sigma-a_{k+2}<\ldots$};
\path[-] 
(MsigmaMinus.west) edge[bend left=20] (MULsb.north east)
                  edge[bend right=20] (MLU.south east);

\node (MsigmaPlus) at (12, 0+\yoff) {$\ldots<\sigma+a_{k+2}<\ldots$};
\path[-] 
(MsigmaPlus.west) edge[bend right=20] (MUUsb.north east)
                 edge[bend left=20] (MLL.south east);


\node[rectangle, rounded corners, draw=black, dashed] at (5, 0.5 * \yoff)
{$\updownarrow$ ``M'' part might be below ``N'' part $\updownarrow$};

%N HALf
\def\yoff{0}

%first column
\node[treenode] (NUsb) at (1, \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NU) [above of=NUsb, node distance=\innersep] {$\NU.$};
\draw ($(NU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NL) at (1, -\nodesep+\yoff)  {$\NL.$};
\node[treenode] (NLsb) [below of=NL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(NUsb.north west) edge ($(NUsb.north west)-(1.6,0)$)
(NL.south west) edge ($(NL.south west)-(1.6,0)$);
\path[<->]
($(NUsb.north west)-(1,0)$) edge node[description] {$\sigma$} ($(NL.south west)-(1,0)$);

%the diagonal
\path[dashed]
(NUsb.south east) edge node[description] {$\sigma<\ldots$} (NL.north west);

%second column
\node[treenode] (NUUsb) at (4, 2 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NUU) [above of=NUUsb, node distance=\innersep] {$\NU.\uc.$};
\draw ($(NUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NUL) at (4, 1 * \nodesep+\yoff) {$\NU.\lc.$};
\node[treenode] (NULsb) [below of=NUL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLUsb) at (4, - 1 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NLU) [above of=NLUsb, node distance=\innersep] {$\NL.\uc.$};
\draw ($(NLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLL) at (4, - 2 * \nodesep+\yoff) {$\NL.\lc.$};
\node[treenode] (NLLsb) [below of=NLL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (NUsb.north east)--(NUUsb.north west);
\draw[thick] (NU.south east)--(NUL.south west);
\draw[thick] (NLsb.north east)--(NLUsb.north west);
\draw[thick] (NL.south east)--(NLL.south west);


%The constraints
\node (NsigmaU) at (7, 1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(NsigmaU.west) edge (NLUsb.north east)
              edge (NUU.south east);

\node (NsigmaL) at (7, -1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(NsigmaL.west) edge (NUL.south east)
              edge (NLLsb.north east);

\node (NsigmaMinus) at (8, 0+\yoff) {$\ldots<\sigma-a_{k+2}<\ldots$};
\path[-] 
(NsigmaMinus.west) edge[bend left=20] (NULsb.north east)
                  edge[bend right=20] (NLU.south east);

\node (NsigmaPlus) at (12, 0+\yoff) {$\ldots<\sigma+a_{k+2}<\ldots$};
\path[-] 
(NsigmaPlus.west) edge[bend right=20] (NUUsb.north east)
                 edge[bend left=20] (NLL.south east);
\end{tikzpicture}
}
\end{center}
\caption{Illustration of the repair procedure}
\label{repair-illus}
\end{figure*}


Consider Table \ref{op-cont-thresh-table}. We consider each interval
in the table to be a \emph{node}. With each node $N$, the following
information must be stored:
\begin{itemize}
\item 
Its upper and lower children $N.\uc$ and $N.\lc$;
\item 
its minimum threshold (lower bound) $N.s$ and maximum threshold (upper
bound) $N.b$; of course, these are undefined if they have not been
computed yet.
\end{itemize}

We adopt the convention that columns in the table are counted starting
from $0$. 

The completion of the basic algorithm is based on a generalisation of 
the constraints of the form given in (\ref{maxmin-diff-eq}) in Theorem
\ref{maxmin-diff-eq}. There, two sibling nodes impose a constraint on 
$a_{k+1}$. The generalisation is in recognising that any two nodes in
the same column, not just sibling nodes, impose a constraint, but not
always on just one coefficient, but rather on a coefficient
combination, e.g. $a_2-a_3+a_5+a_7$. In the repair procedure we are
about to define, one parameter is such a combination, denoted by
$\sigma$. However, this parameter is only there to help the understanding
(and possibly the proof of correctness if ever we feel like providing
one). The repair procedure will derive constraints such as 
$1-a_5<\sigma<-1$ from which $a_5>2$ can be concluded, without actually
knowing $\sigma$.\footnote{At least it was like this for all examples I
  looked at. At some point, I thought we would need the path
leading to that node starting from the root, represented as a
symbolic sum of coefficients. For example, the node reached by moving
up--up--down--up would be represented by $a_1+a_2+a_4$, the node
reached by moving up--up--down--up--down--down would also be
represented by $a_1+a_2+a_4$.}

We also presume that there is a class ``coefficient'' with attributes
``actually chosen value'', ``lower bound'' and ``upper bound'',
because for the sake of backtracking we should keep track not just of
the value we actually chose for a coefficient but also of the other
values we could have chosen. In  the pseudocode, we simply write $a_i$
for the actual value and $a_i.s$ and $a_i.b$ for the lower and upper
bound, respectively. Note that $a_i.s<a_i<a_i.b$ must hold. 


We recall how the basic algorithm works, and we count
in analogy to Theorem \ref{ak-completion-criterion-thm}: there is a
running index $k$ which starts at $m-1$ and counts back to $0$. 
In each step, the algorithm compares nodes in column $k+1$ to compute
$a_{k+1}$.  

Suppose at some point $k\leq m-2$, we are unable
to choose coefficient $a_{k+1}$ in the way suggested by Theorem
\ref{ak-completion-criterion-thm}.\footnote{Note that at the very
  beginning, $k=m-1$, it is not yet possible to have a conflict.} This means that
$\max_{\phi'\in\Phi_k} (\mint_{\phi'0}-\maxt_{\phi'1}) \geq \min_{\phi'\in\Phi_k}(\maxt_{\phi'0}-\mint_{\phi'1})$. 
We refer to the $\phi'$ such that $\max_{\phi'\in\Phi_k}
(\mint_{\phi'0}-\maxt_{\phi'1})$ is obtained as the node $M$ and to the node
such that $\min_{\phi'\in\Phi_k} (\maxt_{\phi'0}-\mint_{\phi'1})$ is obtained as $N$. 
We denote the upper/lower child of these nodes as 
$\MU$/$\ML$ and $\NU$/$\NL$. We thus have four nodes, two sibling pairs to
be precise, that are responsible for the fact that the algorithm
cannot proceed. This is the starting point of the repair procedure. 


Before we proceed to explain the repair procedure, it should be noted
that it implements a kind of backtracking in the basic algorithm: the
repair procedure will return as result a new $k$ to jump to, bigger
than the previous $k$, telling us that $a_{k+1}$ had been badly
chosen. A return value of $m$ means that repair is impossible; the
function is not a threshold function.  The repair procedure will have
put a computed lower or upper bound in $a_{k+1}.s$ or $a_{k+1}.b$. In
the new choice of $a_{k+1}$, this bound must be respected.


\begin{figure*}[t]
\centering
  
\begin{tabular}{l}
procedure repair(Node $\MU$, $\ML$, $\NU$, $\NL$, Nat $k$, Comb $\sigma$)\\
\ if $k=m$ then return $m$ (*repair impossible*)\\
\ $P := \{ (\MU.\uc,\ML.\uc),$\\ 
\hspace{2em} $(\MU.\lc,\ML.\lc), (\NU.\uc,\NL.\uc), (\NU.\lc,\NL.\lc)\}$\\
\ pick $(\MU',\ML')\in P$ with $l := \MU'.s-\ML'.b$ max.\\
\ pick $(\NU',\NL')\in P$ with $u := \NU'.b-\NL'.s$ min.\\
\ if $\lnot\;l<u$ (*conflict on $\sigma$*)\\
\ \ return $\repair(\MU', \ML', \NU', \NL', k+1, \sigma)$\\
\ $P := \{ (\MU.\uc,\ML.\lc), (\NU.\uc,\NL.\lc)\}$\\
\ pick $(\MU',\ML')\in P$ with $l := \MU'.s-\ML'.b$ max.\\
\ pick $(\NU',\NL')\in P$ with $u := \NU'.b-\NL'.s$ min.\\
\ if $\lnot\;l<u$ (*conflict on $\sigma+a_{k+2}$*)\\
\ \ return $\repair(\MU', \ML', \NU', \NL', k+1, \sigma+a_{k+2})$\\
\ $P := \{ (\MU.\lc,\ML.\uc), (\NU.\lc,\NL.\uc)\}$\\
\ pick $(\MU',\ML')\in P$ with $l := \MU'.s-\ML'.b$ max.\\
\ pick $(\NU',\NL')\in P$ with $u := \NU'.b-\NL'.s$ min.\\
\ if $\lnot\;l<u$ (*conflict on $\sigma-a_{k+2}$*)\\
\ \ return $\repair(\MU', \ML', \NU', \NL', k+1, \sigma-a_{k+2})$\\
\end{tabular}
\vrule
\begin{tabular}{l}
\ (*got this far = no conflicts at this level*)\\
\ (*$a_{k+2}$ is the culprit*)\\
\ if $\MU.\uc.s > \MU.\lc.s + a_{k+2}$\\ 
\ \ $\MUs := \MU.\uc.s$ else\\ 
\ \ $\MUs := \MU.\lc.s + A$\\  
\ if $\ML.\uc.b < \ML.\lc.b + a_{k+2}$\\ 
\ \ $\MLb := \ML.\uc.b$ else\\ 
\ \ $\MLb := \ML.\lc.b + A$\\  
\ if $\NU.\uc.b < \NU.\lc.b + a_{k+2}$\\ 
\ \ $\NUb := \NU.\uc.b$ else\\ 
\ \ $\NUb := \NU.\lc.b + A$\\  
\ if $\NL.\uc.s > \NL.\lc.s + a_{k+2}$\\ 
\ \ $\NLs := \NL.\uc.s$ else\\ 
\ \ $\NLs := \NL.\lc.s + A$\\  
\ resolve $\MUs -\MLb < \NUb - \NLs$ by $A$\\
\ if lower bound is obtained then set $a_{k+2}.s$ to it\\
\ if upper bound is obtained then set $a_{k+2}.b$ to it\\
\ return $k+1$
\end{tabular}
\caption{The repair procedure}
\label{fig:repair}
\end{figure*}


The idea of the repair procedure is to consider the children of the
four nodes, so that we now have eight nodes. The first group of four
nodes gives rise to four comparisons between nodes that go beyond the
comparisons done by the basic algorithm; likewise the second group.
Each comparison imposes a constraint on a certain combination of
coefficients, where there are three different combinations involved.
This is illustrated in Figure \ref{repair-illus}.

In the first half of the code, it is checked if the eight nodes impose
any conflict on any of the three coefficient combinations involved. If
yes, the procedure is called recursively shifted one column to the
right, with four out of the eight nodes picked, the ones that create
this newly discovered conflict. Note that the first discovered
conflict gives rise to the recursive call. Any further possible
conflicts are ignored at this point.

In the second half, if it has been found that the eight nodes impose no
conflict at all, while the four father nodes do, it means that the
choice of $a_{k+2}$ was bad.\footnote{\label{repair2-foot}This point will be refined in
  Subsec.~\ref{repair2-subsec}.} 
 We then essentially undo our commitment
of $a_{k+2}$, express the bounds of the four nodes (which made
choosing $a_{k+1}$ impossible) with $a_{k+2}$ being a symbolic
variable and thereby compute a constraint on $a_{k+2}$ which was
unknown before. 

In the pseudocode, the variables
$\MUs$, $\MLb$, $\NUb$, $\NLs$ have the type 
``arithmetic expression involving symbolic variable $A$''. 
This allows us to talk in a concise way about routine arithmetic
transformations which, if spelt out, would all but obfuscate the code.
Note that in $\MUs -\MLb < \NUb - \NLs$, each expression may be a
plain number or a number plus $A$, and so depending on how the
occurrences of $A$ cancel out, simplification by addition and
subtraction may yield $A<\ldots$, $A>\ldots$, $2\cdot A<\ldots$, or $2\cdot A>\ldots$, and so we end up
with $A<\ldots$ or $A>\ldots$. 

The first call is  
$\repair(\MU, \ML, \NU, \NL, k, a_{k+1})$. 

The pseudocode is shown in Fig.~\ref{fig:repair}.  



After the repair procedure has returned, the basic algorithm must set
$k$ to the return value of the procedure, i.e., jump to the according
column and undo any $a_i.s$ and $a_i.b$ for $i\geq k$. 

Some final remarks: 

\begin{itemize}
\item 
Integrality issues are completely ignored
here. Coates and Lewis, in fact, do not restrict to integer
coefficients. We can easily do so by doubling coefficients where
necessary. 
\item
Our optimisation of node sharing is also ignored here. However, I do
not think it is a big deal for the computation. Our datastructure is
built so that we always find any node we look for. The fact that there
are several ways to get there or that some nodes are in fact shared in
the datastructure does not affect us.
\item
Similarly, we ignore the issue that final nodes may occur in columns
other than the last column. Conceptually, a $\false$ node has two 
$\false$ children, and a $\true$ node has two $\true$ children. 
\item
Moreover, we assume as always that coefficients are
non-increasing. E.g., if the repair procedure computes a new bound
$a_5>2$ and we choose $a_5=3$, then we will by all means choose $a_4\geq3$
even if the constraints of the basic algorithm do not enforce this. 
%DOUBT
\end{itemize}


\subsection{Further Refinement}
\label{repair2-subsec}
Recall the point mentioned in footnote \ref{repair2-foot}. 
In an example sent by CS on 14.1.13 (\texttt{fail5}), the repair tree
eventually looks as shown in Fig.~\ref{fail5-repair-fig}.
In fact, this is the initial (non-recursive) call of the second time repair is
called. 

\begin{figure*}[t]
\begin{center}
\scalebox{0.9}
{
\begin{tikzpicture}[description/.style={fill=white,inner sep=2pt}]
\tikzstyle{treenode} = [anchor=west, minimum width=1.5cm]
\def\rectDist{0.05}

\def\nodesep{0.8}
\def\innersep{10}

\def\yoff{5}

%column indicator
\node[anchor=west] at (1, 4 * \nodesep+\yoff)  {column $k+1$};
\node[anchor=west] at (4, 4 * \nodesep+\yoff)  {column $k+2$};

%N HALf
%first column
\node[treenode] (NUsb) at (1, \nodesep+\yoff) {$(15.5, 16]$};
\node[treenode] (NU) [above of=NUsb, node distance=\innersep] {$\NU.$};
\draw ($(NU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NL) at (1, -\nodesep+\yoff)  {$\NL.$};
\node[treenode] (NLsb) [below of=NL, node distance=\innersep] {$(9.5,\quad 10]$};
\draw ($(NL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(NUsb.north west) edge ($(NUsb.north west)-(1.6,0)$)
(NL.south west) edge ($(NL.south west)-(1.6,0)$);
\path[<->]
($(NUsb.north west)-(1,0)$) edge node[description] {$a_2$} ($(NL.south west)-(1,0)$);

%the diagonal
\path[dashed]
(NUsb.south east) edge node[description] {$a_2<6.5$} (NL.north west);

%second column
\node[treenode] (NUUsb) at (4, 2 * \nodesep+\yoff) {$(13,\quad 16]$};
\node[treenode] (NUU) [above of=NUUsb, node distance=\innersep] {$\NU.\uc.$};
\draw ($(NUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NUL) at (4, 1 * \nodesep+\yoff) {$\NU.\lc.$};
\node[treenode] (NULsb) [below of=NUL, node distance=\innersep] {$(8,\quad 10]$};
\draw ($(NUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLUsb) at (4, - 1 * \nodesep+\yoff) {$(8,\quad 10]$};
\node[treenode] (NLU) [above of=NLUsb, node distance=\innersep] {$\NL.\uc.$};
\draw ($(NLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLL) at (4, - 2 * \nodesep+\yoff) {$\NL.\lc.$};
\node[treenode] (NLLsb) [below of=NLL, node distance=\innersep] {$(2,\quad 5]$};
\draw ($(NLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (NUsb.north east)--(NUUsb.north west);
\draw[thick] (NU.south east)--(NUL.south west);
\draw[thick] (NLsb.north east)--(NLUsb.north west);
\draw[thick] (NL.south east)--(NLL.south west);


%The constraints
\node (NsigmaU) at (7, 1 * \nodesep+\yoff) {$3<a_2<8$};
\path[-] 
(NsigmaU.west) edge (NLUsb.north east)
              edge (NUU.south east);

\node (NsigmaL) at (7, -1 * \nodesep+\yoff) {$3<a_2<8$};
\path[-] 
(NsigmaL.west) edge (NUL.south east)
              edge (NLLsb.north east);

\node (NsigmaMinus) at (8, 0+\yoff) {$-2<a_2-a_3<2$};
\path[-] 
(NsigmaMinus.west) edge[bend left=20] (NULsb.north east)
                  edge[bend right=20] (NLU.south east);

\node (NsigmaPlus) at (12, 0+\yoff) {$8<a_2+a_3<14$};
\path[-] 
(NsigmaPlus.west) edge[bend right=20] (NUUsb.north east)
                 edge[bend left=20] (NLL.south east);


%M HALf
\def\yoff{0}

%first column
\node[treenode] (MUsb) at (1, \nodesep+\yoff) {$(7,\quad 7.5]$};
\node[treenode] (MU) [above of=MUsb, node distance=\innersep] {$\MU.$};
\draw ($(MU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (ML) at (1, -\nodesep+\yoff)  {$\ML.$};
\node[treenode] (MLsb) [below of=ML, node distance=\innersep] {$(-\infty,\quad 0]$};
\draw ($(ML.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(MUsb.north west) edge ($(MUsb.north west)-(1.6,0)$)
(ML.south west) edge ($(ML.south west)-(1.6,0)$);
\path[<->]
($(MUsb.north west)-(1,0)$) edge node[description] {$a_2$} ($(ML.south west)-(1,0)$);

%the diagonal
\path[dashed]
(MUsb.south west) edge node[description] {$7<a_2$} (ML.north east);


%second column
\node[treenode] (MUUsb) at (4, 2 * \nodesep+\yoff) {$(7,\quad 8]$};
\node[treenode] (MUU) [above of=MUUsb, node distance=\innersep] {$\MU.\uc.$};
\draw ($(MUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MUL) at (4, 1 * \nodesep+\yoff) {$\MU.\lc.$};
\node[treenode] (MULsb) [below of=MUL, node distance=\innersep] {$(-\infty,\quad 0]$};
\draw ($(MUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLUsb) at (4, - 1 * \nodesep+\yoff) {$(-\infty,\quad 0]$};
\node[treenode] (MLU) [above of=MLUsb, node distance=\innersep] {$\ML.\uc.$};
\draw ($(MLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLL) at (4, - 2 * \nodesep+\yoff) {$\ML.\lc.$};
\node[treenode] (MLLsb) [below of=MLL, node distance=\innersep] {$(-\infty,\quad 0]$};
\draw ($(MLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (MUsb.north east)--(MUUsb.north west);
\draw[thick] (MU.south east)--(MUL.south west);
\draw[thick] (MLsb.north east)--(MLUsb.north west);
\draw[thick] (ML.south east)--(MLL.south west);


%The constraints
\node (MsigmaU) at (7, 1 * \nodesep+\yoff) {$7<a_2<\infty$};
\path[-] 
(MsigmaU.west) edge (MLUsb.north east)
              edge (MUU.south east);

\node (MsigmaL) at (7, -1 * \nodesep+\yoff) {$-\infty<a_2<\infty$};
\path[-] 
(MsigmaL.west) edge (MUL.south east)
              edge (MLLsb.north east);

\node (MsigmaMinus) at (8, 0+\yoff) {$-\infty<a_2-a_3<\infty$};
\path[-] 
(MsigmaMinus.west) edge[bend left=20] (MULsb.north east)
                  edge[bend right=20] (MLU.south east);

\node (MsigmaPlus) at (12, 0+\yoff) {$7<a_2+a_3<\infty$};
\path[-] 
(MsigmaPlus.west) edge[bend right=20] (MUUsb.north east)
                 edge[bend left=20] (MLL.south east);
\end{tikzpicture}
}
\end{center}
\caption{repair on \texttt{fail5}}
\label{fail5-repair-fig}
\end{figure*}

According to the pseudocode as shown in Fig.~\ref{fig:repair}, we are
in the situation that no conflict is visible on the 8-node column,
while in the column to its left, a conflict is present, and so the
choice of $a_3$ should be the culprit.

It is at this point that a generalisation of repair comes into
play. Note the constraint $a_2+a_3<14$ imposed by $\NUU$ and $\NLL$,
moreover the constraint $7<a_2$ imposed by $\MUU$ and $\MLU$, and
moreover the constraint, not shown in the figure (!!!), 
$7<a_3$ imposed by $\MUU$ and $\MUL$. Clearly the sum of the latter
two gives the constraint  $14<a_2+a_3$ which contradicts the first
constraint. Thus a closer look shows that there \emph{is} a conflict
in the 8-node column. 

Thus, instead of jumping out of repair, we must issue a 
``recursive'' call to a variant
of repair, called repair2, illustrated in
Fig.~\ref{repair2-illus}. Instead of just $\sigma$, we now also have $\rho$,
$\rho_2$. As before, these are tacit arguments, not needed for the
computation but helping the understanding. It holds that $\sigma=\rho+\rho_2$.  

\begin{figure*}[t]
\begin{center}
\scalebox{1.0}
{
\begin{tikzpicture}[description/.style={fill=white,inner sep=2pt}]
\tikzstyle{treenode} = [anchor=west, minimum width=1.5cm]
\def\rectDist{0.05}

\def\nodesep{0.8}
\def\innersep{10}

\def\yoff{12}

%column indicator
\node[anchor=west] at (1, 4 * \nodesep+\yoff)  {column $k+1$};
\node[anchor=west] at (4, 4 * \nodesep+\yoff)  {column $k+2$};

%M HALf
%first column
\node[treenode] (MUsb) at (1, \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MU) [above of=MUsb, node distance=\innersep] {$\MU.$};
\draw ($(MU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (ML) at (1, -\nodesep+\yoff)  {$\ML.$};
\node[treenode] (MLsb) [below of=ML, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(ML.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(MUsb.north west) edge ($(MUsb.north west)-(1.6,0)$)
(ML.south west) edge ($(ML.south west)-(1.6,0)$);
\path[<->]
($(MUsb.north west)-(1,0)$) edge node[description] {$\sigma$} ($(ML.south west)-(1,0)$);

%the diagonal
%\path[dashed]
%(MUsb.south west) edge node[description] {$\ldots<\sigma$} (ML.north east);

%second column
\node[treenode] (MUUsb) at (4, 2 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MUU) [above of=MUUsb, node distance=\innersep] {$\MU.\uc.$};
\draw ($(MUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MUL) at (4, 1 * \nodesep+\yoff) {$\MU.\lc.$};
\node[treenode] (MULsb) [below of=MUL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(MUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLUsb) at (4, - 1 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (MLU) [above of=MLUsb, node distance=\innersep] {$\ML.\uc.$};
\draw ($(MLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLL) at (4, - 2 * \nodesep+\yoff) {$\ML.\lc.$};
\node[treenode] (MLLsb) [below of=MLL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(MLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (MUsb.north east)--(MUUsb.north west);
\draw[thick] (MU.south east)--(MUL.south west);
\draw[thick] (MLsb.north east)--(MLUsb.north west);
\draw[thick] (ML.south east)--(MLL.south west);


%The constraints
\node (MsigmaU) at (7, 1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(MsigmaU.west) edge (MLUsb.north east)
              edge (MUU.south east);

\node (MsigmaL) at (7, -1 * \nodesep+\yoff) {$\ldots<\sigma<\ldots$};
\path[-] 
(MsigmaL.west) edge (MUL.south east)
              edge (MLLsb.north east);

\node (MsigmaMinus) at (8, 0+\yoff) {$\ldots<\sigma-a_{k+2}<\ldots$};
\path[-] 
(MsigmaMinus.west) edge[bend left=20] (MULsb.north east)
                  edge[bend right=20] (MLU.south east);

\node (MsigmaPlus) at (12, 0+\yoff) {$\ldots<\sigma+a_{k+2}<\ldots$};
\path[-] 
(MsigmaPlus.west) edge[bend right=20] (MUUsb.north east)
                 edge[bend left=20] (MLL.south east);


%\node[rectangle, rounded corners, draw=black, dashed] at (5, 0.5 * \yoff)
%{$\updownarrow$ ``M'' part might be below ``N'' part $\updownarrow$};

%N HALf
\def\yoff{6}

%first column
\node[treenode] (NUsb) at (1, \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NU) [above of=NUsb, node distance=\innersep] {$\NU.$};
\draw ($(NU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NL) at (1, -\nodesep+\yoff)  {$\NL.$};
\node[treenode] (NLsb) [below of=NL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(NUsb.north west) edge ($(NUsb.north west)-(1.6,0)$)
(NL.south west) edge ($(NL.south west)-(1.6,0)$);
\path[<->]
($(NUsb.north west)-(1,0)$) edge node[description] {$\rho$} ($(NL.south west)-(1,0)$);

%the diagonal
%\path[dashed]
%(NUsb.south east) edge node[description] {$\rho<\ldots$} (NL.north west);

%second column
\node[treenode] (NUUsb) at (4, 2 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NUU) [above of=NUUsb, node distance=\innersep] {$\NU.\uc.$};
\draw ($(NUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NUL) at (4, 1 * \nodesep+\yoff) {$\NU.\lc.$};
\node[treenode] (NULsb) [below of=NUL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLUsb) at (4, - 1 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (NLU) [above of=NLUsb, node distance=\innersep] {$\NL.\uc.$};
\draw ($(NLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLL) at (4, - 2 * \nodesep+\yoff) {$\NL.\lc.$};
\node[treenode] (NLLsb) [below of=NLL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(NLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (NUsb.north east)--(NUUsb.north west);
\draw[thick] (NU.south east)--(NUL.south west);
\draw[thick] (NLsb.north east)--(NLUsb.north west);
\draw[thick] (NL.south east)--(NLL.south west);


%The constraints
\node (NsigmaU) at (7, 1 * \nodesep+\yoff) {$\ldots<\rho<\ldots$};
\path[-] 
(NsigmaU.west) edge (NLUsb.north east)
              edge (NUU.south east);

\node (NsigmaL) at (7, -1 * \nodesep+\yoff) {$\ldots<\rho<\ldots$};
\path[-] 
(NsigmaL.west) edge (NUL.south east)
              edge (NLLsb.north east);

\node (NsigmaMinus) at (8, 0+\yoff) {$\ldots<\rho-a_{k+2}<\ldots$};
\path[-] 
(NsigmaMinus.west) edge[bend left=20] (NULsb.north east)
                  edge[bend right=20] (NLU.south east);

\node (NsigmaPlus) at (12, 0+\yoff) {$\ldots<\rho+a_{k+2}<\ldots$};
\path[-] 
(NsigmaPlus.west) edge[bend right=20] (NUUsb.north east)
                 edge[bend left=20] (NLL.south east);

%N2 HALf
\def\yoff{0}

%first column
\node[treenode] (N2Usb) at (1, \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (N2U) [above of=N2Usb, node distance=\innersep] {$\NtwoU.$};
\draw ($(N2U.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2Usb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (N2L) at (1, -\nodesep+\yoff)  {$\NtwoL.$};
\node[treenode] (N2Lsb) [below of=N2L, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(N2L.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2Lsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(N2Usb.north west) edge ($(N2Usb.north west)-(1.6,0)$)
(N2L.south west) edge ($(N2L.south west)-(1.6,0)$);
\path[<->]
($(N2Usb.north west)-(1,0)$) edge node[description] {$\rho_2$} ($(N2L.south west)-(1,0)$);

%the diagonal
%\path[dashed]
%(N2Usb.south east) edge node[description] {$\rho_2<\ldots$} (N2L.north west);

%second column
\node[treenode] (N2UUsb) at (4, 2 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (N2UU) [above of=N2UUsb, node distance=\innersep] {$\NtwoU.\uc.$};
\draw ($(N2UU.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2UUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (N2UL) at (4, 1 * \nodesep+\yoff) {$\NtwoU.\lc.$};
\node[treenode] (N2ULsb) [below of=N2UL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(N2UL.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2ULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (N2LUsb) at (4, - 1 * \nodesep+\yoff) {$(s,\qquad b]$};
\node[treenode] (N2LU) [above of=N2LUsb, node distance=\innersep] {$\NtwoL.\uc.$};
\draw ($(N2LU.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2LUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (N2LL) at (4, - 2 * \nodesep+\yoff) {$\NtwoL.\lc.$};
\node[treenode] (N2LLsb) [below of=N2LL, node distance=\innersep] {$(s,\qquad b]$};
\draw ($(N2LL.north west)-(\rectDist,-\rectDist)$) rectangle ($(N2LLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (N2Usb.north east)--(N2UUsb.north west);
\draw[thick] (N2U.south east)--(N2UL.south west);
\draw[thick] (N2Lsb.north east)--(N2LUsb.north west);
\draw[thick] (N2L.south east)--(N2LL.south west);


%The constraints
\node (N2sigmaU) at (7, 1 * \nodesep+\yoff) {$\ldots<\rho_2<\ldots$};
\path[-] 
(N2sigmaU.west) edge (N2LUsb.north east)
              edge (N2UU.south east);

\node (N2sigmaL) at (7, -1 * \nodesep+\yoff) {$\ldots<\rho_2<\ldots$};
\path[-] 
(N2sigmaL.west) edge (N2UL.south east)
              edge (N2LLsb.north east);

\node (N2sigmaMinus) at (8, 0+\yoff) {$\ldots<\rho_2-a_{k+2}<\ldots$};
\path[-] 
(N2sigmaMinus.west) edge[bend left=20] (N2ULsb.north east)
                  edge[bend right=20] (N2LU.south east);

\node (N2sigmaPlus) at (12, 0+\yoff) {$\ldots<\rho_2+a_{k+2}<\ldots$};
\path[-] 
(N2sigmaPlus.west) edge[bend right=20] (N2UUsb.north east)
                 edge[bend left=20] (N2LL.south east);
\end{tikzpicture}
}
\end{center}
\caption{Illustration of the repair2 procedure}
\label{repair2-illus}
\end{figure*}

Observe that the constraint
$7<a_3$ imposed by $\MUU$ and $\MUL$ is actually one of the
constraints used by the usual \texttt{solve} procedure, i.e., without
any repair. It is therefore wasteful to recompute these, but that's
what I did. See also Subsec.~\ref{ak2-bound-subsec}. 

But before explaining repair2 in detail, we should explain the
additions we have to make to repair. This is shown in
Fig.~\ref{fig:repair:enhanced}. Compared with Fig.~\ref{fig:repair},
additional constraints are derived for $\sigma+a_{k+2}$ by adding up
constraints for $\sigma$ and for $a_{k+2}$. 
\begin{itemize}
\item If the tightest bounds are ``direct'' as in
  Fig.~\ref{repair-illus}, then we call repair as before.
\item
If the tightest upper bound for
$\sigma+a_{k+2}$ originates from two constraints but the tightest lower
bound for $\sigma+a_{k+2}$ is ``direct'', then we
will call repair2 which splits the ``N-pair'' into two pairs.
\item
If the tightest lower bound for
$\sigma+a_{k+2}$ originates from two constraints but the tightest upper
bound for $\sigma+a_{k+2}$ is ``direct'', then we
will call repair2 which splits the ``M-pair'' into two pairs.

At this point we must explain the meaning of the letters M and N in
repair: it used to be the case that the  ``M-pair'' gives a lower
bound (as illustrated by the NW-SE diagonal in
Fig.~\ref{repair-illus}) and the  ``N-pair'' gives an upper
bound (as illustrated by the SW-NE diagonal in
Fig.~\ref{repair-illus}). Note that the distinction between the
``M-pair'' and the ``N-pair'' only matters in the ``non-recursive''
part of the code, i.e., the part that is executed only if no recursive
call has been issued before. Now, for repair2, things become more
complicated because we have ``stand-alone'' pair and two pairs which
are ``summed up'', and the former might be the lower bound and the
latter the upper bound, or vice versa. 

We will say that if we have a ``stand-alone N-pair'' which gives the
\label{goofy}
upper bound and the ``split-up M pairs'' which give the lower bound,
then this is ``goofy'' (the ``wrong way round'') and there will be a
special flag set to true.
\item If the tightest bounds are both ``indirect'' we call 
repair3. As of 21.1.13, repair3 is not implemented and it is not clear
whether it will ever be needed. It would be nice
if CS and FW could insert some big exception throwing at this point so
that we discover if repair3 is ever called. 
\end{itemize}


\begin{figure*}[t]
\centering
  
\begin{tabular}{l}
procedure repair(Node $\MU$, $\ML$, $\NU$, $\NL$, Nat $k$, Comb $\sigma$)\\
\ if $k=m$ then return $m$ (*repair impossible*)\\
\ $P := \{ (\MU.\uc,\ML.\uc),$\\ 
\hspace{2em} $(\MU.\lc,\ML.\lc), (\NU.\uc,\NL.\uc), (\NU.\lc,\NL.\lc)\}$\\
\ pick $(\MU',\ML')\in P$ with $l := \MU'.s-\ML'.b$ max.\\
\ pick $(\NU',\NL')\in P$ with $u := \NU'.b-\NL'.s$ min.\\
\ if $\lnot\;l<u$ (*conflict on $\sigma$*)\\
\ \ return $\repair(\MU', \ML', \NU', \NL', k+1, \sigma)$\\
\ $P := \{ (\MU.\uc,\ML.\lc), (\NU.\uc,\NL.\lc)\}$\\
\ pick $(\MUplus',\MLplus')\in P$ with $\lPlus := \MUplus'.s-\MLplus'.b$ max.\\
\ pick $(\NUplus',\NLplus')\in P$ with $\uPlus := \NUplus'.b-\NLplus'.s$ min.\\

\ $P := \{ (\MU.\uc,\MU.\lc),$\\ 
\hspace{2em} $(\ML.\uc,\ML.\lc), (\NU.\uc,\NU.\lc), (\NL.\uc,\NL.\lc)\}$\\
\ pick $(\MUak',\MLak')\in P$ with $\lak := \MUak'.s-\MLak'.b$ max.\\
\ pick $(\NUak',\NLak')\in P$ with $\uak := \NUak'.b-\NLak'.s$ min.\\

\ if $(l+\lak)>\lPlus$\\
\ \ l := $\lPlus$\\
\ \ need2Ms := true\\
\ if $(u+\uak)<\uPlus$\\
\ \ u := $\uPlus$\\
\ \ need2Ns := true\\
\ if $\lnot\;l<u$ (*conflict on $\sigma+a_{k+2}$*)\\
\ \ if need2Ms \& need2Ns\\
\ \ \ return $\mathit{repair3}(\MU', \ML', \MUak',\MLak', \NU', \NL', \NUak',\NLak', k+1, \sigma+a_{k+2})$\\
\ \ if need2Ms \& $\lnot$ need2Ns\\
\ \ \ return $\mathit{repair2}(\NUplus', \NLplus', \MU', \ML', \MUak',\MLak',k+1, \sigma+a_{k+2},true)$\\
\ \ if $\lnot$ need2Ms \& need2Ns\\
\ \ \ return $\mathit{repair2}(\MUplus', \MLplus', \NU', \NL', \NUak',\NLak',k+1, \sigma+a_{k+2},false)$\\
\ \ if $\lnot$ need2Ms \&  $\lnot$ need2Ns\\
\ \ \ return $\repair(\MU', \ML', \NU', \NL', k+1, \sigma+a_{k+2})$\\
\ $P := \{ (\MU.\lc,\ML.\uc), (\NU.\lc,\NL.\uc)\}$\\
\ pick $(\MUminus',\MLminus')\in P$ with $l := \MUminus'.s-\MLminus'.b$ max.\\
\ pick $(\NUminus',\NLminus')\in P$ with $u := \NUminus'.b-\NLminus'.s$ min.\\
\ if $\lnot\;l<u$ (*conflict on $\sigma-a_{k+2}$*)\\
\ \ return $\repair(\MUminus', \MLminus', \NUminus', \NLminus', k+1, \sigma-a_{k+2})$\\
\ (*non-recursive part as previously*)
\end{tabular}
\caption{The enhanced repair procedure}
\label{fig:repair:enhanced}
\end{figure*}

Now let us consider the actual repair2 procedure. 
What constraints can be read from
Fig.~\ref{repair2-illus}: 
\begin{itemize}
\item 
$\sigma$: 2 direct constraints from ``M'';
$2\cdot 2$ bounds by combining $\rho$-constraint from ``N'' and
$\rho_2$-constraint from ``N2'';
$1$ constraint by combining $\rho+a_{k+2}$-constraint from ``N'' with
$\rho_2-a_{k+2}$-constraint from ``N2'' (implemented later, see Subsec.~\ref{another-combination-subsec});
$1$ constraint by combining $\rho-a_{k+2}$-constraint from ``N'' with
$\rho_2+a_{k+2}$-constraint from ``N2'' (implemented later, see Subsec.~\ref{another-combination-subsec}).
\item
$\sigma+a_{k+2}$: 1 direct constraint from ``M'';
$2$ constraints by combining $\rho+a_{k+2}$-constraint from ``N'' with
either of the $\rho_2$-constraints from ``N2''; 
$2$ constraints by combining $\rho_2+a_{k+2}$-constraint from ``N2'' with
either of the $\rho$-constraints from ``N'' (as of 21.1.13, not implemented, see 
Subsec.~\ref{yet-another-combination-subsec}). 
\item
$\sigma-a_{k+2}$: 1 direct constraint from ``M'';
$2$ constraints by combining $\rho-a_{k+2}$-constraint from ``N'' with
either of the $\rho_2$-constraints from ``N2''; 
$2$ constraints by combining $\rho_2-a_{k+2}$-constraint from ``N2'' with
either of the $\rho$-constraints from ``N'' (as of 21.1.13, not implemented, see 
Subsec.~\ref{yet-another-combination-subsec}). 
\end{itemize}

We do not give the pseudocode for repair2 because we hope that after
these explanations, the code will be understandable. 

\subsection{\texttt{repair2} with $\sigma=\rho-\rho_2$}
\label{repair2-minus-subsec}

FW sent me an example on 18.1.13 on which repair2 fails; I call it
\texttt{repair2fails}. I calculated it by hand and found that more
cases need to be covered.  In this example,  the repair tree
eventually looks as shown in Fig.~\ref{repair2fails-fig}.
In fact, this is the initial (non-recursive) call of the second time repair is
called. 


\begin{figure*}[t]
\begin{center}
\scalebox{0.9}
{
\begin{tikzpicture}[description/.style={fill=white,inner sep=2pt}]
\tikzstyle{treenode} = [anchor=west, minimum width=1.5cm]
\def\rectDist{0.05}

\def\nodesep{0.8}
\def\innersep{10}

\def\yoff{5}

%column indicator
\node[anchor=west] at (1, 4 * \nodesep+\yoff)  {column $k+1$};
\node[anchor=west] at (4, 4 * \nodesep+\yoff)  {column $k+2$};

%N HALf
%first column
\node[treenode] (NUsb) at (1, \nodesep+\yoff) {$(9, 9.25]$};
\node[treenode] (NU) [above of=NUsb, node distance=\innersep] {$\NU.$};
\draw ($(NU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NL) at (1, -\nodesep+\yoff)  {$\NL.$};
\node[treenode] (NLsb) [below of=NL, node distance=\innersep] {$(4.75,\quad 5]$};
\draw ($(NL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(NUsb.north west) edge ($(NUsb.north west)-(1.6,0)$)
(NL.south west) edge ($(NL.south west)-(1.6,0)$);
\path[<->]
($(NUsb.north west)-(1,0)$) edge node[description] {$a_1$} ($(NL.south west)-(1,0)$);

%the diagonal

%second column
\node[treenode] (NUUsb) at (4, 2 * \nodesep+\yoff) {$(9,\quad \infty]$};
\node[treenode] (NUU) [above of=NUUsb, node distance=\innersep] {$\NU.\uc.$};
\draw ($(NUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NUL) at (4, 1 * \nodesep+\yoff) {$\NU.\lc.$};
\node[treenode] (NULsb) [below of=NUL, node distance=\innersep] {$(5,\quad 5.5]$};
\draw ($(NUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLUsb) at (4, - 1 * \nodesep+\yoff) {$(4,\quad 5]$};
\node[treenode] (NLU) [above of=NLUsb, node distance=\innersep] {$\NL.\uc.$};
\draw ($(NLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (NLL) at (4, - 2 * \nodesep+\yoff) {$\NL.\lc.$};
\node[treenode] (NLLsb) [below of=NLL, node distance=\innersep] {$(1,\quad 2.5]$};
\draw ($(NLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(NLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (NUsb.north east)--(NUUsb.north west);
\draw[thick] (NU.south east)--(NUL.south west);
\draw[thick] (NLsb.north east)--(NLUsb.north west);
\draw[thick] (NL.south east)--(NLL.south west);


%The constraints
\node (NsigmaU) at (7, 1 * \nodesep+\yoff) {$4<a_1<\infty$};
\path[-] 
(NsigmaU.west) edge (NLUsb.north east)
              edge (NUU.south east);

\node (NsigmaL) at (7, -1 * \nodesep+\yoff) {$2.5<a_1<4.5$};
\path[-] 
(NsigmaL.west) edge (NUL.south east)
              edge (NLLsb.north east);

\node (NsigmaMinus) at (8, 0+\yoff) {$0<a_1-a_2<1.5$};
\path[-] 
(NsigmaMinus.west) edge[bend left=20] (NULsb.north east)
                  edge[bend right=20] (NLU.south east);

\node (NsigmaPlus) at (12, 0+\yoff) {$6.5<a_1+a_2<\infty$};
\path[-] 
(NsigmaPlus.west) edge[bend right=20] (NUUsb.north east)
                 edge[bend left=20] (NLL.south east);


%M HALf
\def\yoff{0}

%first column
\node[treenode] (MUsb) at (1, \nodesep+\yoff) {$(4.75,\quad 5]$};
\node[treenode] (MU) [above of=MUsb, node distance=\innersep] {$\MU.$};
\draw ($(MU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (ML) at (1, -\nodesep+\yoff)  {$\ML.$};
\node[treenode] (MLsb) [below of=ML, node distance=\innersep] {$(-\infty,\quad 0]$};
\draw ($(ML.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLsb.south east)+(\rectDist, -\rectDist)$);

%The sigma distance indicator
\path[dashed] 
(MUsb.north west) edge ($(MUsb.north west)-(1.6,0)$)
(ML.south west) edge ($(ML.south west)-(1.6,0)$);
\path[<->]
($(MUsb.north west)-(1,0)$) edge node[description] {$a_1$} ($(ML.south west)-(1,0)$);

%the diagonal
\path[dashed]
(MUsb.south west) edge node[description] {$4.75<a_1$} (ML.north east);

\path[dashed]
(NUsb.south east) edge node[description] {$a_1<4.5$} (NL.north west);

%second column
\node[treenode] (MUUsb) at (4, 2 * \nodesep+\yoff) {$(4,\quad 5]$};
\node[treenode] (MUU) [above of=MUUsb, node distance=\innersep] {$\MU.\uc.$};
\draw ($(MUU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MUUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MUL) at (4, 1 * \nodesep+\yoff) {$\MU.\lc.$};
\node[treenode] (MULsb) [below of=MUL, node distance=\innersep] {$(1,\quad 2.5]$};
\draw ($(MUL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MULsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLUsb) at (4, - 1 * \nodesep+\yoff) {$(-\infty,\quad 0]$};
\node[treenode] (MLU) [above of=MLUsb, node distance=\innersep] {$\ML.\uc.$};
\draw ($(MLU.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLUsb.south east)+(\rectDist, -\rectDist)$);

\node[treenode] (MLL) at (4, - 2 * \nodesep+\yoff) {$\ML.\lc.$};
\node[treenode] (MLLsb) [below of=MLL, node distance=\innersep] {$(-\infty,\quad 0]$};
\draw ($(MLL.north west)-(\rectDist,-\rectDist)$) rectangle ($(MLLsb.south east)+(\rectDist, -\rectDist)$);

%edges
\draw[thick] (MUsb.north east)--(MUUsb.north west);
\draw[thick] (MU.south east)--(MUL.south west);
\draw[thick] (MLsb.north east)--(MLUsb.north west);
\draw[thick] (ML.south east)--(MLL.south west);


%The constraints
\node (MsigmaU) at (7, 1 * \nodesep+\yoff) {$4<a_1<\infty$};
\path[-] 
(MsigmaU.west) edge (MLUsb.north east)
              edge (MUU.south east);

\node (MsigmaL) at (7, -1 * \nodesep+\yoff) {$1<a_1<\infty$};
\path[-] 
(MsigmaL.west) edge (MUL.south east)
              edge (MLLsb.north east);

\node (MsigmaMinus) at (8, 0+\yoff) {$1<a_1-a_2<\infty$};
\path[-] 
(MsigmaMinus.west) edge[bend left=20] (MULsb.north east)
                  edge[bend right=20] (MLU.south east);

\node (MsigmaPlus) at (12, 0+\yoff) {$4<a_1+a_2<\infty$};
\path[-] 
(MsigmaPlus.west) edge[bend right=20] (MUUsb.north east)
                 edge[bend left=20] (MLL.south east);
\end{tikzpicture}
}
\end{center}
\caption{repair on \texttt{repair2fails}}
\label{repair2fails-fig}
\end{figure*}

We have a situation similar to Fig.~\ref{fail5-repair-fig}: 
no conflict is visible on the 8-node column,
while in the column to its left, a conflict is present, and so the
choice of $a_2$ should be the culprit.

Note the constraint $1<a_1-a_2$ imposed by $\MUL$ and $\MLU$,
moreover the constraint $a_1<4.5$ imposed by $\NUL$ and $\NLL$, and
moreover the constraint, not shown in the figure (!!!), 
$-a_2<-3.5$ imposed by $\NUU$ and $\NUL$ (after some multiplication by
$-1$). Clearly the sum of the latter
two gives the constraint  $a_1-a_2<1$ which contradicts the first
constraint. Thus a closer look shows that there \emph{is} a conflict
in the 8-node column. 

This calls for a further extension of repair2, which should be
straightforward to do if one understands repair2 so far, but it is
without doubt very fiddly and tedious. 

I introduced a parameter \texttt{minus} for \texttt{repair2} which is
true whenever $\rho_2$ must be taken with a minus sign,
i.e. $\sigma=\rho-\rho_2$ rather than $\sigma=\rho+\rho_2$. This is indeed very
fiddly because lower bounds become upper bounds and vice versa. 

After this generalisation only 20 problems remained unsolved:
\texttt{computed\_LPBs\_7.txt}. 

\subsection{\texttt{monorepair} and Bounds on $a_{k+2}$}
\label{ak2-bound-subsec}
One of the unsolved problems was \texttt{repair2withminus\_fails.dnf},
see \texttt{repair2withminus\_fails.txt}. 
This was a very hard one: \texttt{repair} computed a bound for a
coefficient $a_i$ which contradicted a bound for $a_i$ that was
imposed by another node pair and which had been used in the usual
computation of \texttt{solve} (i.e., without any repair). In addition,
this latter node pair was in a part of the tree that was not at all
involved in the \texttt{repair} call. 

I was completely puzzled how I could ``repair'' this problem. I
attempted to do it by a procedure I called \texttt{monorepair} which
took the node pair and the bound it contradicted (a number) and tried
to choose some earlier coefficient to solve the conflict.
\texttt{monorepair} resembles  \texttt{repair} but is a lot simpler. 

Unfortunately, monorepair proved useless\footnote{3.4.13: See 
Subsec.~\ref{monorepair-revisited-subsec}.}: it seems that it aggravated
the unsolvability of constraints rather than resolving it. 
\texttt{computed\_LPBs\_7\_withmono.txt} = \texttt{computed\_LPBs\_7.txt}. 

Instead, I observed that the node pair unused by \texttt{repair} could
in fact be used quite naturally: in Subsec.~\ref{repair2-subsec} I
said that it is wasteful to recompute the ``usual'' solve-bounds
between neighbouring nodes. It is not only wasteful, it is also a
shame because there are only 4 (\texttt{repair}) or 6
(\texttt{repair2}) such neighbouring node pairs, while the original
tree may have much more. 

However, making use of this required some important changes to the
code because I have to remember for each column what the tightest
constraint on the coefficient are and which nodes are responsible for
it. I had to add to \texttt{SolverInformation.hpp}: 
\texttt{getComputedBound}, \texttt{setLowerComputedBound}, 
\texttt{setUpperComputedBound}, \texttt{getCulprit},
\texttt{setMinCulprit}, \texttt{setMaxCulprit}. 


\subsection{Guessing Default Values for Coefficients (14.3.13)}
The fact that the problem described in Subsec.~\ref{ak2-bound-subsec}
was so hard made me think about ways of guessing default values for
the coefficients. Some ideas:
\begin{itemize}
\item 
Every clause of length $m$ supports the hypothesis that every variable
in it has weight $\frac{d}{m}$. Now we should compute, for each
variable, the average hypothesis. 

\texttt{repair2withminus\_fails.dnf} strongly disproves this! Both
$a_7$ and $a_6$ would have a default value of $\frac{d}{3}$ while in
reality $a_6$ must be much bigger. 
\item Count $\frac{d}{m}$ for each clause, or take the square root of
  the number of clauses, for each length \ldots none of this works on
  \texttt{repair2withminus\_fails.dnf}. 
\end{itemize}




\subsection{Another Combination for \texttt{repair2}:
  $\sigma=(\rho\pm a_{k+2})\pm(\rho_2\pm a_{k+2})$ (15.3.13)}
\label{another-combination-subsec}
The file \texttt{computed\_LPBs\_7\_withStoreComputedBounds.txt}
represents a complete run on the 7-class which is however aborted at
some unsolved problem and therefore does not tell us how many unsolved
problems there were. 

At some point the problem \texttt{storeComputedBounds\_fails} was
unsolved. The problem was that at some level, \texttt{repair2} did not see
a conflict and thus tried to return a bound for some coefficient,
while in fact there still was a conflict. This is all not so shocking
since I have been aware that I do not consider all possible
combinations. 

The combinations in question concern \texttt{repair2}, 
so that $\sigma=\rho\pm\rho_2$ holds. In fact, the $\rho$-nodes
express bounds on $\rho\pm a_{k+2}$ and the $\rho_2$-nodes
express bounds on $\rho_2\pm a_{k+2}$ which can be used to infer bound on
$\sigma$ using the combinations: \\
$\sigma=(\rho+a_{k+2})+(\rho_2-a_{k+2})$ and $\sigma=(\rho-a_{k+2})+(\rho_2+a_{k+2})$ if
flag \texttt{minus} is false;\\
$\sigma=(\rho+a_{k+2})-(\rho_2+a_{k+2})$ and $\sigma=(\rho-a_{k+2})-(\rho_2-a_{k+2})$ if
flag \texttt{minus} is true. 

I implemented all those combinations. 
After this there were four problems left unsolved in
the 7-class: \texttt{computed\_LPBs\_7\_rho2Minus\_fails.txt}.


\subsection{Trouble with the Monotonicity Condition (15.3.13)}
At some point only 4 problems in the 7-class remained unsolved: 
\texttt{computed\_LPBs\_7\_rho2Minus\_fails.txt}. In particular, 
\texttt{rho2MinusFails} remained unsolved, and the mistake was due to
the combination of \texttt{repair}/\texttt{repair2} and the
monotonicity condition, which says that the coefficients must be
non-decreasing as one assigns them from right to left. What happened
was that \texttt{repair}/\texttt{repair2} computed an upper bound for 
a coefficient $a_i$ which was below the previously computed value of
$a_{i+1}$, making it impossible to fulfill the condition. 

My first attempt to remedy this was to simply lift the monotonicity
condition. This worked for \texttt{rho2MinusFails} but made previously
solved problems unsolvable so that in total there would be 21 unsolved
problems, see \texttt{computed\_LPBs\_7\_lift\_monotonicity.txt}. 

What I did then was the following: whenever
\texttt{repair}/\texttt{repair2} assigned an upper bound for a
coefficient $a_i$ but the values computed for 
$a_{i+1},\ldots,a_{i+k}$ are above this upper bound, I modify the output
produced by \texttt{repair}/\texttt{repair2} so that it enforces the 
upper bound on $a_{i+k}$ instead of on $a_i$. The variable for
counting $k$ is called \texttt{shift\_repair\_col}. 

I am not 100\% sure, but I think that I also had to correct a mistake
in \texttt{setUpperRepairBound} and \texttt{setLowerRepairBound}: they
did not take into account \texttt{repairFactors}. 

It is very bizarre that such a clear mistake should only manifest
itself so rarely. After correcting the mistake there were two
problems left unsolved in the 7-class: 
\texttt{computed\_LPBs\_7\_repairFactors.txt}

A very nice side effect of this change was that the solutions were
often smaller than previously: apparently, if
\texttt{repair}/\texttt{repair2} assigns an upper bound for a
coefficient $a_i$ but the values computed for $a_{i+1},\ldots,a_{i+k}$ are
above this upper bound, then this just means that the upper bound
should be on $a_{i+k}$ instead of on $a_i$, it is just not visible
yet. 

I was also hoping that this would reduce the runtime, but this is not
the case: the 7-class now takes 9 to 10 seconds, a few days ago it was
8 to 9. I suspect that the benefit occurs very rarely whereas the
extra overhead occurs much more often. 

\subsection{Wrong Arguments to \texttt{repair2}: Nodes for $\rho-\rho_2$ or
  $\rho+\rho_2$, not $\rho$ (16.3.13)}
The last-but-one unsolved problem in the 7-class was
\texttt{repairFactors\_fails} (see also
\texttt{computed\_LPBs\_7\_repairFactors.txt}). 

The problem here was that I
had assigned to \texttt{muPrimeRho2}, \texttt{mlPrimeRho2}, 
\texttt{nuPrimeRho2 }, \texttt{nlPrimeRho2} some nodes giving bounds
to $\rho+\rho_2$ or $\rho-\rho_2$, although the recursive call to \texttt{repair2}
should have actually used nodes talking about $\rho$. 

I introduced two flags \texttt{needRho2Plus}, \texttt{needRho2Minus}
in order to postpone the assignment to \texttt{muPrimeRho2}, \texttt{mlPrimeRho2}, 
\texttt{nuPrimeRho2 }, \texttt{nlPrimeRho2} of nodes giving bounds
to $\rho+\rho_2$ or $\rho-\rho_2$, until we are sure that \texttt{repair2} should
be called with these nodes as parameters. 

It is very bizarre that such a clear mistake should only manifest itself so rarely.
After correcting the mistake there was only one problem left unsolved
in the 7-class: \texttt{computed\_LPBs\_7\_repairNodeAssignment.txt}. 


\subsection{Wrong Arguments to \texttt{repair2}: M, N Confusion (17.3.13)}
The last unsolved problem in the 7-class was
\texttt{repairNodeAssignment\_fails} (see also  
\texttt{computed\_LPBs\_7\_repairNodeAssignment.txt}). 
The problem here was that I
had assigned to \texttt{muPrimeRho2}, \texttt{mlPrimeRho2} 
the nodes giving the tightest lower bound for $a_{\rho_2}$ and to 
\texttt{nuPrimeRho2 }, \texttt{nlPrimeRho2}, the nodes giving the 
tightest upper bound for $a_{\rho_2}$, disregarding the fact that if
\texttt{repair2} is called with the flag ``minus'' ($\sigma=\rho-\rho_2$,
see Subsec.~\ref{repair2-minus-subsec}), then the lower and upper
bound should be reversed. 

All this is due to the confusion about what the letters M and N stand
for, see Subsec.~\ref{goofy} on page \pageref{goofy}. I should think
hard about a clearer nomenclature and rename the variables
accordingly. 
\label{M-N-nomenclature} 

It is very bizarre that such a clear mistake should only manifest itself so rarely.
After correcting the mistake I got a result for the entire 7-class:
\texttt{all7solved.txt}. 

\subsection{Undo the repair Bounds (19.3.13)}
\label{undo-repair-subsec}
Working on bigger example \texttt{15\_1.dnf}, I became aware that the
repair procedures should undo all repair bounds to the left of the
current column. This was surprisingly easy to implement by modifying
\texttt{setLowerRepairBound} and \texttt{setUpperRepairBound}. 

This modification did not solve \texttt{15\_1.dnf}, but interestingly,
it changed the behaviour on the 7-class, although only on six 
examples where slightly different solutions were computed. See  
\texttt{all7solved\_repair\_bounds\_undone.txt}. 

%\subsection{Remaining Problems (19.3.13)}
%\label{remaining-problems-subsec}

\subsection{Division by 0 (20.3.13)}
The example \texttt{loops\_8} (8 variables) loops. The problem is the
following: in the second half of the \texttt{repair} or
\texttt{repair2} procedure (see e.g. Fig.~\ref{fig:repair}), all the
occurrences of the symbolic $a_{k+2}$ cancel out, which codewise means
that a bound will be computed by a division of 0 by 0, shown as
``\verb!-nan!''.  \texttt{setLowerRepairBound} and
\texttt{setUpperRepairBound} used to have a loop prevention condition
saying that the treatment of a DNF should be aborted with an exception
if a repair bound is not tighter than a previously computed one, as
this seems to be a clear criterion that no progress has been
made. This condition did not work properly if a bound is
``\verb!-nan!''. I modified it accordingly. What is annoying in this
context is that apparently, \verb!coeff_neg_infinity!,
\verb!coeff_pos_infinity! cannot be used in comparisons with $<$, $>$,
requiring special treatment. 

I have tested that this causes no trouble for the 7-class.

My solution here just prevents the looping, but still no solution to
\texttt{loops\_8} is found. From previous experience I would say that
the cancelling out of the symbolic $a_{k+2}$ indicates that there is
still a conflict present in the current column, it is just not covered
by the current code. 

Later the same day I discovered on \texttt{still\_loops} another
variation of the problem: if the bound is computed by non-zero divided
by 0, then the result will be $\pm\infty$ in such a way that the bound can
never be fulfilled; again \texttt{setLowerRepairBound} and
\texttt{setUpperRepairBound} should throw an exception.
 
And again later another variation, on \texttt{still\_still\_loops}: \texttt{monorepair} is called
(which most likely means that repair was doing something strange
earlier) and results in bounds $\pm\infty$ in such a way that the bound is
trivial; again \texttt{setLowerRepairBound} and
\texttt{setUpperRepairBound} should throw an exception.

CS generated 248298 examples in the 8-class. The current version of
the code does not loop on any of those, and finds a solution for
248253, i.e., 99.982\%.


\subsection{Rounding the repair Bounds (22.3.13)}
\label{bound-rounding-subsec}
I looked again at \texttt{15\_1.dnf} (see
Subsec.~\ref{undo-repair-subsec}), which produced some chaotic results
starting with the problem that a repair bound computed is not integral
but ends in .5. I modified the return values of all repair variants so
that they are rounded up or down, as applicable. Amazingly, this made
the algorithm work for \texttt{15\_1.dnf}, result:
\[
393 x_0 + 393 x_1 + 374 x_2 + 355 x_3 + 337 x_4 + 318 x_5 + 300 x_6 + 262 x_7
+ 244 x_8 + 224 x_9 + 168 x_{10} + 112 x_{11} + 112 x_{12} + 72 x_{13}
+ 32 x_{14} \geq 1119
\]
Moreover, on the 8-class, there are now 6 more examples on which I
find a solution: 248259, i.e., 99.984\%.

But see Subsec.~\ref{rounding-revisited-subsec}. 

\subsection{Another Combination for \texttt{repair2}:
  $\sigma\pm a_{k+2}=\rho\pm(\rho_2\pm a_{k+2})$ (23.3.13)}
\label{yet-another-combination-subsec}
I extracted 18 examples that failed (\texttt{all\_8\_2\_fail.dnfs}) to
do some further debugging, which revealed that one example failed due
to the cases $\sigma\pm a_{k+2}=\rho\pm(\rho_2\pm a_{k+2})$ not implemented so far, see
Subsec.~\ref{repair2-subsec}. 

I implemented these cases. On the 8-class, there are now 19 more examples on which I
find a solution: 248278, i.e., 99.992\%. In detail, implementing 
 $\sigma-a_{k+2}=\rho\pm(\rho_2\pm a_{k+2})$ contributed 18 more successes while
 implementing  $\sigma+a_{k+2}=\rho\pm(\rho_2\pm a_{k+2})$ just contributed one extra
 case! 


\subsection{Rounding Revisited (24.3.13)}\label{rounding-revisited-subsec}
I became aware that my rounding of repair bounds, contrary to what I
had intended, \emph{loosened} the bounds rather than tightening
them. But then thinking about it it became clear to me that loosening
or tightening are both bad! I now did no rounding in this place, but
modified \texttt{chooseCoefficient} (e.g.~a coefficient $>12.5$ will
be chosen as 13). 

Strangely, this made me lose exactly the one example that I had gained
in the previous subsection, but I still feel like the current solution
is conceptually more correct. 


\subsection{\texttt{repair} cannot work in constant time (27.3.13)}
The second example in \texttt{all\_8\_2\_fail.dnfs} is
\texttt{8\_2\_2.dnf}. I tried to understand why it did not
work. Checking this example by hand was indeed very tedious!

The nice thing about this example is that only \texttt{repair}, not
\texttt{repair2} is used, making it still relatively easy to analyse. 

After a first repair with four recursive calls, the bound $a_7>8$ is
imposed. Later on a conflict $18<a_3<17$ occurs leading to another
repair. This one makes one recursive call before ``resolving the
conflict'' by imposing a bound $16<a_5$. Some time later another call
to repair infers a contradicting bound $a_5<16$.

As it happened before, I suspected that in the 6th column where no
conflict was found, there was in fact a conflict, just not covered by
the cases we consider so far. Indeed, the column imposes a bound
$a_4+a_5<19$, and at the same time two bounds $14<a_5$ and
$14<a_4$. The bound $a_4+a_5<19$ is imposed by a pair of nodes that
are not at all involved in the current repair call. 

This calls for a generalisation of repair: Rather than only exploiting
the bounds arising from the arguments of repair, plus the precomputed
bounds arising from direct siblings (see
Subsec.~\ref{ak2-bound-subsec}), we could also use other pairs in the
column in question; at least, we could read all possible constraints
for $\sigma$, $\sigma+a_{k+2}$ and  $\sigma-a_{k+2}$ from the column. 

However, this generalisation sacrifices an important charm of repair,
namely that it is constant effort per column! Also, there is no limit
as to what kind of combinations must be considered. 

\label{mic-mac-stuff}
I still think that we should go for it, but before, we should do some
cleaning of the mic-mac-stuff, because there is so much repetition
there so maybe one can write some methods for this. In particular, so
far we need a method for ``find the lower sibling''; in the future, we
would need ``find the lower cousin''\ldots 


\subsection{Segmentation Fault with LP Algorithm (3.4.13)}
On \verb+rand22_1p0_0p1_10.dnfs+, a segmentation fault occurred with
the LP algorithm! It now works, although incredibly slowly (80s per
problem). In comparison: the combinatorial algorithm solves this suite
as well and takes 2.5 seconds! 

\subsection{\texttt{monorepair} Revisited (3.4.13)}
\label{monorepair-revisited-subsec}
I observed on \texttt{10\_loops.dnf} that \texttt{monorepair} causes a
loop: in fact, there is a sequence of three calls to
\texttt{monorepair}/\texttt{repair} that is repeated over and over
again, the only difference being that all bounds involved are
increased by one in each iteration of this loop. 

My rushed solution was to disable \texttt{monorepair} because I
thought it was useless anyhow (see Subsec.~\ref{ak2-bound-subsec}),
but I realised that \texttt{monorepair} was useful quite frequently,
e.g.~on \texttt{15\_needs\_monorepair.dnf}. 

\label{monorepair-hack}
My temporary solution, a terrible hack, was to introduce a counter
\verb+termination_hack+ which allows only 100 calls to
\texttt{monorepair} on the same problem. 

Of course, what we need are 2 simple examples one exhibiting the loop
and one exhibiting the usefulness of \texttt{monorepair}, so that
maybe we can reconcile. 

\subsection{Collection or not matters (24.4.13)}
CS writes: 
ich wollte gerade ein Beispiel extrahieren und nun noch mehr Probleme
als vorher. Kurioserweise l\"auft das Beispiel \texttt{cutTreeFail} durch, wenn ich
es nicht als Collection starte.

I tried to confirm this on 3.5.13 (version before CS fixed that bug) 
and it ran through without problems in both cases. 

I observed a similar strange problem, namely that 
\begin{verbatim}
  ./boolean-function-recognition --repair true --lpbConv combinatorial --cutTree true --dimacsM 28262 --dimacsC ../../../enumeration/7/all_7.cnfs
\end{verbatim}
fails on 4 problems and 
\begin{verbatim}
 ./boolean-function-recognition --repair true --lpbConv combinatorial --cutTree true --dimacsM ../../../enumeration/META7 --dimacsC ../../../enumeration/7/all_7.cnfs
\end{verbatim}
fails on just 1, i.e., how the dimacsM argument is passed matters,
which is a very strange mistake. 

I then ran both cases again after a repository update, so that I get
the version after CS fixed that bug. When I do that, I still do not
get any failures on \texttt{cutTreeFail}. For \verb+all_7.cnfs+, I get
a segmentation fault after 9744 lines, which is before the problem
mentioned just above occurs. So we cannot know whether CS's fix worked
in all cases. 

For \texttt{cutTreeFail2}, CS also reports a segmentation fault. I can
confirm this. 

On 4.5.13, this seems to be fixed. See Subsec.~\ref{cutTree-revisted-subsec}.


\subsection{Going up to $m=24$}
\label{m24-subsec}
In early May 2013 I decided to try for the IWOCA submission to go up to
$m=24$ instead of $m=22$.

For $m=22,23$, we removed
2 of the 60 benchmarks because they exhausted the memory resources
which caused various crashes 
for one of the two programs. Of course, since these were by far the
hardest examples, this manipulation distorts our results.

The according figures (extensions of the IWOCA version) are shown
below:

\begin{figure}[h]
  \centering
  \begin{tikzpicture}
\begin{semilogyaxis}[width=6cm,%xlabel=dimension,ylabel=Failure rate in \%,
ytick={0.01,0.1,1,10},
ymax=100]
\addplot[mark=*] coordinates {
(4,0.0001)
(5,0.0001)
(6,0.0001)
(7,0.0001)
(8,0.0141)
(9,0.01)
(10,0.05)
(11,0.167)
(12,0.533)
(13,0.767)
(14,0.7)
(15,2.4)
(16,1.333)
(17,3)
(18,2)
(19,4.33333)
(20,11.3333)
(21,13.3333)
(22,18.3333)
(23,5.08)
(24,13.79)
}; 
%\addlegendentry{Failure rate}
\end{semilogyaxis}
\end{tikzpicture}
  \caption{\label{failure:fig}Failure rate in \%}
\end{figure}


\begin{figure}[h]
  \centering
\begin{tikzpicture}
\begin{axis}[width=5.5cm,%xlabel=dimension,ylabel=\% requiring repair,
ymax=20]
%\addplot[mark=*] coordinates {
%(4,100)
%(5,100)
%(6,99.195)
%(7,91.565)
%(8,94.4)
%(9,99.1)
%(10,98.743)
%(11,97.5)
%(12,96.7)
%(13,94.067)
%(14,94.533)
%(15,89.067)
%(16,94)
%(17,84.6667)
%(18,88.3333)
%(19,90)
%(20,80.3333)
%(21,80)
%(22,73.3333)
%(23,80)
%}; 
%\addlegendentry{Success rate without need for repair}
\addplot[mark=x] coordinates {
(4,0)
(5,0)
(6,0.805)
(7,8.435)
(8,5.578)
(9,0.89)
(10,1.207)
(11,2.33)
(12,2.767)
(13,5.167)
(14,4.767)
(15,8.533)
(16,4.66667)
(17,12.3333)
(18,9.66667)
(19,5.66667)
(20,8.333333)
(21,6.66667)
(22,8.33333)
(23,15.25)
(24,3.45)
}; %\addlegendentry{Requiring repair}
\end{axis}
% \begin{axis}...\end{axis} for normal plots,
% \begin{semilogxaxis}...\end{semilogxaxis} for plots which have a normal y axis and a logarithmic x axis,
% \begin{semilogyaxis}...\end{semilogyaxis} the same with x and y switched,
% \begin{loglogaxis}...\end{loglogaxis} for double–logarithmic plots.
\end{tikzpicture}  
  \caption{\label{repair:fig}Success rate with repair}
\end{figure}


\begin{figure}[h]
  \centering
\begin{tikzpicture}
\begin{semilogyaxis}[legend
  style={at={(0.53,0.95)}},width=7cm,%xlabel=dimension,%
%ylabel={Runtime ms, \#.~var.~occurr.},
ymax=10000000]
\addplot[mark=triangle] coordinates {
(4,0.1)
(5,0.1)
(6,0.1)
(7,0.1)
(8,0.128)
(9,0.3)
(10,0.4)
(11,0.6)
(12,1.0)
(13,1.9)
(14,3.2)
(15,6.0)
(16,11.0)
(17,29.2)
(18,48.6)
(19,111.3)
(20,178.8)
(21,645.4)
(22,1528.9)
(23,2088)
(24,5468)
}; 
%\addlegendentry{Combin.~alg.}%No repair necessary
%\addplot[mark=x] coordinates {
%(4,0.1)
%(5,0.1)
%(6,0.1)
%(7,0.1)
%(8,0.2)
%(9,0.3)
%(10,0.5)
%(11,1.1)
%(12,1.1)
%(13,1.7)
%(14,3.4)
%(15,4.0)
%(16,3.5)
%(17,11.4)
%(18,27.9)
%(19,12.9)
%(20,98.4)
%}; \addlegendentry{Repair needed}
%	\addplot+[smooth,mark=*,mark size=1pt] file {input.dat}; \addlegendentry{file input}
\addplot[mark=star] coordinates {
(4,0.1)
(5,0.2)
(6,0.2)
(7,0.3)
(8,0.3)
(9,0.5)
(10,0.8)
(11,1.4)
(12,2.3)
(13,6.4)
(14,10.0)
(15,24.0)
(16,34.2)
(17,97.4)
(18,262.1)
(19,897.3)
(20,2178.2)
(21,12271.8)
(22,29291.5)
(23,30753.6)
(24,36990)
}; %\addlegendentry{LP alg.}
%\addplot[mark=o] coordinates {
%(4,3.29)
%(5,5.09)
%(6,8.13)
%(7,13.18)
%(8,20.85)
%(9,40.16)
%(10,59.81)
%(11,101.57)
%(12,148.5)
%(13,266.89)
%(14,387.68)
%(15,683.66)
%(16,1018.9)
%(17,2005.45)
%(18,2723.27)
%(19,4926.82)
%(20,6406.7)
%(21,17388.7)
%(22,22239.82)
%(23,36524.78)
%}; \addlegendentry{\# clauses per DNF}
\addplot[mark=o] coordinates {
(4,7.04)
(5,13.08)
(6,24.39)
(7,45.34)
(8,69.64)
(9,179.11)
(10,293.07)
(11,550.51)
(12,871.7)
(13,1710.76)
(14,2667.24)
(15,5052.25)
(16,8039.12)
(17,16885.9)
(18,24264.3)
(19,46262.8)
(20,63234.13)
(21,181538)
(22,243303)
(23,259795)
(24,292844)
}; 
%\addlegendentry{Input size}%No repair necessary
\legend{
Comb.~alg.,
LP alg.,
Input size
};
\end{semilogyaxis}
% \begin{axis}...\end{axis} for normal plots,
% \begin{semilogxaxis}...\end{semilogxaxis} for plots which have a normal y axis and a logarithmic x axis,
% \begin{semilogyaxis}...\end{semilogyaxis} the same with x and y switched,
% \begin{loglogaxis}...\end{loglogaxis} for double–logarithmic plots.
\end{tikzpicture}  
  \caption{\label{runtime:fig}Runtime}
\end{figure}

Due to the distortion, the curve for the runtime flattens at $m=23,
24$, even more so for the LP algorithm, and so we would really be
hurting ourselves by presenting distorted results that work against
us. 

\subsection{Segmentation Fault with cutTree (20.5.13)}
\label{cutTree-revisted-subsec}
The example \verb+all_8_2_cutTree_segFault.dnf+ gave a segmentation
fault with cutTree. This was due to an omission in \verb+monorepair+
(analogously also in \verb+repair2+): for a non-final node that is
true or false, it may be that it only has one child, so that the other
one has to be generated artificially. 

It is now fixed. 


\subsection{cutTree Loses Solution}

Hallo,

ich habe ein sehr seltsames Ph\"anomen, das mit cutTree
zusammenh\"angt. Bei einer sehr gro\ss en Suite, die Ihr auf
\verb+all_8_1_cutTree_fails.dnfs+ findet,
ist es so, dass der Algo mit cutTree ein Beispiel weniger l\"osen kann
als ohne cutTree (siehe meine Interaktion unten). Sobald ich eine DNF
vom Anfang der Datei entferne, verschwindet dieser Unterschied. Es ist
aber NICHT so, dass die erste DNF diejenige ist, die er mit cutTree
nicht l\"osen kann und ohne schon. Deshalb kann ich das Beispiel auch
nicht isolieren. Irgendwas seltsames passiert, was mit dem
Collection-Zeugs zu tun hat. 

I have solved the problem by simply putting the first example in
\verb+8_1+ at the very end. It works. 

\subsection{Symmetry Test (22.7.13)}
The symmetry test used to be on the todo list: 

I (JGS) have a very clear
  idea of how this should work, let me see if I can communicate it as
  clearly: Suppose the input DNF $\phi$ has the same occurrence patterns
  for $x_1, x_2$. So far, the code simply trusts that $\phi$ is symmetric
  in $x_1, x_2$ and constructs the splitting tree. The second column
  consists of the nodes $\Cut{\phi}{\{x_1\}}{0}$ and
  $\Cut{\phi}{\{x_1\}}{1}$. The third column consists of three nodes: 
at the top,  $\Cut{\Cut{\phi}{\{x_1\}}{0}}{\{x_2\}}{0}$; at the bottom,
$\Cut{\Cut{\phi}{\{x_1\}}{1}}{\{x_2\}}{1}$. But at the middle, the current
code constructs $\Cut{\Cut{\phi}{\{x_1\}}{0}}{\{x_2\}}{1}$, trusting that it
is identical to $\Cut{\Cut{\phi}{\{x_1\}}{1}}{\{x_2\}}{0}$ thanks to symmetry
(or maybe it is exactly vice versa, I do not know). 

The modification that is needed is that both
\linebreak  
$\Cut{\Cut{\phi}{\{x_1\}}{0}}{\{x_2\}}{1}$ and 
$\Cut{\Cut{\phi}{\{x_1\}}{1}}{\{x_2\}}{0}$ are constructed explicitly and
checked for identity (depending on the datastructures, this identity
check may be nontrivial). If not, $\phi$ is not a threshold function.  

Around 22.7.2013, CS has implemented the test. One has a command line
option e.g.
\begin{verbatim}
 ./boolean-function-recognition --repair true 
  --lpbConv combinatorial --cutTree true 
  --symTest true -d tests/test_files/bizarre_output2.dnf
\end{verbatim}
so that we can easily check how expensive it is.

The first observation, not obvious at all, is that the test is really
necessary, i.e., there are examples where without the test, we wrongly
obtain a result. It is not so easy to create examples with two variables
that are not symmetric yet have the same occurrence patterns. 
Yet on 27.7.13 I did construct two examples. 

The first example I designed is \verb|4vars4clauses.dnf|. It looks as
follows:
$(x_1\land x_2)\lor(x_1 \land x_4) \lor
(x_2 \land x_3) \lor
(x_3 \land x_4)$. 
The
trick for designing this example is to start with the DNF for 
$x_1+x_2+x_3+x_4\geq 2$ and remove the two clauses $(x_1 \land x_3)$ and
$(x_2\land x_4)$. By doing so, the occurrence pattern for each variable is
$\ms{2,2}$; $x_1$ and $x_3$ are symmetric to each other; $x_2$ and
$x_4$ are symmetric to each other; but $x_1$ or $x_3$ are not
symmetric to  $x_2$ or $x_4$. 

Without the symmetry test, the program wrongly computes the result 
$x_1+x_2+x_3+x_4\geq 2$. 

Note that the example crucially depends on the variable numbering,
i.e., the order in which the variables are treated in the tree
construction (which is arbitrary a-priori since the variables all have
the same occurrence patterns). For a different ordering, even without
the symmetry test, no result will be computed, because some numerical
constraint won't be satisfiable 
(``It is not possible to choose a coefficient \ldots''). 
That is, it is quite subtle to
concoct an example so that the tree constructed without the symmetry
test (incorrectly) becomes a tree that corresponds to an LPB.  

The second example I designed is \verb|6vars10clauses.dnf|. This is an
example where the asymmetry arises between variables other than
$x_1,x_2$. The example was found by starting off with 
$5x_1 + 4x_2 + 3 x_3 + 3 x_4 + 2 x_5 + 2 x_6 \geq 8$, whose DNF is 
\[
\begin{array}{l}
(x_1 \land x_2)\lor
(x_1 \land x_3)\lor
(x_1 \land x_4)\lor
(x_1 \land x_5 \land x_6)\lor\\
(x_2 \land x_3 \land x_4)\lor
(x_2 \land x_3 \land x_5)\lor
(x_2 \land x_3 \land x_6)\lor\\
(x_2 \land x_4 \land x_5)\lor
(x_2 \land x_4 \land x_6)\lor
(x_2 \land x_5 \land x_6)\lor\\
(x_3 \land x_4 \land x_5)\lor
(x_3 \land x_4 \land x_6).
\end{array}
\] 
The interesting thing about this example is that $x_3,x_4,x_5,x_6$ are not
globally symmetric, but in the node obtained by going up then down we
have the sub-DNF $(x_3 \land x_4)\lor
(x_3 \land x_5)\lor
(x_3 \land x_6)\lor
(x_4 \land x_5)\lor
(x_4 \land x_6)\lor(x_5 \land x_6)$ in which $x_3,x_4,x_5,x_6$ are symmetric. 
What I do now is that I destroy this symmetry similarly as above by
removing the two clauses $(x_2 \land x_3 \land x_5)$ and 
$(x_2 \land x_4 \land x_6)$ from my input. Then, $\OP(x_1)=\ms{2,2,2,3}$,
$\OP(x_2)=\OP(x_3)=\OP(x_4)=\ms{2,3,3,3,3}$,
$\OP(x_5)=\OP(x_6)=\ms{3,3,3,3}$. Without symmetry test, this wrongly
gives the result 
$3 x_1 + 2 x_2 + 2 x_3 + 2 x_4 + 1 x_5 + 1 x_6 \geq 5$. 

However, I notice that this example does not turn out as intended. I
wanted the symmetry test to discover the asymmetry between $x_3$ and
$x_5$ or $x_4$ and $x_6$, in a sub-DNF. But by removing $(x_2 \land x_3 \land x_5)$ and 
$(x_2 \land x_4 \land x_6)$, the occurrence pattern of $x_3$ and $x_4$
becomes the same as that of $x_2$, and now it's actually the asymmetry
between $x_2$ and $x_3$ that kicks in (maybe the other one would have
been discovered later \ldots). Anyway we do have an example here where the
asymmetry is not between $x_1$ and $x_2$ and hence not at the topmost
level.  

Otherwise, I took a dozen of my bigger benchmarks and modified them by
arbitrarily cutting out one or several clauses. For most examples, the
outcome was a message ``It is not possible to choose a coefficient
\ldots'' or a segmentation fault, with or without symmetry test! The only
benchmark where the symmetry test mattered was \verb|20_14.dnf| where
the test discovered an asymmetry between columns 14 and 15 (variables
15 and 16), while without the test we got a segmentation fault. 

On 21.8.13, I repeated those tests. I now got quite a number of cases
where the symmetry test caught although there were also some examples
where I got the message  ``It is not possible \ldots''. 
The example \texttt{segmentation\_fault} is now ok and does not cause a
segmentation fault. 
The example \texttt{bizarre\_output} produces some bizarre output
without symmetry test but with symmetry test it is correctly
discovered that there are asymmetric variables. 

The only example that gives a segmentation fault on 21.8.13 is
\verb+wild8vars1.dnf+, and this is due to the fact that the number of
variables is 10 while only 8 are declared in the first line. 

The next question is how expensive the test is. I tested many of the
bigger benchmarks and it looks like the test makes the overall program
run 15 to 30\% slower. It would be useful to make the test more
efficient but we cannot do without the test. Doing detailed
benchmarking on how expensive the test is on what examples seems a
waste of time to me. 

However, I wanted to know how expensive the symmetry test can become
at the extreme, namely when the DNF contains the maximal number of
symmetries, and this is the case for cardinality constraints. I
generated the cardinality constraint 10 out of 20:
\verb|card_constraint10from20.dnf|. It takes around 7 seconds without
symmetry test and about 10 with symmetry test (164 seconds with the LP
algorithm). Note that the symmetry savings for this example are
enormous: There are 10 final true nodes (just 1 if we switch on
cutTree) for 
184756 clauses, giving a quotient of 5.41254e-05 (5.41254e-06, resp.). 


\section{Things to Do on the Code (28.7.13)}

What are the priorities for improving the code: 
\begin{enumerate}
%\item\label{symmetry-checking-it} See Subsec.~\ref{symmetry-subsec}. 
\item\label{mic-mac-it} 
Making all those operations looking for lower and upper siblings more
systematic (the MIC-MAC stuff in the code), see
Subsec.~\ref{mic-mac-stuff}; there is so much code
repetition here! In particular, we should devise methods for finding
the ``cousins'' of arbitrary degree of a given node. I can spell this
out in more detail \ldots 
\item\label{monorepair-hack-it} 
Clean up my \texttt{monorepair} hack, see
Subsec.~\ref{monorepair-hack}. 
\item\label{M-N-nomenclature-it}
Find better names for the variables used in the repair procedures so
that 
(1) stand-alone vs.~split-up (Subsec.~\ref{repair2-subsec}), 
(2) $\sigma=\rho+\rho_2$ vs.~$\sigma=\rho-\rho_2$ (Subsec.~\ref{repair2-minus-subsec}), 
(3) upper bound vs.~lower bound (Subsec.~\ref{M-N-nomenclature}) 
cause less confusion. 
\item\label{real-it} 
Use real-valued coefficients, i.e., do not restrict to integer
coefficients, see among others, Subsec.~\ref{bound-rounding-subsec}).  
\item\label{gap-it}
Maximising the gap, see Subsec.~\ref{gap-sec}. 
\item\label{heuristic-it}
Heuristics for choosing coefficients using Chow parameters or similar.
\item\label{memory-it}
Reflect on what our datastructures look like and where the memory is
used. 
\item\label{sum-it} CS: Sums for $\false$ could be computed more efficiently. Our previous attempts failed somehow (we did not investigate them any further, though). Note that this both has no notable impact on the total runtime and is fishy since the coefficients can change due to various reasons (doubling of coefficients, several repair methods).
\item\label{negative-variables}
The example ex1.dnf shows that the code computes a wrong result if
there are negative variables. That's not so urgent as it is
conceptually a trivial bug, but it should be fixed. 
\end{enumerate}


\section{Generating Random LPBs}
In order to evaluate algorithms for the threshold recognition problem,
one needs benchmark DNFs. In \cite{PalGopTra10}, all monotone (unate) 
Boolean functions of up to 5 variables are considered as input. For 6
variables, 200,000 ``random'' monotone functions are chosen as input. 
We have several objections to this approach:
\begin{enumerate}
\item 
As explained above, 
there is no point in allowing for both polarities for variables. 
\item
For testing purposes, it is best to use DNFs for which it is
\emph{known} that they are threshold functions. To do so, one should
generate LPBs, convert them into DNFs, and then apply the algorithms
to those generated DNFs, hoping that the algorithm will recognise them
as threshold functions. (In addition, one might also test
some DNFs known not to be threshold functions, hoping that the
algorithm will not mistakenly recognise them as threshold functions.)
\item
The approach will enumerate pairs of functions that are symmetric to
each other, i.e., one function is obtained from the other by permuting
the variables. 
\end{enumerate}
  
Apart from a few tests on non-threshold functions, we tested all
algorithms by generating LPBs where the coefficients are all positive
and non-increasing from $a_1$ to $a_m$. Up to $m=7$, we generated all 
%TODO: true???
LPBs up to equivalence. For bigger $m$, the number of LPBs becomes too
big and so we generated a ``random'' sample of LPBs.

It is typical for methods of random generation of objects that it is
hard to show that the sample is ``representative'' in any sense.
We do not claim anything in this respect. The rationale for our method
of generating random LPBs is explained as follows:
\begin{enumerate}
\item 
The method should be simple and avoid ad-hoc choices.
\item 
It should be possible to calibrate how big the increase from the
smallest to the biggest coefficient is.  
\item 
Every coefficient vector (for given $m$) should have a positive
probability, however small, of being generated. 
\end{enumerate}

We propose an algorithm for generating a non-decreasing sequence of
coefficients. For the purposes of our algorithm, we generate a
sequence of real numbers; the actual coefficients are obtained by
rounding off. The algorithm is based on a ``function'' 
$\mathit{rf}(W)$ (``random factor'') which generates a random number
$\geq1$ that can become arbitrarily big. The smallest coefficient is
simply given by the first return value of this function. Each
subsequent coefficient is obtained by multiplying the previous one with
a factor obtained by calling $\mathit{rf}(W)$. For 
$\mathit{rf}(W)$, we chose $r^{-W}$, where $r$
%TODO: Better (0,1]?
is a random number from a uniform distribution in the interval $[0,1)$
(the simplest pseudorandom generator usually provided by programming
languages). $W$ is a parameter used to calibrate the sequence; 
the excentricity of an LPB will tend to increase with
$\mathit{rf}(W)$ and hence with $W$.

\begin{table*}[t]
\begin{tabular}{|l|l|l|l|l|}
$W$ & Coefficients & Exc. & Sym. & Thresholds\\\hline
0.1 & 5\;4\;3\;2\;2\;1\;1\;1\;1\;1\; &  1.20 &  0.56 &  5\;5\;11\;16\;16 \\
0.1 & 2\;2\;2\;2\;1\;1\;1\;1\;1\;1\; &  1.08 &  0.89 &  2\;5\;7\;9\;12 \\
0.1 & 2\;2\;1\;1\;1\;1\;1\;1\;1\;1\; &  1.08 &  0.89 &  2\;5\;6\;7\;10 \\\hline
0.5 & 585\;322\;52\;24\;23\;19\;13\;11\;10\;1\; &  2.03 &  0.0 &  585\;54\;530\;1006\;475 \\
0.5 & 230\;115\;104\;74\;38\;21\;11\;10\;5\;1\; &  1.83 &  0.0 &  230\;48\;305\;561\;379 \\
0.5 & 142\;59\;11\;5\;4\;3\;2\;1\;1\;1\; &  1.73 &  0.22 &  142\;8\;115\;221\;87 \\\hline
1.0 & 133801\;132695\;34555\;10416\;6292\;3242\;375\;76\;18\;6\; &  3.04 &  0.0 &  133801\;3717\;160738\;317759\;187675 \\
1.0 & 10058\;5952\;4709\;1595\;277\;265\;111\;71\;29\;1\; &  2.78 &  0.0 &  10058\;477\;11534\;22591\;13010 \\
1.0 & 80646\;41081\;646\;590\;28\;21\;21\;4\;1\;1\; &  3.51 &  0.22 &  80646\;48\;61520\;122991\;42393 \\\hline  
\end{tabular}
  \caption{Some generated LPB examples}
  \label{tab:lpb-examples}
\end{table*}

Table~\ref{tab:lpb-examples} shows some of the coefficient sequences
generated for various values of $W$. 
For $W=0.1$, duplicate coefficient sequences are not unlikely, and so we
remove duplicates in our generated benchmark suite. 
For $W=0.5$, the increase of the coefficients is very big already
(compared to any examples we came across in the literature) and so we
do not consider any $W$ bigger than that.  We decided to use the
following values for $W$: 0.1, 0.2, 0.3, and 0.5.

After choosing some coefficients $a_1,\ldots,a_m$, the next question is how
to choose the threshold $d$. One might introduce some randomisation
here as well, but we found it more important to cover systematically
the spectrum of possibilities while avoiding choices that are too
ad-hoc. We decided to generate, for each coefficient vector, 5 LPBs,
with the following thresholds (we add a short explanation of which
variables suffice to make the LPB true, to explain the rationale of
our choices):

\smallskip

\begin{tabular}{llp{0.4\textwidth}}
1) & $a_1$ & The strongest variable alone\\[2mm]
2) & $\sum_{i=\lfloor m/2\rfloor+1}^{m}a_i$ & The weaker half\\[2mm]
3) & $\lceil(\sum_{i=1}^{m}a_i)/2\rceil$ & The ``average'' half\\[2mm]
4) & $\sum_{i=1}^{\lfloor m/2\rfloor}a_i$ & The stronger half\\[2mm]
5) & $\sum_{i=2}^{m}a_i$ & Any $m-1$ variables
\end{tabular}

\smallskip 

Table~\ref{tab:lpb-examples} shows those 5 threshold values for each
coefficient vector. Note that the idea is that the 5 thresholds should
be an increasing sequence: for the first threshold, the LPB has very
many true points; \ldots; for the third, around half of the points are
true; \ldots; for the fifth, the LPB has very few true points. However, 
for coefficient vectors with relatively big $a_1$ an ``anomaly''
occurs. 

To refer to our benchmarks, we denote by $\LPBs(m,W,k)$ the set of
generated LPBs with $m$ variables, where $W$ is the parameter
explained above and $k\in\{ 1,2,3,4,5\}$ is the number of the threshold as
explained above. 

We suspected that for the LPBs generated for big $W$ (like $0.5$), 
the excentricity is much ``bigger than necessary'', i.e.,  
there will frequently exist equivalent LPBs with much
smaller excentricity (and more symmetries). 
To confirm this, we recorded, for each class of generated LPBs, the
smallest, biggest and average values of the symmetry quotient and
excentricity (``before''), and the same values for the LPBs obtained after
conversion to DNFs and reconversion to LPBs (``after''). The results for 
$\LPBs(10,0.5,3)$ are shown in the following table:

\smallskip

%TODO: Experiment to confirm this with detailed table (maybe not all
%details will end up in the paper). 
%TODO: do this experiment
\begin{tabular}{|ll|ll|ll|}
\hline
\multicolumn{6}{|c|}{Excentricity}\\\hline
\multicolumn{2}{|c|}{minimum} & \multicolumn{2}{c|}{average} & \multicolumn{2}{c|}{maximum}\\
before & after &  before & after &  before & after\\
&&&&&\\\hline\hline     
\multicolumn{6}{|c|}{Symmetry quotient}\\\hline
\multicolumn{2}{|c|}{minimum} & \multicolumn{2}{c|}{average} & \multicolumn{2}{c|}{maximum}\\
before & after &  before & after &  before & after\\
&&&&&\\\hline     
\end{tabular}


\section{Conclusion}\label{concl-sec}
 
Hooker has proposed an algorithm for generating the strongest 0-1 ILP
constraints, within a candidate set $T$, that are implied by a set $S$
of 0-1 ILP constraints \cite{Hoo92}. Letting $T$ be the set of all
LPBs, the algorithm can be used to transform a CNF to an LPB. However,
the algorithm is practical only for certain restrictions of $T$. In
the general case, which we need here, it is unclear if the algorithm
is any better than enumerating and checking all LPBs. This is however
an interesting topic for future work.

Some Boolean functions can be represented by a single LPB. 
The problem of
finding this LPB representation is
called the \emph{threshold recognition problem}.  In this work, we have
implemented two algorithms for this problem, a classical one based on
linear programming, and a more recent one that we have previously
presented. The most important insight was that our algorithm 
is, unfortunately, incomplete. 

%\subsection{Reestablishing completeness}
The most important topic for future work is, of course, trying to
reestablish completeness.

The obvious way to achieve this is to incorporate some kind of
backtracking into the algorithm: If a DNF can be represented by an LPB
and we cannot choose $a_{k+1}$, then this is because we must have
chosen one of the coefficients $a_{k+2},\ldots,a_m$ too small, since our
strategy so far was to choose the coefficients as small as possible. In
order to find a solution we increment the coefficients $a_{k+2},\ldots,a_m$
and re-evaluate the LPBs. We can use the minimum and maximum degrees
to ensure that we enumerate only legal candidates. We iteratively
increment the coefficients until we can choose the coefficient
$a_{k+1}$.

One problem of this approach is that for a DNF that cannot be
represented by an LPB, termination is not guaranteed, because
frequently the choice of the next coefficient is not bounded from
above. However, we are confident that this problem can be resolved
because it should be possible to derive some upper bound for each
variable in the sense that it is never necessary to choose a
coefficient bigger than this bound (something along the lines: it is
never necessary to choose a coefficient more than $m$ times bigger
than the previous coefficient). 

The other problem is of course that backtracking worsens the runtime
of the algorithm, and we very much fear that it will destroy the
polynomial runtime of the algorithm. 

The backtracking approach has been implemented and was able to find a
solution for each tested input DNF. But the implementation has also
shown that the higher the dimension $m$, the more often we have to use
backtracking. 

Alternatively, or more likely, additionally, one might use the
occurrence patterns for estimating the weight ratio:
 In the example above we were able to represent all LPBs
  in the fourth column but we were not able to choose $a_3$ s.\,t. we
  can represent all LPBs in the third column with the configuration
  $a_6 = 1, a_5 = a_4 = 2$. We need some \emph{global} information
  that the distance between $a_6$ and $a_5, a_4$ will be too small in
  the sequel.

  Maybe it is possible to use the occurrence patterns to formulate
  such constraints, i.e., one might find a constraint of the form ``in
  an LPB representing $\phi$ one has to ensure that $a_i \geq w \cdot a_j$''.

It has to be said however that there have been previous attempts to
somehow directly translate the occurrence patterns into numeric
coefficients or better, coefficient ratios; the threshold recognition
problem has stubbornly resisted such attempts.\footnote{Personal
  communication with Yves Crama.}

However, even a \emph{rough estimate} of the coefficient ratio, based
on the occurrence patterns, might be useful for reducing if not
eliminating the backtracking effort. 

One other interesting topic is a more thorough analysis of the
complexity of the combinatorial algorithm, whether it is in its
current state or after having achieved completeness. In particular, as
we have mentioned in Subsec.~\ref{only12}, analysing the effect of
exploiting the symmetries in the input DNF would be interesting.  

%
% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliography{smaus,own_publications}  % smaus.bib and own_publications.bib are the bibliography files in this case
% You must have a proper ".bib" file
%  and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%
%APPENDICES are optional
%\balancecolumns

%\appendix
%Appendix A
%\balancecolumns % GM June 2007
% That's all folks!



\end{document}





%%% Local Variables:
%%% mode: latex
%%% x-symbol-8bits: t
%%% TeX-master: t
%%% coding: utf-8-unix
%%% End:
