\NeedsTeXFormat{LaTeX2e}

\documentclass{new_tlp}
%\listfiles % versions of the packages are listed in the .log file

\usepackage{../myproof}
\usepackage{stmaryrd}
\usepackage{latexsym}
\usepackage{url}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{comment}
\usepackage{color}
\usepackage{times}
\usepackage{../mycommands}
\usepackage{multirow}
\usepackage{xspace}
\usepackage{alltt}
\usepackage{booktabs}
%\usepackage{lineno}
\usepackage{algorithm}
\usepackage{algorithmic}
\RequirePackage{txfonts}


% General notation macros.
% \dnot: the default-negation symbol "not" used in the ASP definitions.
\newcommand\dnot{\ensuremath{\mathit{not}}\xspace}
% \plus: typeset as \oplus.
\newcommand\plus{\oplus}
% \lpar: typeset as the "par" symbol \bindnasrepma (from stmaryrd);
% \lpar is presumably first defined in mycommands — hence \renewcommand.
\renewcommand\lpar{\mathrel{\bindnasrepma}}

% System names, typeset via \tsl (defined in mycommands — TODO confirm).
\newcommand\SELL{\tsl{SELL}}
\newcommand\SELLF{\tsl{SELLF}}
\newcommand{\etal}{\emph{et al.}}
% \lra: shorthand for \longrightarrow.
\newcommand\lra{\longrightarrow}

% Constraint predicates
% The macros below typeset the atomic formulas of the LP theory
% (Table~\ref{fig:predicates}); \tsl presumably renders predicate names.
% NOTE(review): \elin prints the predicate name "unitctx", not "elin" —
% confirm the macro-name/predicate-name mismatch is intentional.
\newcommand{\elin}[2]{\ensuremath{{\tsl{unitctx}(\ensuremath{#1}, \ensuremath{#2})}}}
\newcommand{\emp}[1]{\ensuremath{{\tsl{emp}(\ensuremath{#1})}}}
\newcommand{\eqctx}[2]{\ensuremath{{\tsl{eqctx}(\ensuremath{#1},\ensuremath{#2})}}}
\newcommand{\union}[3]{\ensuremath{{\tsl{union}(\ensuremath{#1},\ensuremath{#2},\ensuremath{ #3})}}}
\newcommand{\In}[2]{\ensuremath{\tsl{in}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\Equ}[2]{\ensuremath{\tsl{Equ}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\unions}[2]{\ensuremath{\tsl{Unions}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\equal}[2]{\ensuremath{\tsl{Equal}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\equalCtx}[2]{\ensuremath{\tsl{EqualCtx}(\ensuremath{#1},\ensuremath{#2})}}
% ProveIf predicates
\newcommand{\inSequent}[2]{\ensuremath{\tsl{inSequent}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\inDer}[2]{\ensuremath{\tsl{inDer}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\provIf}[2]{\ensuremath{\tsl{proveIf}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\notProvIf}[2]{\ensuremath{\tsl{notProveIf}(\ensuremath{#1},\ensuremath{#2})}}
\newcommand{\bounded}[1]{\ensuremath{{\tsl{bounded}(\ensuremath{#1})}}}

% Names of the logic programs
% Presumably \LPder names the derivation-checking program and \LPprov the
% provability-checking program — confirm against their first use in the text.
\newcommand\LPder{\ensuremath{\mathbb{P}_1}}
\newcommand\LPprov{\ensuremath{\mathbb{P}_2}}

% enumitem-based list backing the MyQuote environment below.
\usepackage{enumitem}
\newlist{myQuoteEnumerate}{enumerate}{2}% Set max nesting depth
\setlist[myQuoteEnumerate,1]{label=(\arabic*)}% Use numbers for level 1
\setlist[myQuoteEnumerate,2]{label=(\alph*)}%   Use letters for level 2

% MyQuote: one numbered, quoted block per use. Numbering is continuous
% across uses via the resumed series "MyQuoteSeries"; the body is set as a
% quotation. Line-final % signs suppress spurious spaces — keep them.
\newenvironment{MyQuote}{%
    \begin{myQuoteEnumerate}[resume=*,series=MyQuoteSeries]%
%     \color{blue}%
    \item \begin{quote}%
}{%
    \end{quote}%
    \end{myQuoteEnumerate}%
}%


% changemargin{#1}{#2}: temporarily indent the text block by #1 on the left
% and #2 on the right, implemented as a single-item trivial list so that
% paragraph shape and spacing follow the surrounding text.
\newenvironment{changemargin}[2]{% 
  \begin{list}{}{% 
    \setlength{\topsep}{0pt}% 
    \setlength{\leftmargin}{#1}% 
    \setlength{\rightmargin}{#2}% 
    \setlength{\listparindent}{\parindent}% 
    \setlength{\itemindent}{\parindent}% 
    \setlength{\parsep}{\parskip}% 
  }% 
\item[]}{\end{list}} 

% Slanted math alphabet (Computer Modern Roman, medium, slanted).
\DeclareMathAlphabet{\mathsl}{OT1}{cmr}{m}{sl}

% Theorem-like environments; all share the "theorem" counter.
\newtheorem{theorem}{Theorem} 
\newtheorem{definition}[theorem]{Definition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
% Unnumbered remark: bold "Remark" label, no end decoration.
\newenvironment{remark}{\noindent \textbf{Remark}\quad}{}
% \red{text}: highlight text in red (review/draft annotations). The original
% replacement text ended with a stray space before the closing brace, which
% injected a spurious space after every \red{...}; removed here.
\renewcommand{\red}[1]{\textcolor{red}{#1}}
% Paragraph{title}: environment wrapper around \paragraph with empty end code.
% Fixed: \newenvironment takes TWO mandatory arguments {begin}{end}; the
% original omitted the end-code group, so TeX grabbed the next token
% (\newcommand) as the end code, corrupting the following definition.
\newenvironment{Paragraph}[1]{\paragraph{#1}}{}

% \vSpace: vertical-space hook, currently a no-op (-0mm); adjust in one place.
\newcommand\vSpace{\vspace{-0mm}}

% Front matter: title, authors, and journal metadata (commands provided by
% the new_tlp document class).
\title[International Conference on Logic Programming]
      {Checking Proof Transformations with ASP}

\author[V. Nigam, G. Reis and L. Lima]
       {VIVEK NIGAM and LEONARDO LIMA\\
       Universidade Federal da Para\'{i}ba, Brazil\\
       \email{\{vivek.nigam, leonardo.alfs\}@gmail.com}
       \and GISELLE REIS\\
       Vienna University of Technology, Austria\\
       \email{giselle@logic.at}}

% Journal bookkeeping; \doi is left empty for the publisher to fill in.
\jdate{April 2013}
\pubyear{2013}
\pagerange{\pageref{firstpage}--\pageref{lastpage}}
\doi{}

\begin{document}
\label{firstpage}
\maketitle

\begin{abstract}
Proof transformation is an important proof theoretic technique that has
been used for showing a number of foundational results about proof
systems. For instance, it is used for showing the admissibility of the
cut-rule and the completeness of proof search strategies, such as uniform
provability and the focusing discipline. However, in order to check the
validity of a proof transformation, such as when one inference
rule permutes over another, one needs to consider the combination of how 
inference rules may be applied. Therefore, checking the correctness of proof
transformations is prone to human error. This paper offers the means to
automate the checking of such transformations by using Answer Set Programming (ASP). 
\end{abstract}

\begin{keywords}
Answer Set Programming, Automatic Verification, Logical Frameworks, Proof Theory
\end{keywords}

\section{Introduction}
Proof transformation is a powerful technique used in proving many
foundational results about proof systems. For instance, one demonstrates
the admissibility of the cut-rule~\cite{gentzen35} by showing
how to transform a proof with cuts into a proof without cuts.
Similarly, in order to show the completeness of a proof search strategy,
such as uniform provability~\cite{miller91apal}  and the focusing
discipline~\cite{andreoli92jlc}, one demonstrates how to transform an
arbitrary (cut-free) proof into another (cut-free) proof that follows the
given proof search strategy. 

However, it is often a tedious task to verify whether a proof
transformation is valid, especially when there is a great number of cases to
consider. For example, in the proof of completeness of the focusing discipline and of the 
admissibility of the cut rule, one needs to show that some rules permute over other
rules~\cite{miller07cslb,gentzen35}.
These results are called \emph{permutation lemmas} and they are obtained by
showing that any proof where two rules are applied in a determined order can be
transformed into another proof where these rules are permuted. For example: a
linear logic~\cite{girard87tcs} proof ending with the  derivation to the left,
where $\tensor_R$ is applied last, can (always) be transformed into another 
linear logic proof ending with the derivation to the right, 
where the $\with_R$ is applied last:
%
\begin{small}
\[
\infer[\tensor_R]{\Gamma, \Gamma' \vdash \Delta, \Delta', A\tensor B, C \with D}{
  \deduce{\Gamma \vdash \Delta, A}{\Xi_1}
  &
  \infer[\with_R]{\Gamma' \vdash \Delta', B, C\with D}{
    \deduce{\Gamma' \vdash \Delta', B, C}{\Xi_2}
    &
    \deduce{\Gamma' \vdash \Delta', B, D}{\Xi_3}
  }
}
\quad \rightsquigarrow \quad
\infer[\with_R]{\Gamma, \Gamma' \vdash \Delta, \Delta', A\tensor B, C\with D}{
  \infer[\tensor_R]{\Gamma, \Gamma' \vdash \Delta, \Delta', A\tensor B, C}{
    \deduce{\Gamma \vdash \Delta, A}{\Xi_1}
    &
    \deduce{\Gamma' \vdash \Delta', B, C}{\Xi_2}
  }
  &
  \infer[\tensor_R]{\Gamma, \Gamma' \vdash \Delta, \Delta', A\tensor B, D}{
  \deduce{\Gamma \vdash \Delta, A}{\Xi_1}
  &
  \deduce{\Gamma' \vdash  \Delta', B, D}{\Xi_3}
  }
}
\]
\end{small}%
The proof transformation above is one of the many cases required in showing
that any instance of a $\tensor_R$ rule can permute over any instance
of a $\with_R$. In general, checking the correctness of such transformations
involves checking that: (\textbf{Obligation 1}) all rules are correctly applied; and that 
(\textbf{Obligation 2}) the premises of
the latter derivation can be proved using the proofs introducing the
premises of the former derivation. For instance, in the case above, the
proof $\Xi_1$ introducing the sequent $\Gamma \vdash \Delta, A$ in the former
derivation can be used twice in the latter derivation. Similar permutation 
lemmas also appear in the proof of cut-elimination, in particular, when 
transforming a proof with cuts into a proof with \emph{principal
cuts}~\cite{gentzen35}.\footnote{In fact, permutation lemmas compose a
great part of the cut-elimination proof, roughly half of the proof. See for
example the proof in \url{http://twelf.org/wiki/Admissibility_of_cut}.} 

Although one can check by hand the validity of such proof transformations,
this procedure is prone to human error as they have a \emph{combinatorial} nature and 
therefore one can easily miss one case or another. For instance, the cut-elimination 
result for Bi-Intuitionistic logic~\cite{rauszer74studia} given by Rauszer was later found to 
be incorrect~\cite{crolard01tcs} exactly because one of the permutation lemmas was in fact
not true. A much better approach, therefore, is to automate the check of proof
transformations. 

As we demonstrate in this paper, Answer Set Programming
(ASP)~\cite{gelfond90iclp} can be easily used
for checking the correctness of a wide range of proof transformations. In our initial efforts, 
we tried to use functional programming to simulate all cases and check
whether the Proof Obligations 1 and 2 are satisfied. However, as illustrated
above, these problems require a combinatorial solution, for which our solution
using ASP turned out to be much simpler. In particular, we construct two
Answer Set Programs (henceforth referred to as LP):

\noindent
\textbullet~The first program, $\Tscr$, is constructed for a given inference
rule, $r$, and specifies the set of its valid instances. 
We show that $\Tscr$ is sound and complete in the sense
that its set of \emph{answer-sets} corresponds 
exactly to the set of all possible valid instances of the rule $r$, 
solving Obligation 1.

\smallskip

\noindent
\textbullet~For two given sequents, $\Sscr_1$ and $\Sscr_2$, the
second program, $\Pscr$, checks whether the sequent $\Sscr_2$ is provable,
when assuming that $\Sscr_1$ is also provable. In particular, we show that
if the program $\Pscr$ has at least one answer-set, then it is
possible to derive a proof of $\Sscr_2$ from a proof of $\Sscr_1$. This is enough to
solve Obligation 2. As this problem is undecidable in general, we show the soundness
of this program.

% \textbullet~ The first program, $\Tscr$, is constructed for a given inference
% rule, $r$, and specifies the set of its valid instances. 
% We show that $\Tscr$ is sound and complete in the sense
% that its set of \emph{answer-sets} corresponds 
% exactly to the set of all possible valid instances of the rule $r$, 
% solving Obligation 1.
% 
% \noindent
% \textbullet~ For two given sequents, $\Sscr_1$ and $\Sscr_2$, the
% second program, $\Pscr$, checks whether the sequent $\Sscr_2$ is provable,
% when assuming that $\Sscr_1$ is also provable. In particular, we show that
% if the program $\Pscr$ has at least one answer-set, then it is
% possible to derive a proof of $\Sscr_2$ from a proof of $\Sscr_1$. This 
% solves Obligation 2.
% This
% result can be interpreted as a soundness result. On the other hand, as we
% are using a linear logic framework, this problem can be reduced to the
% provability problem in linear logic, which is undecidable in
% general~\cite{lincoln90focs}. Hence, there is no hope for proving the
% completeness of our method. Nevertheless, our method did not
% fail to prove any of the permutation cases in our experiments with existing
% proof systems.
% \end{itemize}
% we specify an Answer Set Program (AS-Program) that is only satisfiable if and only if a given (meta-)derivation 
% is indeed a derivation obtained by applying correctly the given inference rules, such as $\tensor_R$ and $\with_R$, 
% thus solving Obligation 1. In fact, each model of this AS-Program corresponds 
% to one possible valid derivation. Similarly, we also construct an AS-Program that solves Obligation 2, by checking whether
% the proof introducing a sequent, \eg, $\Gamma \vdash \Delta, A$, can be used to prove a premise of a
% given derivation.

Another main advantage of using ASP is that it enables the
use of powerful off-the-shelf
provers \cite{niemela97lpmnr,leone06tcl}. We
implemented a tool that takes the specification of a proof system and
checks automatically which inference rules of the object-system permute
over another rule. 
Whenever the tool can find a valid permutation
it outputs the corresponding proof transformation,
and whenever it cannot show that a rule permutes over another, it can output
the cases that it failed to find a valid permutation. 
We used this tool to show a number of proof
transformations. For instance, our tool checks all cases of the key
permutation lemmas
needed for showing the completeness of the focusing
discipline~\cite{andreoli92jlc} and uniform proofs~\cite{miller91apal}.
To the best of our knowledge, no such tool is yet available. 

After reviewing  in Section~\ref{sec:prelim} the proof theoretic and logic programming machinery needed in 
this paper, we propose in Section~\ref{sec:derivations} an answer set program that specifies the derivations
that are available from the proof system, proving the correctness and completeness of the specification. 
Section~\ref{sec:provIf} contains the answer set program that checks whether a derivation is provable
assuming the provability of another derivation. We also show that this check is sound. Section~\ref{sec:together} elaborates
on how these two programs can be used to check whether a rule permutes over another and discusses our implementation 
and experimental results. Finally in Section~\ref{sec:conc}, we discuss our tool and experimental results 
and conclude by pointing out related and future work.

\vspace{-2mm}

\section{Preliminaries}
\label{sec:prelim}

\subsection{Sequents with Contexts and Inference Rules}
\label{sec:sequents}
We assume that the reader is familiar with the basic terminology of proof theory and only
introduce the terminology which will be used to guide our work.
In general, a \emph{sequent with contexts}~\cite{andreoli92jlc} is a generalization used in the context of 
logic programming and logical frameworks~\cite{nigam10jar,cervesato02ic} of the usual sequents in 
sequent calculus~\cite{gentzen35}. They are of the form
\[
\Gamma_1 \mid \Gamma_2 \mid \cdots \mid \Gamma_n \vdash
\Gamma_{n + 1} \mid \Gamma_{n + 2} \mid \cdots \mid \Gamma_{n + m}
\]
containing $n + m$ \emph{sequent contexts}. 
For example, in intuitionistic logic (LJ~\cite{gentzen35}) sequents are of the form 
$\Gamma \vdash A$ with $2$ contexts, one context to the left of the turnstile and another
to the right.

An \emph{inference rule} is composed of one conclusion sequent and zero or more 
premises. These are normally written using \emph{context variables}, 
\eg, $\Gamma, \Delta, \Theta$, which may be instantiated with collections of formulas, and 
\emph{formula variables}, 
\eg, $A\land B$, $\square A$, whose schema variables, $A$ and $B$, can be
instantiated with any formula. 

For instance, the inference rules for $\land_R$ of 
intuitionistic logic and $\square_r$ of the modal logic S4 are shown below:
\[
%\infer[\tensor_R]{{\Gamma, \Gamma'} \vdash \Delta, \Delta', A\tensor B}
%{{\Gamma} \vdash \Delta, A \qquad  {\Gamma'} \vdash \Delta', B}
%\qquad
\infer[\land_R]{{\Gamma} \vdash  A\land B}
{{\Gamma} \vdash A \qquad  {\Gamma} \vdash B}
\qquad
\infer[\square_r]{\square \Gamma, \Gamma' \vdash \square A, \diamond \Delta, \Delta'}{\square \Gamma \vdash A, \diamond \Delta}
\]
% }
%The $\tensor_R$ rule contains two premises, namely the sequents $\Gamma \vdash \Delta, A$ and 
%$\Gamma' \vdash \Delta', B$ and a conclusion sequent $\Gamma, \Gamma' \vdash \Delta, 
%\Delta', A\tensor B$. It introduces the principal formula $A \tensor B$; $A$ and $B$ are 
%active formulas.
%
The $\square_r$ rule contains one premise, namely the sequent $\square \Gamma
\vdash A, \diamond \Delta$, and a conclusion sequent $\square \Gamma, \Gamma'
\vdash \square A, \diamond \Delta, \Delta'$. It introduces the principal formula
$\square A$; $A$ is an active formula.  
Any instance of the context variables $\Gamma, \Gamma',
\Delta, \Delta'$ and the formula schema $A$ in the rule above will correspond to a 
\emph{valid} instance of this rule. Similarly for the $\land_R$ rule. In the
case of the S4 system, we consider each sequent as having \emph{four} contexts:
boxed formulas on the left, other formulas on the left, diamond formulas on the
right and other formulas on the right. Whereas for intuitionistic logic
sequents, only \emph{two} contexts are needed: left and right.

A sequent calculus \emph{proof system} $\Pscr$ for a logic $L$ is a set of
inference rules such that the formulas derived by the application of these rules
are exactly the formulas valid in $L$. We say that a sequent $\Sscr$ is
\emph{provable or derivable} in $\Pscr$ if there exists a derivation of $\Sscr$ using the
inference rules of $\Pscr$ such that all leaves are \emph{closed}, i.e., their
topmost rule has no premises. A leaf (or premise) is said to be \emph{open} if
it is not closed.

Following the terminology normally used in the logical framework 
literature~\cite{nigam10jar,cervesato02ic}, we classify sequent contexts in two ways: 
\emph{bounded} and \emph{unbounded}. 
Context variables appearing in a \emph{bounded} context are instantiated by 
a \emph{multiset} of formulas, which
cannot be contracted nor weakened. Thus, when a multiple premise rule is
applied, these formulas are split among the premises. This is the case for the
contexts in rule $\tensor_R$ of linear logic, shown in the Introduction.
Context variables appearing in an \emph{unbounded} context are instantiated by 
\emph{sets} of formulas, which
can be contracted or weakened as much as needed. Consequently, they are copied
among the premises of a rule, which is the case for $\Gamma$ in the $\land_R$
rule above.


In order to distinguish different occurrences of a formula, we 
associate to each formula occurrence $A$ a number. 
That is, two different occurrences of the same formula $A$ are represented by $(A, i), (A, j)$, 
where $i \neq j$.
% For instance, the derivation to the left, where
% the formula is contracted, is formally represented by the derivation to the right:
% \[
%  \infer{\Gamma, A \vdash \Delta}{\Gamma, A, A \vdash \Delta}
%  \qquad 
%  \infer{\Gamma, (A, i) \vdash \Delta}{\Gamma, (A, i), (A, j) \vdash \Delta}
% \]
% where $j$ is only used to mark the occurrence $A$.
For simplicity, we assume implicitly that different occurrences of 
a formula are distinguished this way.

We assume that the shape of sequents, 
\ie, the number of contexts a sequent has and their classification as bounded or unbounded,
is given as part of the input.
This classification will guide the specification of the Answer Set Programs we use to 
check more proof transformations (Section~\ref{sec:provIf}). 
For example, if a context is unbounded, then the following lemma is provable:
\begin{lemma}
\label{lem:unbounded}
Let $\mathcal{P}$ be a proof system, whose sequents are of the form $\Sscr =
\Gamma_1 \mid \Gamma_2 \mid \cdots \mid \Gamma_i \mid \cdots \mid \Gamma_n
\vdash \Delta_1 \mid \Delta_2 \mid \cdots \mid \Delta_m$, where $\Gamma_i$ is an
unbounded context. Let $\Gamma_i' \supseteq \Gamma_i$ be a superset of $\Gamma_i$ for every $i$.
If the sequent $\Sscr$ is provable in $\mathcal{P}$, then the sequent $\Sscr'$
obtained from $\Sscr$ by replacing $\Gamma_i$ by $\Gamma_i'$ is also provable in
$\mathcal{P}$. 
\end{lemma}
The proof is straightforward, as one simply needs to weaken the formulas in $\Gamma_i'\setminus \Gamma_i$
in $\Sscr'$ and recover the sequent $\Sscr$, which is assumed to be provable in
$\mathcal{P}$.
Such lemmas enhance the checking of proof transformations, in particular for checking Obligation 2, 
detailed above.

Finally, we notice that there are proof systems that have other types of contexts, such as  
contexts that behave as lists of formulas, that is, contexts for which the exchange rule is not always
applicable~\cite{pfenning09lics}. There are not many proof systems of this kind. We believe, however, 
that our machinery for checking proof transformations can be extended for them, but 
this is left out of the scope of this paper.

\vspace{-2mm}
\subsection{Answer Set Programming}

Although we assume that the reader is familiar with Answer Set Programming~\cite{gelfond90iclp}, we 
review some of its basic definitions. Let $\Kscr$ be a set of propositional 
variables. A \emph{default literal} is an atomic formula preceded by 
\dnot.  A propositional variable and a default literal are both literals. 
A rule $r$ is an ordered pair $Head(r) \leftarrow Body(r)$, $Head(r)$ is 
a literal and $Body(r)$ is a finite set of literals. A rule with $Head = L$ and 
$Body(r) = \{L_1, \ldots, L_n\}$ is written $L \leftarrow L_1, \ldots, L_n$.
An \emph{interpretation} $M$ of $\Kscr$ is a subset of $\Kscr$. An atomic formula, $A$,
is true in $M$, written $M \vDash A$, if $A \in M$, otherwise false. A literal $\dnot\ A$
is true in $M$, written $M \vDash \dnot\ A$, if $A \notin M$, otherwise false.
An \emph{Answer Set Program} (LP) is a set of rules. An interpretation $M$
is an \emph{answer set} of an LP $P$ if $M' = least(P \cup \{\dnot\_A \mid A \notin M\} )$, 
where $M' = M \cup \{\dnot\_A \mid A \notin M\}$ and $least$ is the least model 
of the \emph{definite logic program} obtained from the argument program by replacing 
all occurrences of $\dnot\ A$ by a new atomic formula $\dnot\_A$. 
In the remainder
of this paper, we will not explicitly write the set $\Kscr$, but assume that it consists
exactly of the symbols appearing explicitly in the programs. Moreover, as usual, we 
consider variables appearing in programs as a shorthand for the set of all its possible ground 
instantiations.

The interpretation of the default negation $\dnot$ assumes a
\emph{closed-world} assumption of programs. That is, we assume to be true
only the facts that are explicitly supported by a rule. For example, the
following program with three rules has two answer-sets $\{a, c\}$ and $\{b\}$:
\[
 a \leftarrow \dnot\ b \quad\qquad b \leftarrow \dnot\ a \quad\qquad c \leftarrow a
\]
Finally, 
one can also specify a constraint in ASP by using a rule whose head is the falsity, 
denoted by the symbol $\bot$. For example, the rule below specifies the constraint
that $b$ cannot be true:
\[ 
\bot \leftarrow b
\]
Thus, the program resulting from adding this rule to the program above has 
a single answer-set, namely $\{a, c\}$.

% \xor: typeset as \veebar (exclusive-or symbol).
\newcommand\xor{\veebar}
% \thSeq: the theory of Table~\ref{fig:predicates}; \groundSet: script B.
\newcommand\thSeq{\ensuremath{\Tscr}}
\newcommand\groundSet{\ensuremath{\mathcal{B}}}
\begin{table}[t]
\caption{\small List of atomic formulas used together
with their denotations and their logical axiomatization $\thSeq$. Following 
usual logic programming conventions, all non-predicate term symbols are assumed 
to be universally quantified, and we use commas, ``$,$'', for conjunctions and
 ``$\leftarrow$'' for the reverse implication.}
\label{fig:predicates}
\begin{tabular}{l@{\quad}p{2cm}@{\quad}l}
\toprule
Alphabet & Denotation & Logic Specification \\[1pt]
\midrule
$\In{F}{\Gamma}$ & $F \in \Gamma$ & No theory.\\
\midrule
$\elin{F}{\Gamma}$ &  $\Gamma = \{F\}$ & (r1) $\In{F}{\Gamma} \leftarrow \elin{F}{\Gamma}$. \\[1pt]
&& (r2) $\bot \leftarrow \In{F_1}{\Gamma}, \elin{F}{\Gamma}, F_1 \neq F$. \\
\midrule
$\emp{\Gamma}$ & $\Gamma = \emptyset$ 
&  (r3) $\bot \leftarrow \In{F}{\Gamma}, \emp{\Gamma}$. \\
\midrule
$\union{\Gamma^1} {\Gamma^2} {\Gamma}$ & $\Gamma = \Gamma^1 \cup \Gamma^2$ & 
   (r4) $\In{F}{\Gamma} \leftarrow \In{F}{\Gamma^1}, \union{\Gamma^1}{\Gamma^2}{\Gamma}$. \\[1pt]
&& (r5) $\In{F}{\Gamma} \leftarrow \In{F}{\Gamma^2}, \union{\Gamma^1}{\Gamma^2}{\Gamma}$. \\[1pt]
&& (r6) $\emp{\Gamma}  \leftarrow \emp{\Gamma^1},\emp{\Gamma^2}, \union{\Gamma^1}{\Gamma^2}{\Gamma}$. \\[1pt]
&& (r7) $\In{F}{\Gamma^1}  \leftarrow \dnot\ \In{F}{\Gamma^2}, \In{F}{\Gamma}, \union{\Gamma^1}{\Gamma^2}{\Gamma}$. \\
&& (r8) $\In{F}{\Gamma^2}  \leftarrow \dnot\ \In{F}{\Gamma^1}, \In{F}{\Gamma}, \union{\Gamma^1}{\Gamma^2}{\Gamma}$. \\
\bottomrule
\end{tabular}
\vspace{-4mm}
\end{table}

\vspace{-2mm}
\section{Checking the Validity of Derivations}
\label{sec:derivations} 

Assume given a proof system, with inference rules detailed as before. 
For each rule in the proof system, we can construct a LP that specifies all 
its possible instances. The idea is that for each inference rule in the proof system, 
we associate an \emph{inference skeleton} and a \emph{set of atomic facts}. 
The skeleton specifies the tree structure of the inference rule, while the set of 
facts specifies how the formulas are moved along a derivation. 
From these, we can construct larger derivations.

%\paragraph{Answer Set Programs}
The alphabet and the theory used to specify these derivations are shown in Table~\ref{fig:predicates}. 
The logic program that we need is very simple, 
%$\Pscr_1 = \{ \textrm{(r1)}, \textrm{(r2)},\ldots, \textrm{(r10)}\}$, 
with only eight rules: $\textrm{(r1)}, \textrm{(r2)},\ldots, \textrm{(r8)}$. These
rules and the predicates in Table~\ref{fig:predicates} specify in a declarative 
fashion the content of context variables, $\Gamma$, in a derivation.
The encoding is all based on atomic formulas of the form $\In{F}{\Gamma}$, which 
specify that the formula $F$ is in the context $\Gamma$. 
% As it will be clear later in this section, 
% one may specify which formulas exactly appear in the conclusion sequent of a derivation.

The atomic formula $\elin{F}{\Gamma}$ specifies that the context $\Gamma$ has a
single formula $F$. 
The first rule (r1) specifies that $\In{F}{\Gamma}$ holds, while the second rule (r2) is a constraint
rule specifying that there is no other formula $F_1$ different from $F$ in the context $\Gamma$.

In some situations, for instance, when specifying the linear logic initial rule~\cite{girard87tcs}, 
we need to specify that some contexts are empty, which is done by using the atomic formula $\emp{\Gamma}$.  
Rule (r3) is a constraint that specifies that no formula can be in an empty context.

The most elaborate specifications are the rules (r4)--(r8), which specify the atomic formula 
$\union{\Gamma^1} {\Gamma^2} {\Gamma}$, i.e. $\Gamma = \Gamma^1 \cup \Gamma^2$.
The rules (r4) and (r5) specify that $\Gamma^1 \subseteq \Gamma$ and $\Gamma^2 \subseteq \Gamma$, 
that is, the occurrence\footnote{Recall that as discussed in Section~\ref{sec:prelim}, 
we keep track of the occurrences of formulas.} of a formula that is in $\Gamma^i$ is also in $\Gamma$. The rule (r6) specifies that 
if both $\Gamma^1$ and $\Gamma^2$ are empty then so is $\Gamma$. 
The rules (r7) and (r8) 
specify that these contexts are bounded (see Section~\ref{sec:sequents}), that is, the union 
$\Gamma = \Gamma^1 \cup \Gamma^2$ is a multiset union. An occurrence of a formula in $\Gamma$ either comes
from $\Gamma^1$ or from $\Gamma^2$. Notice how we use the default negation to generate accordingly
the splitting behavior of bounded contexts. 
%This will be used for specifying inference rules, 
%such as $\tensor_R$.  

\paragraph{Auxiliary Context Variables and Macros}
From these rules, we can specify more elaborate conditions on context   
variables and formula variables. For this we may need fresh auxiliary context variables,
written $\Gamma_{aux}^j$ with an $aux$ subscript. Freshness is guaranteed, as usual, by using a counter 
that is incremented whenever one needs a new auxiliary context.

For example, the theory below, written $\unions{\Gamma}{[\Gamma_1, \ldots, \Gamma_n]}$, 
specifies that $\Gamma = \Gamma_1 \cup \cdots \cup \Gamma_n$ by using auxiliary 
context variables as follows:
\[
 \union{\Gamma_1}{\Gamma_2}{\Gamma_{aux}^1}, \union{\Gamma_{aux}^1}{\Gamma_3}{\Gamma_{aux}^2},
 \ldots, \union{\Gamma_{aux}^{n-3}}{\Gamma_{n-1}}{\Gamma_{aux}^{n-2}}, \union{\Gamma_{aux}^{n-2}}{\Gamma_n}{\Gamma} 
\]
It is easy to check from the specification of $\union{\cdot}{\cdot}{\cdot}$ that indeed 
whenever $\In{F}{\Gamma_i}$ is true in an answer-set then so is $\In{F}{\Gamma}$, and 
conversely that if $\In{F}{\Gamma}$ is true in an answer-set then there is some $i$, with $1 \leq i \leq n$,
such that $\In{F}{\Gamma_i}$ is also true.

Similarly, we can define when a context variable $\Gamma = \{A_1, \ldots, A_n\}$ 
is a collection of formulas. This is specified by the theory below, written 
$\equal{\Gamma}{\{A_1, \ldots, A_n\}}$:
\[
 \elin{A_1}{\Gamma_{aux}^1}, \elin{A_2}{\Gamma_{aux}^2}, \ldots, \elin{A_n}{\Gamma_{aux}^n}, 
 \unions{\Gamma}{[\Gamma_{aux}^1, \ldots, \Gamma_{aux}^n]}
\]
While $\elin{A_i}{\Gamma_{aux}^i}$ specifies that the auxiliary context variable 
contains only the formula $A_i$, the specification $\unions{\Gamma}{[\Gamma_{aux}^1, \ldots, \Gamma_{aux}^n]}$
specifies that $\Gamma = \Gamma_{aux}^1 \cup \cdots \cup \Gamma_{aux}^n$. Notice that
the auxiliary variables are fresh, so the auxiliary variables needed in 
$\unions{\Gamma}{[\Gamma_{aux}^1, \ldots, \Gamma_{aux}^n]}$ are different from 
those in $\{\Gamma_{aux}^1, \ldots, \Gamma_{aux}^n\}$.

Finally, another specification that we will need is when two context variables 
have the same formulas, that is, $\Gamma = \Gamma'$. This is specified by the 
theory $\equalCtx{\Gamma}{\Gamma'}$, specified below:
\[
 \emp{\Gamma_{aux}}, \union{\Gamma}{\Gamma_{aux}}{\Gamma'}
\]
The atom $\emp{\Gamma_{aux}}$ specifies that $\Gamma_{aux} = \emptyset$, while  
$\union{\Gamma}{\Gamma_{aux}}{\Gamma'}$ specifies that $\Gamma = \Gamma_{aux} \cup \Gamma'$.
% 

We are aware that these operations might not cover all possible context
transformations of sequent calculus rules, but,
as we discuss in more detail in Section~\ref{sec:together}, we use a linear logical framework
for specifying proof systems~\cite{nigam11lsfa}. In this language, the theory in Table~\ref{fig:predicates} 
contains the necessary constructs to check the proof transformations proposed.
%for implementing a permutation checker in our linear logical framework.

\paragraph{Derivation Skeletons} 
Derivation skeletons are defined inductively by using introduction rules of a given 
proof system. Assume that the sequent contexts of this proof system are of the 
form 
% {\small
\[
\Gamma_1  \mid \cdots \mid \Gamma_n \vdash \Gamma_{n+1} \mid \cdots \mid \Gamma_{n + m}
\]
% }
that is, with $n + m$ contexts. 
\newcommand\ctx[2]{\ensuremath{\textrm{ctx}_R(#1,#2)}}
In Definition~\ref{def:inference-skeleton}, $\ctx{i}{p}$ denotes the $i^{th}$
context of premise $p$, if $p \neq 0$ or of the conclusion if $p=0$.

\begin{definition}[Inference skeleton]
\label{def:inference-skeleton}
Let $R$ be an inference rule in the proof system. 
The \emph{inference skeleton} for $R$ is a pair $\tup{\Xi, \Bscr}$, 
where $\Xi$ is the inference rule 
with conclusion $\Gamma_{0,1}  \mid \cdots \mid \Gamma_{0,n} \vdash 
\Gamma_{0,n+1} \mid \cdots \mid \Gamma_{0,n + m}$, and $k$ premises 
$\Gamma_{j,1}  \mid \cdots \mid \Gamma_{j,n} \vdash \Gamma_{j,(n+1)} \mid \cdots \mid \Gamma_{j,(n+m)}$, 
with $1 \leq j \leq k$, where all context variables have different names, \ie, 
$\Gamma_{i,k} = \Gamma_{j,l}$ only if $i = j$ and $k = l$.
$\Bscr$ is a set of formulas constructed by checking 
how the contexts 1 to $n+m$ are represented in the conclusion and premises of
$R$. 
$\Bscr$ is the smallest set such that

\noindent
\textbullet~ (No Context nor Formula) If $\ctx{i}{p}$ has no context nor formula variable, then $\emp{\Gamma_{i,p}} \in \Bscr$;

\noindent
\textbullet~ (Single Context) If $\ctx{i}{p}$ is a single context variable $\Gamma$, then $\equalCtx{\Gamma_{i,p}}{\Gamma} \subseteq \Bscr$;

\noindent
\textbullet~ (Single Formula) If $\ctx{i}{p}$ is of the form $A$, then $\elin{A}{\Gamma_{i,p}} \in \Bscr$;

\noindent
\textbullet~ (More than one Context and/or Formula) If $\ctx{i}{p}$ is of the form $\Gamma_1, \ldots, \Gamma_n, A_1, \ldots, A_m$, 
 then $\equal{\Gamma_{aux}}{\{A_1, \ldots, A_m\}}, \unions{\Gamma_{i,p}}{[\Gamma_1, \ldots, \Gamma_n, 
 \Gamma_{aux}]} \subseteq \Bscr$.

 \noindent
where the auxiliary context variable $\Gamma_{aux}$ is a fresh context name. 
\end{definition}

% Intuitively, the purpose of $\Xi$ is to  specify the tree structure of a derivation. 
% As the usual proof trees, derivation skeletons
% are constructed from the rules of the proof system. However, instead of containing all 
% the details of the formulas, derivation skeletons contain only context variables. 
%The models, if any, of the program constructed by using the atomic formulas $\Bscr$ and 
%the rules (r1), $\ldots$, (r10), will specify
%how the formulas are moved in the derivation skeleton.


\paragraph{Example:}
Consider the $\tensor_R$ rule shown to the left. The \emph{inference skeleton} for 
it is the pair $\tup{\Xi_\tensor, \Bscr_\tensor}$ obtained as described in
Definition~\ref{def:inference-skeleton}, where $\Xi_\tensor$ is the derivation shown to the right:
{\small
\[
\infer[\tensor_R]{{\Gamma, \Gamma'} \vdash \Delta, \Delta', A\tensor B}
{{\Gamma} \vdash \Delta, A \qquad  {\Gamma'} \vdash \Delta', B}
\qquad 
\infer{\Gamma_{0,1} \vdash \Gamma_{0,2} }
{
{\Gamma_{1,1} \vdash \Gamma_{1,2} }
&
{\Gamma_{2,1} \vdash \Gamma_{2,2} }
}
\]
}
Moreover, $\Bscr_\tensor$ is the set:
\begin{small}
\[
\Bscr_\tensor = \left\{
\begin{array}{c}
\equal{\Gamma_{aux}^1}{\{A \tensor B\}}, \unions{\Gamma_{0,2}}{[\Delta, \Delta', \Gamma_{aux}^1]}\\
\equal{\Gamma_{aux}^2}{\{A\}}, \unions{\Gamma_{1,2}}{[\Delta, \Gamma_{aux}^2]}\\
\equal{\Gamma_{aux}^3}{\{B\}}, \unions{\Gamma_{2,2}}{[\Delta', \Gamma_{aux}^3]}\\
\unions{\Gamma_{0,1}}{[\Gamma, \Gamma']}, \equalCtx{\Gamma_{1,1}}{\Gamma}, \equalCtx{\Gamma_{2,1}}{\Gamma'}\\
%\union{\Gamma_{1,1}}{\Gamma_{2,1}}{\Gamma_{0,1}}, \\
%\removed{A \tensor B}{\Gamma_{0,2}}{\Gamma_{aux_1}}, \union{\Gamma_{aux_2}}{\Gamma_{aux_3}}{\Gamma_{aux_1}}, \\
%\elin{A}{\Gamma_{aux_4}}, \union{\Gamma_{aux_2}}{\Gamma_{aux_4}}{\Gamma_{1,2}}, \\
%\elin{B}{\Gamma_{aux_5}}, \union{\Gamma_{aux_3}}{\Gamma_{aux_5}}{\Gamma_{2,2}}, \\
\end{array}
\right\}
\]
\end{small}%
It is easy to check that the Logic Program (LP) $\Bscr_\tensor \cup \Tscr$
has a single answer set, containing the formulas $\In{A \tensor B}{\Gamma_{0,2}}$,
$\In{A}{\Gamma_{1,2}}$ and $\In{B}{\Gamma_{2,2}}$. 

Now, consider the following case, where we know that $\Gamma, \Gamma'$ has the formula 
$C$, but we do not specify to which premise it was moved. This can be specified by adding
the formula $\In{C}{\Gamma_{0,1}}$ to the set $\Bscr_\tensor$. This addition will cause the resulting program to have
two answer-sets. One where $\In{C}{\Gamma_{1,1}}$, that is $C$ is moved to the first premise, 
and another answer-set where $\In{C}{\Gamma_{2,1}}$, that is, where $C$ is moved to the second
premise. Thus, we only need to build a generic derivation, $\Xi$, and specify declaratively
the contents of its contexts. 

The following definition specifies the set of derivation skeletons, which are obtained 
by using inference skeletons specified in Definition~\ref{def:inference-skeleton}.

\begin{definition}[Derivation skeleton]
The set of \emph{derivation skeletons} is defined inductively.
Every inference skeleton is a derivation skeleton. 
Let $\Dscr = \tup{\Xi_D, \Bscr_D}$ be a derivation skeleton with open premise $P$
of the form $\Gamma_{p,1} \mid \cdots \mid \Gamma_{p,n} \vdash \Gamma_{p,n + 1} \mid \cdots \mid \Gamma_{p,m + n}$.
Let $\Iscr = \tup{\Xi_I, \Bscr_I}$ be an inference skeleton, introducing a sequent of the form
$\Gamma_{I,1} \mid \cdots \mid \Gamma_{I,n} \vdash \Gamma_{I,n + 1} \mid \cdots \mid \Gamma_{I,m + n}$, 
where we assume that all context variables in $\Xi_I$ do not appear in $\Xi_D$.
Then  $\tup{\Xi, \Bscr}$
is a derivation skeleton, where $\Xi$ is obtained by replacing $P$ by $\Xi_I$ and 
where $\Bscr = \Bscr_D \cup \Bscr_I \cup \Bscr_{Join}$, where $\Bscr_{Join}$ is the 
set of formulas:
\[
 \equalCtx{\Gamma_{p,1}}{\Gamma_{I,1}}, \ldots, \equalCtx{\Gamma_{p,n+m}}{\Gamma_{I,n+m}}.
\]
%
%where $\equalCtx{\Gamma}{\Gamma'}$ is specified as 
%$\emp{\Gamma_{aux}}, \union{\Gamma}{\Gamma_{aux}}{\Gamma'}$.
\end{definition}

The interesting bit is the set $\Bscr = \Bscr_D \cup \Bscr_I \cup \Bscr_{Join}$ specifying 
the contents of the contexts in $\Xi$. In particular, it contains the same specification of 
$\Bscr_D$ and the specification of how formulas are moved in the inference rule ($\Bscr_I$). 
The set $\Bscr_{Join}$ specifies that the premise $P$ of $\Xi_D$ and the conclusion of $\Xi_I$
are the same.

\paragraph{Example} Continuing with the example above, consider the \emph{derivation skeleton}
obtained by applying another inference skeleton for the $\tensor_R$ rule, $\tup{\Xi_\tensor', \Bscr_\tensor'}$ 
to the left-premise of the inference skeleton built above, where 
$\tup{\Xi_\tensor', \Bscr_\tensor'}$
specifies the introduction of $C \tensor D$ on the right-hand-side and
$\In{C\tensor D}{\Gamma_{0,2}} \in \Bscr_\tensor$, 
that is, $C\tensor D$ appears in the conclusion sequent.
Moreover, assume that $\tup{\Xi_\tensor', \Bscr_\tensor'}$ mentions 
the contexts names $\Gamma_{j,1}, \Gamma_{j,2}$ for $3 \leq j \leq 5$.
We obtain the derivation skeleton $\tup{\Xi, \Bscr}$, where $\Xi$ is as follows:
% {\small
\[
\Xi \quad =\quad  
 \infer{\Gamma_{0,1} \vdash \Gamma_{0,2} }
{
{\infer{\Gamma_{3,1} \vdash \Gamma_{3,2}}
{
\Gamma_{4,1} \vdash \Gamma_{4,2}
&
\Gamma_{5,1} \vdash \Gamma_{5,2}
} }
&
{\Gamma_{2,1} \vdash \Gamma_{2,2} }
}
\]
% }
and $\Bscr = \Bscr_\tensor \cup \Bscr_\tensor' \cup \Bscr_{Join}$. It is
easy to check that the LP $\Bscr \cup \Tscr$ has two answer-sets. One
answer-set has $\In{A}{\Gamma_{4,2}}$, that is, the formula $A$ is moved to the left-premise. 
The second answer set has  $\In{A}{\Gamma_{5,2}}$, that is, the formula $A$ is moved to the 
right-premise. In this way, we can construct a single derivation skeleton, while the answer sets of 
the LP program associated with it specify the concrete instance derivations, if there are any. 

% G: maybe we can get rid of this paragraph if we run out of space...
\paragraph{Recovering Derivations } 
We use the following rewrite system to recover a derivation of the proof system from an answer-set $a \in
\Ascr$ of a derivation skeleton $\tup{\Xi, \Bscr}$. The rewrite system has two phases:
\[
\begin{array}{l@{\quad}r@{~}l@{\quad}@{\quad}r@{\quad}l@{\qquad}r@{\quad}l}
\toprule
{\textrm{Phase 1:}} 
& \elin{F}{\Gamma}:  & \Gamma \rightarrow F 
& \emp{\Gamma}: & \Gamma \rightarrow \cdot 
& \union{\Gamma'}{\Gamma''}{\Gamma}: & \Gamma \rightarrow \Gamma',
\Gamma''\\
\midrule
\textrm{Phase 2:} & \multicolumn{6}{c}{
\In{F}{\Gamma}: \quad \Gamma \rightarrow \Gamma, F
}\\
\bottomrule
\end{array}
\]
In the first phase, for every applicable constraint, 
\tsl{emp}, \tsl{unitctx}, and \tsl{union} in $a$, and applicable context
variable, $\Gamma$, we apply the corresponding rule \emph{exactly once}. 
Then in the second phase, for every \tsl{in} constraint in $a$ and
applicable context variable, $\Gamma$, we apply the
corresponding rule again exactly once. 
We can show that this rewrite system is \emph{strongly confluent}.
%
\begin{definition}[Derivation Instance]
Let $\langle \Xi, \Bscr \rangle$ be a derivation skeleton and \LPder\ $= \Bscr
\cup \Tscr$. Let $\Ascr$ be the answer set
of \LPder. Then the derivation $\langle \Xi, a \rangle$ obtained by using the rewrite system above and an answer-set 
$a \in \Ascr$ is a \emph{derivation instance} of $\langle \Xi, \Bscr \rangle$.
\end{definition}

The following theorem is proved by induction on the height of derivations and 
by using  the rewrite system above.

\begin{theorem}[Soundness and Completeness]
Let $\Ascr = \{a_1, \ldots, a_n \}$ be the answer set of the derivation skeleton $\tup{\Xi, \Bscr}$ obtained
by applying some inference skeletons for a given proof system $\Pscr$. 
Then $\langle \Xi, a_i \rangle$ is a derivation instance iff it is a derivation
that can be obtained by applying the corresponding inference rules of $\Pscr$ in
the same order and on the same premises.
%
%Then the 
%derivation instances $\langle \Xi, a_1 \rangle, \ldots, \langle \Xi, a_n \rangle$ 
%are all valid derivations in $\Pscr$ and contains exactly those 
%obtained by applying in the same way the corresponding inference rules of $\Pscr$.
\end{theorem}
 
% G: commenting this out because condition (1) no longer exists.
%\newcommand\unbox[2]{\tsl{unbox}(#1, #2)}
%\noindent
%\textbf{Remark:} It is worth noticing that one can generalize the theory introduced in Table~\ref{fig:predicates}
%in order to relax the condition~\ref{qu:inference}. For instance, the following 
%rule for $K$ systems does not satisfy the condition~\ref{qu:inference}, 
%as the context $\square \Gamma$ is considered to be different to the one $\Gamma$, although
%there is an implicit connection, namely, $\Gamma$ are the formulas obtained from $\square \Gamma$ by removing
%the $\square$:
%\[
% \infer{\square \Gamma \vdash \square A}
% {\Gamma \vdash A}
%\]
%One can, however, extend the 
%LP introduced in this section with a new predicate $\unbox{\Gamma}{\Gamma'}$, specified
%by the following rules, in order to specify thie inference rule:
%\[
%\begin{array}{l}
% \In{F}{\Gamma} \leftarrow \In{\square F}{\Gamma'}, \unbox{\Gamma}{\Gamma'} \qquad 
% \bot \leftarrow \In{F}{\Gamma}, \dnot\ \In{\square F'}{\Gamma'}, \unbox{\Gamma}{\Gamma'}\\
% \qquad \qquad \qquad \qquad \bot \leftarrow \In{F}{\Gamma'}, \unbox{\Gamma}{\Gamma'}
%\end{array}
%\]
%The first rule says that if $\Gamma$ is the unboxed context of $\Gamma'$, then $\Gamma'$ contains
%all the formulas $F$ obtained by unboxing a formula $\square F \in \Gamma'$, while the second rule 
%that $\Gamma$ does not have more formulas than those mentioned in $\Gamma'$. 
%The third rule specifies that $\Gamma'$ has only boxed formulas. 

% G: moved this discussion afer the definition of the macros.
%As we discuss in more details in Section~\ref{sec:together}, we use a linear logical framework
%for specifying proof systems~\cite{nigam11lsfa}. In this language, the theory in Table~\ref{fig:predicates} 
%contains the set of constructs needed for implementing a permutation checker in our linear logical framework.

\section{Checking Provability Implication of Derivations}
\label{sec:provIf}

We are now interested in solving Obligation 2: given two derivations $\Xi_1$
and $\Xi_2$, are the premises of $\Xi_2$ provable given proofs of the premises
of $\Xi_1$? This problem is undecidable in general, as provability is undecidable already 
for first-order logic. So there is no hope to build a general tool that can automatically answer this 
question. However, many proof transformations, such as permutation lemmas, do not need very complex arguments, though 
still combinatorial. Consider 
the following permutation:
% {\small
\[
\infer[cut]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
  \infer[\wedge_l]{\Gamma, A \wedge B \vdash \Delta, C}{
    \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi}
  }
  &
  \deduce{\Gamma', C \vdash \Delta'}{\Xi' }
}
\qquad
\rightsquigarrow
\qquad
\infer[\wedge_l]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
  \infer[cut]{\Gamma, \Gamma', A, B \vdash \Delta, \Delta'}{
    \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi}
    &
    \deduce{\Gamma', C \vdash \Delta'}{\Xi'}
  }
}
\]
% }
The premises of the derivation to the left $(\Xi_1)$ are the same as the premises
of the derivation to the right $(\Xi_2)$. Therefore, one can simply re-use the 
proofs $\Xi$ and $\Xi'$. In fact, as we show in this section, one can perform 
these checks in an automated fashion. We call this problem the \emph{Provability Implication}: 
does the proof of a sequent $\Sscr_1$ imply the proof of another sequent $\Sscr_2$? Although 
still undecidable, this problem is simpler as it allows us 
to consider the premises of the derivations individually, as done above.
Henceforth we denote this problem by $\Sscr_1 \Rightarrow \Sscr_2$. 

We show that  we can use ASP to solve automatically the Provability Implication problem for a wide range of proof systems.
For this, we add to the language the predicates in
Table~\ref{provIf_predicates} and the rules in Table~\ref{fig:provIfTheory}.

% This amounts to check whether for every open leaf $\Sscr_2$ of $\Xi_2$
% there exists an open leaf $\Sscr_1$ such that the provability of $\Sscr_1$ implies the
% provability of $\Sscr_2$. So this problem can be reduced to what we call the
% \emph{Provability Implication} problem, namely: does the proof of a sequent
% $\Sscr_1$ imply the proof of another sequent $\Sscr_2$? Henceforth we denote this
% problem by $\Sscr_1 \Rightarrow \Sscr_2$.
% 
% Notice that this problem is in general undecidable. Consider for instance the proof system LK
% for first-order classical logic. The provability problem will amount to proving a first-order
% logic (FOL) formula, namely to prove that the formula denoted by $\Sscr_1$ 
% implies the formula denoted by $\Sscr_2$. Since FOL provability is undecidable in general, 
% the provability implication is also undecidable in general. Therefore, there is no 
% hope for a method that is complete if we wish to make it general enough to work
% for a broad set of logics.

% However, many proof transformations, such as permutation lemmas, do not need very complex arguments. Consider 
% the following permutation:
% {\small
% \[
% \infer[cut]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
%   \infer[\wedge_l]{\Gamma, A \wedge B \vdash \Delta, C}{
%     \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi}
%   }
%   &
%   \deduce{\Gamma', C \vdash \Delta'}{\Xi' }
% }
% \qquad
% \rightsquigarrow
% \qquad
% \infer[\wedge_l]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
%   \infer[cut]{\Gamma, \Gamma', A, B \vdash \Delta, \Delta'}{
%     \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi}
%     &
%     \deduce{\Gamma', C \vdash \Delta'}{\Xi'}
%   }
% }
% \]
% }
% The premises of the derivation to the left $(\Xi_1)$ are the same as the premises
% of the derivation to the right $(\Xi_2)$. Therefore, one can simply re-use the 
% proofs $\Xi$ and $\Xi'$. In fact, as we show in this section, one can perform 
% these checks in an automated fashion. This is again done by means
% of ASP, and for that we add to the language the predicates in Table
% \ref{provIf_predicates} and the rules in Table~\ref{fig:provIfTheory}.


% 
% When checking proof transformations, it is often the case that the derivation of
% some sequents are left implicit. In most of the cases, given that the same
% sequents are obtained, the same implicit derivations can be used. Take for example,
% the rewriting rule for the permutation of a cut over $\wedge_l$:
% {\small % G TODO: use an example where derivations are copied?
% \[
% \infer[cut]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
%   \infer[\wedge_l]{\Gamma, A \wedge B \vdash \Delta, C}{
%     \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi_1}
%   }
%   &
%   \deduce{\Gamma', C \vdash \Delta'}{\Xi_2}
% }
% \qquad
% \rightsquigarrow
% \qquad
% \infer[\wedge_l]{\Gamma, \Gamma', A \wedge B \vdash \Delta, \Delta'}{
%   \infer[cut]{\Gamma, \Gamma', A, B \vdash \Delta, \Delta'}{
%     \deduce{\Gamma, A, B \vdash \Delta, C}{\Xi_1}
%     &
%     \deduce{\Gamma', C \vdash \Delta'}{\Xi_2}
%   }
% }
% \]
% }%
% In this transformation, both proofs $\Xi_1$ and $\Xi_2$ can be used in the derivation 
% to the right. It might also be the case
% that these proofs are used more than once, if a sequent happens to be
% copied during the transformation, \eg, in the permutation of $\tensor$
% over $\with$, or ever not needed.


% But of course this can be relaxed in the sense that the proofs are not
% copied, but a proof a sequent $\Sscr_1$ can be transformed into a proof of a
% sequent $\Sscr_2$. This is exactly what is needed to prove obligation
% 2: \emph{the premises of the latter derivation can be proved using the proofs
% introducing the premises of the former derivation}. 
% This is again done by means
% of ASP, and for that we add to the language the predicates in Table
% \ref{provIf_predicates}.

\begin{table}
\caption{Predicates used to reason whether $\Sscr_1 \Rightarrow \Sscr_2$.}
\label{provIf_predicates}
\begin{tabular}{ll}
\toprule
\textbf{Predicate} & \textbf{Meaning} \\
\midrule
$\inSequent{\Gamma}{\Sscr}$ & A context variable $\Gamma$ is in a sequent called $\Sscr$. \\
\midrule
$\inDer{\Sscr}{D}$ & The sequent $\Sscr$ belongs to the derivation $D$, where $D \in \{\Xi_1, \Xi_2\}$. \\
\midrule
$\bounded{\Gamma}$ & Context $\Gamma$ is bounded. \\
\midrule
$\provIf{\Sscr_2}{\Sscr_1}$ & $\Sscr_2$ is derivable if $\Sscr_1$ is derivable. \\
\midrule
$\notProvIf{\Sscr_2}{\Sscr_1}$ & It is not possible to affirm that there is a derivation of $\Sscr_2$ from a derivation of $\Sscr_1$. \\
\bottomrule
\end{tabular}
\caption{Theory $\Tscr_d$ used to reason whether the open leaves of a
derivation $\Xi_2$ are provable
given the proofs of the open leaves of a derivation $\Xi_1$. Here we consider that
$s_1, \ldots, s_n$ are the open leaves of $\Xi_2$.}
\label{fig:provIfTheory}
\begin{tabular}{lp{8.0cm}}
\toprule
(c1) &
$
\notProvIf{\Sscr_2}{\Sscr_1} \leftarrow \In{F}{\Gamma}, \inSequent{\Gamma}{\Sscr_1}, \inDer{\Sscr_1}{\Xi_1}, 
$
$
\dnot\ \In{F}{\Gamma'}, \inSequent{\Gamma'}{\Sscr_2}, \inDer{\Sscr_2}{\Xi_2}.
$ \\
\midrule
(c2) &
$
\notProvIf{\Sscr_2}{\Sscr_1} \leftarrow \In{F}{\Gamma}, \inSequent{\Gamma}{\Sscr_2}, \inDer{\Sscr_2}{\Xi_2}, \bounded{\Gamma},
$
$
\dnot\ \In{F}{\Gamma'}, \inSequent{\Gamma'}{\Sscr_1}, \inDer{\Sscr_1}{\Xi_1}.
$ \\
\midrule
(c3) &
$\provIf{\Sscr_2}{\Sscr_1} \leftarrow \dnot\ \notProvIf{\Sscr_2}{\Sscr_1},
\inDer{\Sscr_1}{\Xi_1}, \inDer{\Sscr_2}{\Xi_2}.$ \\
\midrule
(c4) & 
$ok \leftarrow \provIf{s_1}{\_}, \ldots, \provIf{s_n}{\_}.$ \\
\midrule
(c5) & $\bot \leftarrow \dnot\ ok.$ \\
\bottomrule
\end{tabular}
\end{table}


The predicate $\notProvIf{\Sscr_2}{\Sscr_1}$ specifies the cases in which we
\emph{cannot} guarantee that $\Sscr_1 \Rightarrow \Sscr_2$.
%that 
%some cases when it is possible to infer that
%the existence of a proof of a sequent $\Sscr_1$  implies the existence of a proof of a sequent $\Sscr_2$. 
They are: (1) $\Sscr_1$ contains a formula that is not in $\Sscr_2$. If this
formula is used in the proof of $\Sscr_1$, we cannot transform it into a proof
of $\Sscr_2$. This is specified by rule (c1). And (2) $\Sscr_2$ contains a
formula in a \emph{bounded} context (see Section~\ref{sec:sequents}) that is not in $\Sscr_1$.
% \footnote{It is
% important to remember that we are assuming that there is no implicit weakening
% in the initial rules. They are always of the form $A \vdash A$.}. 
This is specified by rule (c2). 
This rule increases the power of our
method, by using Lemma~\ref{lem:unbounded}:
if the sequent $\Sscr_1$ is provable and if $\Sscr_2$ is obtained
by adding a formula to an unbounded context, 
then $\Sscr_2$ is provable. 

Notice that we are being overcautious, as there 
are cases in which conditions (c1) and/or (c2) are satisfied, and $\Sscr_1 \Rightarrow \Sscr_2$. 
However, there are no cases in which (c1) and (c2) are not satisfied and 
it is not the case that $\Sscr_1 \Rightarrow \Sscr_2$. Thus it is sound, but not complete.

% Note that these conditions are sufficient but not necessary. There might be
% cases that $\Sscr_1 \Rightarrow \Sscr_2$ holds, but our method cannot
% identify it. 
%Take case (1) for example: if the extra formula in $\Sscr_1$ is
%weakened in its proof, then it might be that $\Sscr_2$ is also provable. But
%since this proof is not available in our case, we cannot make sure whether the
%extra formula is used or weakened. So to guarantee soundness of the method, we
%choose a stronger condition.

Given that there is a way to identify when it is not possible to guarantee whether 
$\Sscr_1 \Rightarrow \Sscr_2$, we use ``double negation'' to specify
when it is the case that $\Sscr_2$ is actually provable (predicate
$\provIf{\Sscr_2}{\Sscr_1}$). This is specified by rule (c3), which decides
whether a sequent $\Sscr_2$ from derivation $\Xi_2$ is provable given that another
sequent $\Sscr_1$ from derivation $\Xi_1$ is provable.

From rules (c1), (c2), and (c3),  we can check for many cases whether $\Sscr_1 \Rightarrow \Sscr_2$.
In order to prove
Obligation 2, we need to check if all the premises of $\Xi_2$ are provable from
proofs of the premises of $\Xi_1$. Since these premises are sequents
themselves, we can use the $\provIf{\Sscr_2}{\Sscr_1}$ predicate to reason
about all of them. In particular, we need to make sure that \emph{every} premise of
$\Xi_2$ is proved from \emph{some} premise of $\Xi_1$. This is specified by clause
(c4).

Finally, we are interested only in whether there is a transformation or not, so we add the
rule (c5) to make sure that no models will be generated if some premise of $\Xi_2$ does not follow
from any premise of $\Xi_1$.

Let $\langle \Xi_1, a_1 \rangle$ and $\langle \Xi_2, a_2 \rangle$ be
derivation instances. Furthermore, let $\Lscr_i$ be the set of predicates
$\inSequent{\Gamma}{\Sscr}$, $\inDer{\Sscr}{D}$ and $\bounded{\Gamma}$
that describes the open leaves of $\Xi_i$. We define \LPprov\ $=
\Tscr_d \cup a_1 \cup a_2 \cup \Lscr_1 \cup \Lscr_2$. It is required in \LPprov\
that each derivation, sequent and context have unique names.

The following theorem is proved by constructing the proof of the open premises of $\Dscr_2$ from the proofs
introducing the open-premises of 
$\Dscr_1$ by weakening possible extra formulas in the open premises of $\Dscr_2$, as specified
by any answer-set of \LPprov.

\begin{theorem}[Soundness]
Let $\Dscr_1 = \langle \Xi_1, a_1 \rangle$ and $\Dscr_2 = \langle \Xi_2, a_2 \rangle$ be derivation
instances.
%a derivation skeleton $\tup{\Xi, \Bscr}$. 
If \LPprov\ returns a non-empty answer set, then all open leaves of $\Dscr_2$
are provable given proofs of open leaves of $\Dscr_1$.
\end{theorem}

% \begin{proof}[Proof sketch]
% The predicate $\notProvIf{\Sscr_2}{\Sscr_1}$ is true if there is a possibility
% that a proof of $\Sscr_1$ cannot be transformed into a proof of $\Sscr_2$. If it
% is false, then we are sure that a proof of $\Sscr_1$ can be transformed into a
% proof of $\Sscr_2$ (either because these sequents have the same formulas in the
% same contexts, or because $\Sscr_2$ has extra formulas that can be weakened). This is
% encoded by the clause (c3), which also guarantees that $\Sscr_1$ is from
% derivation $\Xi_1$ and $\Sscr_2$ is from derivation $\Xi_2$. We can conclude
% that if $\provIf{\Sscr_2}{\Sscr_1}$ is true, then it is certainly the case that
% a proof of $\Sscr_2$ follows from a proof of $\Sscr_1$.
% 
% Given clauses (c4) and (c5), the program will
% only return a non-empty set if $ok$ holds. According to clause (c4), $ok$ will
% hold when $\provIf{s_i}{\_}$ is true for every open leaf $s_i$ of $\Xi_2$, which
% means that proofs of $s_i$ will follow from proofs of open leaves of $\Xi_1$.
% \end{proof}

\section{Putting all Together: Tool and Experimental Results}
\label{sec:together}

In Section \ref{sec:derivations}, \LPder\ was defined to obtain the
possible derivations of a sequence of rule applications of a proof system. In
Section \ref{sec:provIf}, \LPprov\ was presented to determine if a
derivation instance $\Dscr_2$ follows from another derivation instance $\Dscr_1$. Using these two
programs, it is possible to automate the checking of a certain type of proof
transformations. In this section we define the problem of rule permutation and
show how it is solved by combining \LPder\ and \LPprov.

\begin{definition}[Rule Permutation]
Let $\Sscr$ be a sequent and $\alpha$ and $\beta$ two inference rules of some
proof system $\Pscr$. Let $\Dscr_1$ be the set of derivations obtained by
applying $\alpha$ and then $\beta$ (bottom up) to $\Sscr$, and $\Dscr_2$ the set
of derivations obtained by applying $\beta$ and then $\alpha$. We say that
$\alpha$ \emph{permutes over} $\beta$ if for all $d_1 \in \Dscr_1$, there exists
$d_2 \in \Dscr_2$ such that the provability of $d_1$ implies the provability of
$d_2$.
\end{definition}

The input of our algorithm to check permutations is: a proof system $\Pscr$
(inference rules), the format of a sequent in $\Pscr$ (bounded and unbounded,
left and right contexts), two inference rules $\alpha$ and $\beta$ and the
description of an initial sequent, i.e., the principal formulas to which
$\alpha$ and $\beta$ are applied and where these formulas are in the sequent
(described by the predicate $\In{F}{\Gamma}$).

Using \LPder\ and the initial constraints, one can generate the \emph{derivation
instances} $\Dscr_1$ from the application of rules $\alpha / \beta$. Similarly,
the derivation instances $\Dscr_2$ are obtained from the application of $\beta / \alpha$.
Since \LPder\ is sound and complete, the sets in $\Dscr_1$ and $\Dscr_2$
correspond to all possible derivations of $\alpha / \beta$ and $\beta / \alpha$
respectively.

To check if $\alpha$ permutes over $\beta$, we need to check that all $d_1 \in
\Dscr_1$ have a corresponding $d_2 \in \Dscr_2$ such that the provability of
$d_1$ implies the provability of $d_2$. This provability check is done by
\LPprov. By executing the program a finite number of times, since the sets $\Dscr_1$
and $\Dscr_2$ are finite, one can check the permutation condition. Since
\LPprov\ is sound, it will only fail when indeed $d_2$ is not provable from
$d_1$.

\paragraph{Tool and Experimental Results}

In \cite{miller.ep} it was shown how linear logic can be successfully used as
a framework for the specification of sequent calculi. Later, in \cite{nigam11lsfa}
it was shown how linear logic with subexponentials can capture a wider range of
proof systems with rules that have more refined structural restrictions. Using
the latter logic it is possible to specify the well known sequent calculi LK and LJ,
but also more involved calculi such as S4 and G3K \cite{negri05} for modal
logics and $G_K$ \cite{avron13} for
paraconsistent logics. This framework, and the specification of several calculi, can be
found at \url{http://code.google.com/p/sellf}. This is part of a bigger project
on reasoning about sequent calculus systems, and the advantage of using this
framework is that we can check proof transformations for different calculi that
are available.

For this reason we chose to implement the methods of Sections
\ref{sec:derivations} and \ref{sec:provIf} for the focused sequent calculus for
linear logic. This fact has also influenced the choice of the
alphabet in Table~\ref{fig:predicates}. Using these methods, we implemented a
function that checks permutation lemmas and made it available for the users.
Given the specification of a logic in this framework, the user can choose two
inference rules, say, $\alpha$ and $\beta$, and the system automatically checks
if $\alpha$ permutes over $\beta$. Some examples of specifications are already
available in the system, but the user is free to write their own.

%In particular, we have implemented the checking of permutation lemmas. The user
%can choose two inference rules, say, $\alpha$ and $\beta$, from the specified
%logic and the system automatically checks by using the procedure above  
%if $\alpha$ permutes over $\beta$, 
% i.e., if a proof in which $\alpha$ occurs below $\beta$ can be transformed into
% a proof in which $\beta$ occurs below $\alpha$. This is done via the procedure
% explained in Section \ref{sec:together}. 
Currently, the system outputs only
whether the rules permute or not (the negative answer corresponding to ``don't
know''), but in principle it could also show
the permutation cases. We expect to add this functionality before the conference.

We tested the implementation using the rules for linear logic and
intuitionistic logic, in which
the permutation cases are well known and essential for the completeness of
the proof search disciplines focusing and uniform proofs, respectively. 
Our system correctly identified all the permutation cases for LL:
% neg/neg
$\bindnasrepma / \bindnasrepma$, $\bindnasrepma / \binampersand$,
$\binampersand
/ \bindnasrepma$, $\binampersand / \binampersand$,
% pos/pos
$\tensor / \tensor$, $\tensor / \oplus$, $\oplus / \tensor$, $\oplus /
\oplus$,
% neg/pos
$\oplus / \bindnasrepma$, $\tensor / \bindnasrepma$, $\oplus /
\binampersand$, $\tensor
/ \binampersand$;
% Permutation cases for uniform proofs: guarantee that =>_l can be left to
% be
% applied in the very end, when the goal is atomic.
and all the permutations for LJ:
$\supset_l / \supset_r$, $\supset_l / \wedge_r$,
$\supset_l / \wedge_l$.

\vspace{-2mm}

\section{Related and Future Work}
\label{sec:conc}

This paper contributes to automating the check of important
proof theoretic properties. In particular, we showed how ASP is a suitable programming
paradigm for checking proof transformations. We proposed two programs for doing so. 
The first checks the validity of a derivation, given a specification of where
the formulas can possibly occur in a sequent. The second program is used to check 
whether the proof of one sequent follows from the proof of another sequent.
This is part of an effort to build tools to help proof theorists to design proof 
systems. 

The problem of checking the validity of proof transformations, and in particular
of proving permutation lemmas, is as old as sequent calculus systems. Kleene
in \cite{kleene52ams} already investigated this problem for LK and LJ.
Some have also investigated more systematically how to determine whether a permutation 
is valid. For instance, \cite{galmiche94tcs} and more recently \cite{lutovac13comsis} have 
proposed some vocabulary based on the role of formulas in an instance of an inference rule. 
They were able to prove when a permutation is possible by using this vocabulary.
However, they do not provide the means to automate this check, as one still needs
to enumerate all possible instances of rule applications to check whether all instances
can permute. This paper gives a solution for this problem by using ASP. We believe that
these two lines of work complement each other and could be combined, but this is
left as future work. In fact, to the best of our knowledge, this is the first 
result on automating the check of proof transformations.

Similarly, in machine proofs for cut-elimination, such as those in Twelf~\cite{schurmann00phd} or 
Abella~\cite{abella.website}, one proves permutation lemmas. Again, one needs to prove each 
case by hand. Although the system will complain when a case is forgotten, the permutation lemmas
still make up roughly half of the proof. One future direction is to integrate 
our method, which only provides yes/no answers, with these systems. This would mean that 
our system would also output a proof object, which could be checked by Twelf or Abella.
Moreover, it would also remove the burden from the user to write down many (if not all) permutation 
lemmas by hand.

% There are several directions to follow from here. Currently, our system provides
% a yes/no answer (and until the conference, it will also draw the transformations). 
% It would be desirable to output a machine checkable proof object. We are currently 
% investigating how to output, in particular, a proof assistant code demonstrating some permutation, 
% that could be checked by a proof assistant, for example, Twelf~\cite{schurmann00phd} or 
% Abella~\cite{abella.website}. 

Finally, since the completeness proof of the focusing strategy heavily relies on permutation lemmas~\cite{miller07cslb}, 
we are currently investigating whether one can automate the proposal of focused proof
systems that are complete with respect to their unfocused versions. 

% \begin{itemize}
% \item Improve the usability of the system
% \item Translate the proofs
% \item Use the permutability of rules to deduce proof search procedures
% (focusing, e,g.)
% \end{itemize}

\paragraph{Acknowledgements} We thank Dale Miller, Elaine
Pimentel, and Bruno W. Paleo for fruitful discussions. Nigam was supported
by the CNPq.

\bibliographystyle{acmtrans}
% \bibliographystyle{alpha}
% \bibliography{../../master}

\begin{thebibliography}{}

\bibitem[\protect\citeauthoryear{Andreoli}{Andreoli}{1992}]{andreoli92jlc}
{\sc Andreoli, J.-M.} 1992.
\newblock Logic programming with focusing proofs in linear logic.
\newblock {\em J. of Logic and Computation\/}~{\em 2,\/}~3, 297--347.

\bibitem[\protect\citeauthoryear{Avron, Konikowska, and Zamansky}{Avron
  et~al\mbox{.}}{2013}]{avron13}
{\sc Avron, A.}, {\sc Konikowska, B.}, {\sc and} {\sc Zamansky, A.} 2013.
\newblock Cut-free sequent calculi for c-systems with generalized finite-valued
  semantics.
\newblock {\em J. Log. Comput.\/}~{\em 23,\/}~3, 517--540.

\bibitem[\protect\citeauthoryear{Cervesato and Pfenning}{Cervesato and
  Pfenning}{2002}]{cervesato02ic}
{\sc Cervesato, I.} {\sc and} {\sc Pfenning, F.} 2002.
\newblock {A Linear Logical Framework}.
\newblock {\em Information \& Computation\/}~{\em 179,\/}~1 (Nov.), 19--75.

\bibitem[\protect\citeauthoryear{Crolard}{Crolard}{2001}]{crolard01tcs}
{\sc Crolard, T.} 2001.
\newblock Subtractive logic.
\newblock {\em Theor. Comput. Sci.\/}~{\em 254,\/}~1-2, 151--185.

\bibitem[\protect\citeauthoryear{Gacek}{Gacek}{2009}]{abella.website}
{\sc Gacek, A.} 2009.
\newblock The {A}bella system and homepage.
\newblock \url{http://abella.cs.umn.edu/}.

\bibitem[\protect\citeauthoryear{Galmiche and Perrier}{Galmiche and
  Perrier}{1994}]{galmiche94tcs}
{\sc Galmiche, D.} {\sc and} {\sc Perrier, G.} 1994.
\newblock On proof normalization in linear logic.
\newblock {\em Theoretical Computer Science\/}~{\em 135,\/}~1, 67--110.

\bibitem[\protect\citeauthoryear{Gelfond and Lifschitz}{Gelfond and
  Lifschitz}{1990}]{gelfond90iclp}
{\sc Gelfond, M.} {\sc and} {\sc Lifschitz, V.} 1990.
\newblock Logic programs with classical negation.
\newblock In {\em ICLP}. 579--597.

\bibitem[\protect\citeauthoryear{Gentzen}{Gentzen}{1969}]{gentzen35}
{\sc Gentzen, G.} 1969.
\newblock Investigations into logical deduction.
\newblock In {\em {The Collected Papers of Gerhard Gentzen}}, {M.~E. Szabo},
  Ed. North-Holland, Amsterdam, 68--131.

\bibitem[\protect\citeauthoryear{Girard}{Girard}{1987}]{girard87tcs}
{\sc Girard, J.-Y.} 1987.
\newblock Linear logic.
\newblock {\em Theoretical Computer Science\/}~{\em 50}, 1--102.

\bibitem[\protect\citeauthoryear{Kleene}{Kleene}{1952}]{kleene52ams}
{\sc Kleene, S.~C.} 1952.
\newblock Permutability of inferences in {Gentzen's} calculi {LK} and {LJ}.
\newblock {\em Memoirs of the American Mathematical Society\/}~{\em 10}, 1--26.

\bibitem[\protect\citeauthoryear{Leone, Pfeifer, Faber, Eiter, Gottlob, Perri,
  and Scarcello}{Leone et~al\mbox{.}}{2006}]{leone06tcl}
{\sc Leone, N.}, {\sc Pfeifer, G.}, {\sc Faber, W.}, {\sc Eiter, T.}, {\sc
  Gottlob, G.}, {\sc Perri, S.}, {\sc and} {\sc Scarcello, F.} 2006.
\newblock The {DLV} system for knowledge representation and reasoning.
\newblock {\em ACM Trans. Comput. Logic\/}~{\em 7}, 499--562.

\bibitem[\protect\citeauthoryear{Lutovac and Harland}{Lutovac and
  Harland}{2013}]{lutovac13comsis}
{\sc Lutovac, T.} {\sc and} {\sc Harland, J.} 2013.
\newblock A contribution to automated-oriented reasoning about permutability of
  sequent calculi rules.
\newblock Submitted to Computer Science and Information Systems.

\bibitem[\protect\citeauthoryear{Miller, Nadathur, Pfenning, and
  Scedrov}{Miller et~al\mbox{.}}{1991}]{miller91apal}
{\sc Miller, D.}, {\sc Nadathur, G.}, {\sc Pfenning, F.}, {\sc and} {\sc
  Scedrov, A.} 1991.
\newblock Uniform proofs as a foundation for logic programming.
\newblock {\em Annals of Pure and Applied Logic\/}~{\em 51}, 125--157.

\bibitem[\protect\citeauthoryear{Miller and Pimentel}{Miller and
  Pimentel}{2013}]{miller.ep}
{\sc Miller, D.} {\sc and} {\sc Pimentel, E.} 2013.
\newblock A formal framework for specifying sequent calculus proof systems.
\newblock {\em Theor. Comput. Sci.\/}~{\em 474}, 98--116.

\bibitem[\protect\citeauthoryear{Miller and Saurin}{Miller and
  Saurin}{2007}]{miller07cslb}
{\sc Miller, D.} {\sc and} {\sc Saurin, A.} 2007.
\newblock From proofs to focused proofs: a modular proof of focalization in
  linear logic.
\newblock In {CSL}, 2007.

\bibitem[\protect\citeauthoryear{Negri}{Negri}{2005}]{negri05}
{\sc Negri, S.} 2005.
\newblock Proof analysis in modal logic.
\newblock {\em J. Philosophical Logic\/}~{\em 34,\/}~5-6, 507--544.

\bibitem[\protect\citeauthoryear{Niemel{\"a} and Simons}{Niemel{\"a} and
  Simons}{1997}]{niemela97lpmnr}
{\sc Niemel{\"a}, I.} {\sc and} {\sc Simons, P.} 1997.
\newblock Smodels -- an implementation of the stable model and well-founded
  semantics for normal logic programs.
\newblock In {\em LPNMR}. 421--430.

\bibitem[\protect\citeauthoryear{Nigam and Miller}{Nigam and
  Miller}{2010}]{nigam10jar}
{\sc Nigam, V.} {\sc and} {\sc Miller, D.} 2010.
\newblock A framework for proof systems.
\newblock {\em J. Autom. Reasoning\/}~{\em 45,\/}~2, 157--188.

\bibitem[\protect\citeauthoryear{Nigam, Pimentel, and Reis}{Nigam
  et~al\mbox{.}}{2011}]{nigam11lsfa}
{\sc Nigam, V.}, {\sc Pimentel, E.}, {\sc and} {\sc Reis, G.} 2011.
\newblock Specifying proof systems in linear logic with subexponentials.
\newblock {\em Electr. Notes Theor. Comput. Sci.\/}~{\em 269}, 109--123.

\bibitem[\protect\citeauthoryear{Pfenning and Simmons}{Pfenning and
  Simmons}{2009}]{pfenning09lics}
{\sc Pfenning, F.} {\sc and} {\sc Simmons, R.~J.} 2009.
\newblock Substructural operational semantics as ordered logic programming.
\newblock In {LICS}, 2009.

\bibitem[\protect\citeauthoryear{Rauszer}{Rauszer}{1974}]{rauszer74studia}
{\sc Rauszer, C.} 1974.
\newblock A formalization of the propositional calculus of {H-B} logic.
\newblock {\em Studia Logica\/}~{\em 33}, 23--34.

\bibitem[\protect\citeauthoryear{Sch{\"{u}}rmann}{Sch{\"{u}}rmann}{2000}]{schurmann00phd}
{\sc Sch{\"{u}}rmann, C.} 2000.
\newblock Automating the meta theory of deductive systems.
\newblock Ph.D. thesis, Carnegie Mellon University.
\newblock CMU-CS-00-146.

\end{thebibliography}


% \vspace{-2mm}

\label{lastpage}
\end{document}

% end of new_TLP2egui.tex
% \begin{itemize}
%   \item (Single Context Variable)
% 
% 
%  \item If $\Gamma_i$ is a unbounded context and different from $\Gamma_{p(A_i), c(A_i)}$ then $\Gamma_{j, i}  = \Gamma_i$ for all $1 \leq j \leq k$;
%  \item If $\Gamma_i$ is a bounded context, then the contexts $\Gamma_{j, i}$ for
%  $1 \leq j \leq k$ are \emph{fresh} context names, that is appearing only once in $\Xi$. 
% \end{itemize}
% The set of formulas $\Bscr$ is the smallest set such that:
% \begin{itemize}
%  \item $\In{\diamond(A_1,\ldots, A_k)}{\Gamma_{c(\diamond)}} \in \Bscr$;
%  \item If $\Gamma_{c(\diamond)}$ is unbounded, 
%  \item $\In{A_i}{\Gamma_{p(A_i), c(A_i)}}$ for all $i \in \{1, \ldots l\}$;
%  \item If $\Gamma_i$ is a bounded context, then we add the formulas 
%  $\unions{\Gamma_i}{[\Gamma_{1,i}, \ldots, \Gamma_{k,i}]}$.
%  \item 
% \end{itemize}
% 
% 
% \begin{definition}
% Let $D = (\Xi, \Bscr)$ be a derivation skeleton with a premise $P$ 
% and $D_R = (\Xi_R, \Bscr_R)$ be the derivation skeleton of a inference
% rule $R$. Then the derivation skeleton obtained by applying $D_R$ to 
% $D$ at premise $P$ as follows:
% \end{definition}
% 
% Assume that $R$ is a left-introduction rule in the given proof system with two premises, 
% introducing the formula $\diamond(A,B)$ in the context $\Gamma_i$, where $\diamond$ is some connective.
% Moreover, the sub-formula $A$ appears in the left-premise in the context $\Gamma_j$, 
% while the sub-formula $B$ appears in the right-premise in the context $\Delta_k$.
% We now show how to construct the derivation skeleton $\tup{\Xi, \Bscr}$ for an 
% inference rule $R$. The other cases follows, when a rule has more or less than 
% two premises follow similarly. $\Xi$ is the following derivation
% \[
% \infer{\Gamma_1  \mid \cdots \mid \Gamma_n \vdash \Delta_1 \mid \cdots \mid \Delta_m}
%   {\Gamma_{11}  \mid \cdots \mid \Gamma_{1n} \vdash \Delta_{11} \mid \cdots \mid \Delta_{1m}
%   &
%   \Gamma_{21}  \mid \cdots \mid \Gamma_{2n} \vdash \Delta_{21}  \mid \cdots \mid \Delta_{2m}
% }
% \]
% 
% For the remaining contexts, \ie, those bounded and 
% exactly one, we create fresh context variables. For instance, if $\Gamma_l$ is a bounded context in 
% the conclusion, then the contexts $\Gamma_{1l}$ and $\Gamma_{2l}$ are \emph{fresh context names} not appearing
% anywhere else. 
% 
% The derivation skeleton above is very similar to the rule $R$, but where the formulas are 
% abstracted away. The atomic formulas in $\Bscr$ specify
% 
% Later, we will specify using a LP based on the clauses in Table~\ref{fig:predicates} that in 
% this particular case $\Gamma_l = \Gamma_{1l} \cup \Gamma_{2l}$, that is, the formulas 
% in the context $\Gamma_{l}$ are split among the premises. Similarly, for contexts that have exactly 
% one formula. We use also use an LP to specify that these have exactly one formula.
