\documentclass[a0,portrait]{a0poster}
\usepackage{amsfonts, amsmath}
\usepackage{liacs-poster}
\usepackage{epic,eepic}
\usepackage{graphicx}
\usepackage{multirow}

\frenchspacing

\title{Discrete~Tomography: A~Neural~Network~Approach}
\authorone{Jonathan~K.~Vis $^\textrm{a}$}
\authortwo{Walter~A.~Kosters $^\textrm{a}$}
\authorthree{K.~Joost Batenburg $^\textrm{b,\,c}$}
\affiliation{$^\textrm{a}$~Leiden Institute of Advanced Computer Science, Universiteit Leiden \\
$^\textrm{b}$~Centrum Wiskunde \& Informatica, Amsterdam \\
$^\textrm{c}$~Vision Lab, Universiteit Antwerpen}
\emailone{}
\emailtwo{}
\emailthree{}

\definecolor{edlogoblue}{rgb}{0,0,1}
\definecolor{someblue}{rgb}{0.85,0.85,1}
\definecolor{liacsred}{rgb}{0.69,0,0.29}
\def\bordercolor{liacsred}
\def\colsepcolor{liacsred}

\begin{document}
\vspace{2cm}
\maketitle
\vspace{-3cm}

\large
\begin{multicols}{3}

\section*{\textcolor{edlogoblue}{Tomography}}

\colorbox{someblue}{
\begin{minipage}[c]{23cm}
\vspace{5mm}
In \textcolor{liacsred}{tomography}, we try to reconstruct an object from a number of
\textcolor{liacsred}{projections} in multiple directions.
\vspace{5mm}
\end{minipage}}

\vspace{1cm}
\noindent
Here, we will focus on projections
obtained by parallel beams through a finite object.

\noindent
The attenuations of the beams are measured on an infinite \textcolor{liacsred}{detector}.
Different projections are generated by rotating the detector around the object, which is contained in a disc~$A$ ($f \colon A \to \left[0, 1\right]$, where $0$ is black and $1$ is white).
The construction of the projections is performed by the so-called \textcolor{liacsred}{Radon
transform}, which is the integral transform of the function~$f$ over straight
lines~$L$:
\begin{equation}\label{eq:radon_line}
R_f\left(L\right) = \int_L \! f\left(\ell\right) \, d\ell.
\end{equation}

\begin{center}
\unitlength = 0.8mm
\begin{picture}(140, 140)(0, 0)
\thinlines
\drawline(70, 0)(70, 140)
\drawline(0, 70)(140, 70)
\put(70, 70){\line(3, 2){50}}
\put(100, 100){$R$}
\put(70, 70){\circle{120}}
\put(57, 135){$y$}
\put(135, 57){$x$}
\put(98, 5){$R_f\left(L\right)$}
\put(55, 72){$\theta$}
\put(70, 70){\arc{30}{4.37}{4.712}}
\thicklines
\linethickness{1.1mm}
\put(70, 70){\line(-1, 3){25}}
\put(70, 70){\line(1, -3){25}}
\Thicklines
\spline(70, 100)(80, 110)(110, 90)(105, 60)
\spline(105, 60)(90, 10)(60, 25)
\spline(60, 25)(50, 30)(70, 70)(40, 50)(37, 60)
\spline(37, 60)(30, 90)(50, 90)(70, 100)
\thinlines
\drawline(77, 17)(98, 38)
\drawline(63, 23)(106, 66)
\drawline(54, 34)(107, 87)
\drawline(63, 63)(98, 98)
\drawline(38, 58)(85, 105)
\drawline(34, 73)(52, 92)
\end{picture}
\end{center}

\noindent
The reconstruction of the original image from its projections is obtained by
applying the \textcolor{liacsred}{inverse Radon transform} with $k$~angles $\theta_1,\ldots,\theta_k$:

\begin{equation}\label{eq:prediradon}
f\left(x, y\right) = \sum^D_{\tau' = -D} \! h\left(\tau'\right)
\sum^k_{d = 1} \! P_f\left(\theta_d, \tau' + t_d\right),
\end{equation}
with $t_d = x \cos\theta_d + y \sin\theta_d$ and $h$ a suitable weight or \textcolor{liacsred}{kernel} function acting as a filter;
$P_f$ ``is'' the discretized $R_f$ with a finite detector size $2D$.

\bigskip

\noindent
In tomography, the calculation of Equation~\eqref{eq:prediradon} is usually
performed by the \textcolor{liacsred}{filtered back projection} algorithm.

\vspace{0.2cm}
\begin{center}
\begin{picture}(510,10)
\allinethickness{1.1mm}
\put(0,5){\textcolor{liacsred}{\line(1,0){500}}}
\end{picture}
\end{center}
\vspace{-1cm}

\section*{\textcolor{edlogoblue}{Neural Networks}}
\colorbox{someblue}{
\begin{minipage}[c]{23cm}
\vspace{5mm}
Here, we will focus on two simple forms of
feedforward networks: the \textcolor{liacsred}{perceptron} and the \textcolor{liacsred}{multilayer perceptron}.
\vspace{5mm}
\end{minipage}}

\vspace*{0.5cm}
\begin{center}
\unitlength = 1mm
\begin{picture}(100, 90)(0, 0)
\Thicklines
\drawline(50, 70)(10, 20)
\drawline(50, 70)(30, 20)
\drawline(50, 70)(50, 20)
\drawline(50, 70)(90, 20)
\filltype{white}
\put(50, 70){\circle*{10}}
\put(10, 20){\circle*{10}}
\put(30, 20){\circle*{10}}
\put(50, 20){\circle*{10}}
\put(65, 20){$\ldots$}
\put(90, 20){\circle*{10}}
\put(10, 13){$\underbrace{\hspace*{80mm}}_{\vec{a} = \Sigma\mbox{\scriptsize Pr}}$}
\end{picture}
\begin{picture}(100, 90)(0, 0)
\Thicklines
\drawline(50, 90)(20, 70)
\drawline(50, 90)(40, 70)
\drawline(50, 90)(80, 70)
\drawline(20, 70)(10, 20)
\drawline(20, 70)(30, 20)
\drawline(20, 70)(50, 20)
\drawline(20, 70)(90, 20)
\drawline(40, 70)(10, 20)
\drawline(40, 70)(30, 20)
\drawline(40, 70)(50, 20)
\drawline(40, 70)(90, 20)
\drawline(80, 70)(10, 20)
\drawline(80, 70)(30, 20)
\drawline(80, 70)(50, 20)
\drawline(80, 70)(90, 20)
\filltype{white}
\put(50, 90){\circle*{10}}
\put(20, 70){\circle*{10}}
\put(40, 70){\circle*{10}}
\put(55, 70){$\ldots$}
\put(80, 70){\circle*{10}}
\put(10, 20){\circle*{10}}
\put(30, 20){\circle*{10}}
\put(50, 20){\circle*{10}}
\put(65, 20){$\ldots$}
\put(90, 20){\circle*{10}}
\put(10, 13){$\underbrace{\hspace*{80mm}}_{\vec{a} = \Sigma\mbox{\scriptsize Pr}}$}
\end{picture}
\end{center}

\noindent
The computational properties of a linear perceptron show a remarkable similarity with the computations
in Equation~\eqref{eq:prediradon}. We expect a linear perceptron to be able
to simulate these computations. The weights should form the kernel function,
in particular when many projections are used.

\section*{\textcolor{edlogoblue}{Experiments}}
The first experiment aims to illustrate the statement that a linear perceptron is
capable of \textcolor{liacsred}{simulating} Equation~\eqref{eq:prediradon}. The second experiment
describes a \textcolor{liacsred}{real-life case study}. In all cases we use a custom-developed
C\texttt{++} framework.

\subsection*{\textcolor{edlogoblue}{Simulating Filtered Back Projection}}
The original image (top row) and filtered back projection reconstruction
(second row) versus a linear perceptron (third row) and a multilayer perceptron
reconstruction with $2$ hidden nodes (bottom row), all with $32$ projections:

\vspace*{0.5cm}
\begin{center}
\smallskip

\smallskip

\smallskip
\end{center}

\noindent
The average absolute errors obtained
from the linear perceptron approach are structurally less than the errors from
filtered back projection. We can therefore conclude that a linear perceptron is
capable of simulating Equation~\eqref{eq:prediradon}.

\begin{center}
\small
\begin{tabular}{ p{2.5cm} | c | r@{$.$}l | r@{$.$}l || r@{$.$}l | r@{$.$}l | }
 \multicolumn{2}{ c }{} & \multicolumn{4}{ | p{8cm} || }{\mbox{Filtered back} \mbox{projection}} & \multicolumn{4}{ p{8cm} | }{ Linear \mbox{perceptron}} \\
 Image class & Projections & \multicolumn{2}{ | p{3.5cm} | }{Average error} & \multicolumn{2}{ | p{3.5cm} || }{Standard \mbox{deviation}} & \multicolumn{2}{ | p{3.5cm} | }{Average error} & \multicolumn{2}{ | p{3.5cm} | }{Standard \mbox{deviation}} \\
\hline
\multirow{5}{2cm}{\textsc{$2$~Ellipses (overlay)}}
 & $32$ & $0$ & $0458$ & $0$ & $0688$ & $0$ & $0205$ & $0$ & $0402$ \\
 & $16$ & $0$ & $0751$ & $0$ & $0862$ & $0$ & $0345$ & $0$ & $0482$ \\
 &  $8$ & $0$ & $1198$ & $0$ & $1262$ & $0$ & $0548$ & $0$ & $0632$ \\
 &  $4$ & $0$ & $1852$ & $0$ & $2017$ & $0$ & $0861$ & $0$ & $0865$ \\
 &  $2$ & $0$ & $2807$ & $0$ & $3135$ & $0$ & $1588$ & $0$ & $1401$ \\
 \hline
\end{tabular}
\end{center}

\subsection*{\textcolor{edlogoblue}{Real-life Case Study}}
The projection data for the real-life case study is not artificially created,
but it is instead actual \textcolor{liacsred}{real-life} output of a CT scanner, in this case a
homogeneous crystalline object. The data set consists of $332$ slices of the
homogeneous crystalline object. Per slice $500$ projections are included
(equally dividing the $0$ to $\pi$ semicircle).

\vspace*{0.5cm}
\begin{center}
\smallskip

\end{center}

\section*{\textcolor{edlogoblue}{Results}}
Some examples of tomographic reconstructions are shown. Neural Networks must be
trained (which implies a one-time increase in effort), but carry the advantage
of being capable of reconstructing specific images, and improving themselves.
Here, we show the potential of single-pixel networks for this purpose,
in particular for situations with very few projection angles, also reducing
image artifacts.

\vspace*{0.5cm}
\begin{center}
\end{center}
\vspace*{0.5cm}

\noindent
The \textcolor{liacsred}{average absolute errors} are presented. The
linear perceptron trained on the real-life data set performs best, as can be
expected. We observe about the same difference in reconstruction quality with
regard to the filtered back projection algorithm as is observed on our
artificial image classes reconstructions. We conclude that a linear
perceptron can be applied for the reconstruction of real-life objects.

\begin{center}
\small
\begin{tabular}{l | r@{$.$}l | r@{$.$}l | }
 & \multicolumn{2}{ p{3cm} | }{Average error} &
   \multicolumn{2}{ p{3cm} | }{Standard \mbox{deviation}} \\
\hline
Filtered back projection ($k=50$) & $0$ & $1198$ & $0$ & $1262$ \\
Linear perceptron                 & $0$ & $0548$ & $0$ & $0632$ \\
\hline
\end{tabular}
\end{center}
\vspace{0.5cm}

\bigskip

\noindent
We present the \textcolor{liacsred}{weight vector} of a linear perceptron after $10,\!000,\!000$ training
instances. The discrete weights are connected by linear interpolation for
better readability.

\vspace{0.5cm}
\begin{center}
\textcolor{liacsred}{\hspace*{2.8cm}Ram-Lak} kernel \hfill Weight vector \hspace*{1cm}
\end{center}

\vspace{1.6cm}
\begin{center}
\begin{picture}(510,10)
\allinethickness{1.1mm}
\put(0,5){\textcolor{liacsred}{\line(1,0){500}}}
\end{picture}
\end{center}
\vspace{-1cm}

\section*{\textcolor{edlogoblue}{Conclusions}}
\begin{itemize}
\item Neural Networks are capable of generating high quality reconstructions;
\item generally, better than filtered back projection;
\item adaptable to specific image classes;
\item convergence is not guaranteed, and initialization of the weights has a huge effect;
\item image artifacts can be reduced;
\item drawback: ``softer'' object boundaries.
\end{itemize}

\subsection*{\textcolor{edlogoblue}{Future research:}}
\begin{itemize}
\item alternative network topologies;
\item eliminating the aggregation operator;
\item introducing more adaptive training strategies, e.g., boosting, weight decay, or adaptive learning rates.
\end{itemize}

\end{multicols}

\end{document}

