\documentclass[a4paper]{article}
\usepackage{graphicx}
\usepackage{amsfonts}
\usepackage[pdfauthor={Ino de Bruijn},%
pdftitle={A Predictor for Interface Regions of Membrane Proteins},%
pdftex]{hyperref}

\author{Ino de Bruijn}
\title{A Predictor for Interface Regions of Membrane Proteins}

\begin{document}
% Submission of the final written report. Note that the report should include a link to the web-server and a description of it. The written report should be written in the form of a "scientific" paper, containing the following sections: Abstract, Introduction, Methods, Results, Discussion and References. It should be between 10 and 30 pages (single spaced 12 points) long and contain between 2 and 6 figures/tables (of which at least one should be a sensitivity-specificity plot).
 
\maketitle
\tableofcontents

\newpage
\section{Report}
\begin{abstract}
\noindent{\bf Motivation:} Membrane proteins play an important role in many
biological processes. Determining their structures experimentally is difficult,
which is one of the reasons why there has been a lot of research on predicting
their structures. In recent studies it has been shown that knowing the location of the
interface region can be useful to determine the structure of membrane proteins.
In this study we have trained several SVMs on different training data to
predict the location of the interface region. The $Z$-coordinate, i.e. the
distance of a residue to the center of a membrane, determines whether a residue is in the interface region or not.\\
{\bf Results:}\\ % TODO: summarize the main results in the abstract
{\bf Availability:} The webserver can be reached at \url{http://hasse.cbr.su.se/~inodb/}
\end{abstract}
{\bf Keywords:} {\em Membrane protein structure prediction, SVM, PSSM, $Z$-coordinate, interface region}.
\subsection{Introduction}
The $\alpha$-helical membrane proteins are important for many biological 
processes, such as signalling and transport processes. They comprise around 
20--25\% of the proteome.\cite{gransetha, krogh} Furthermore they are the 
targets for the majority of all drugs, so acquiring more information about 
their structure could be very beneficial for the pharmacological 
industry.\cite{chenandrost} It is however a lot harder to experimentally 
determine the structure of transmembrane (TM) proteins than it is to determine 
the structure of globular proteins. This is why less than 1\% of the 3D 
structures in PDB are from transmembrane proteins.\cite{berman} For most 
structures we only know what parts of the sequence are transmembrane regions 
and what the orientation of the protein is relative to the 
membrane.\cite{elofsson}\\

Since it is hard to determine the structure of membrane proteins
experimentally, it could be worthwhile to predict the structure of TM proteins.
Several methods were developed over the years for predicting their
topologies.\cite{vonheijne,tusnadyandsimon,sonnhammer} The methods have scored
quite well on multiple benchmarks.\cite{chenetall,kallandsonnhammer} Recently
Granseth {\em et al.} have proposed a new physical property that could be of
use for determining the structure of TM proteins called the $Z$-coordinate,
i.e. the distance of a residue to the center of a membrane.\cite{elofsson} In
another study Granseth {\em et al.} found out that the membrane-water interface
region is dominated by irregular structures ($\approx70\%$) and helices
($\approx30\%$) but has no $\beta$ strands.\cite{gransethb} Predicting this region could thus be useful in determining the
structure of a TM protein. In this study we classify residues to be part
of the interface region if its $Z$-coordinate is within a certain range. We
compare two types of predictors: the first type only looks at local sequence
information while the second type uses profiles generated by PSI-BLAST.\cite{altschul} We also look at combinations of both. All the predictors are Support Vector Machines (SVM's).\cite{boser}

\subsection{Methods}
%Describe the method.
We define the interface region of a protein as those residues where $|Z| \leq 
22$ and $|Z| > 10$. This allows us to see the problem as a binary 
classification problem. Residues within the range are labelled as positive 
examples and residues outside of the range are labelled as negative ones. We 
train multiple Support Vector Machines on a set of non-homologous membrane 
proteins and perform a 5-fold cross-validation. The SVMlight implementation by 
Joachims was used for this study.\cite{joachims}

\subsubsection{Training data}
For the training data a set of 530 membrane proteins was homology reduced using 
BLASTClust.\cite{altschul} Using 20\% for the sequence length to be covered and 
20\% for the percent identity score resulted in a dataset of 88 non-homologous 
membrane proteins. Two types of predictors were trained: one on local sequence 
information and another on profiles generated by PSI-BLAST.\cite{altschul} For 
the former the training data is sparse encoded in the following way. Each 
residue is expressed as a binary feature which results in 20 features in total 
for a window size of one (one for each residue). A negative training example 
could for instance look like this:
\begin{verbatim}
-1 1:0 2:0 3:0 4:0 5:0 6:0 7:0 8:0 9:1 10:0 11:0 12:0 13:0 14:0
15:0 16:0 17:0 18:0 19:0 20:0
\end{verbatim}
Only one of the features can be one, since a residue cannot be two residues at 
the same time. When training on larger windows, each position in the window is 
represented by 20 features so the total number of features then becomes $20 
\times s$ where $s$ is the size of the window.\\


For the second predictor profiles for each protein were generated using 
PSI-BLAST against the uniref90 database. PSI-BLAST detects distant homologues 
for a query sequence.\cite{altschul} Several SVMs were trained with a variety of 
combinations of information extracted from these profiles. The best scoring 
training data was achieved with a combination of the position-specific scoring 
matrix and the information per position. The position-specific scoring matrix 
(PSSM) was converted as done by Jones to values between 0 and 1 using the 
standard logistic function:
\begin{equation}
\frac{1}{1 + e^{-x}}
\end{equation}
where $x$ is the raw profile matrix value.\cite{jones} Using the 
weighted observed percentages rounded down or the relative weight of gapless 
real matches to pseudocounts resulted in worse performing models. The number of 
features that represent the combination of the PSSM and the information per 
position is $21 \times s$ where $s$ is the size of the window.

\subsubsection{Evaluation metrics}
The methods are compared on a variety of metrics to determine the quality of 
the predictions. We use the metrics in Table \ref{tab:metrics}. Here $TP$ are the true positives, $TN$ the true negatives, $FP$ the false positives and $FN$ the false negatives.\\

\begin{table}[!th]
\begin{center}
\begin{tabular}{c|c}
Metric & Formula\\
\hline
Accuracy & $(TP + TN) / (TP + FP + TN + FN)$\\
Sensitivity & $TP / (TP + FN)$\\
Specificity & $TN / (FP + TN)$\\
False Positive Rate ($FPR$) & $FP / (FP + TN)$\\
False Discovery Rate ($FDR$) & $FP / (FP + TP)$\\
\end{tabular}
\end{center}
\caption{Various metrics for determining the quality of a prediction}
\label{tab:metrics}
\end{table}

We also use the Matthews Correlation 
Coefficient ($MCC$) as an indication of the correlation between the predicted 
and observed results:\cite{matthews}
\begin{equation}
MCC = \frac{TP \times TN - FP \times FN}{\sqrt{(TP+FP)(TP+FN)(TN+FP)(TN+FN)}}
\end{equation}

\subsubsection{Webserver}
The best scoring SVM of our study can be used from the web to predict the
interface region of a given sequence. A model is used that has been trained on
the position-specific scoring matrix, the information per position and the
local sequence information with a window size of 25.\\


After the sequence has been given in fasta format, PSI-BLAST is used to
generate an alignment profile for the sequence. The information from the
profile is sparse encoded and the data is classified. The result is then shown
to the user.

\subsection{Results}
%Show the results.
%Compare method with previous methods
A 5-fold cross validation was performed on several SVMs with a dataset of 88 
non-homologous membrane proteins. This set was constructed using BLASTClust with 
20\% for the sequence length to be covered and 20\% for the percent identity 
score on a set of 530 membrane proteins. The distribution of the 
$Z$-coordinates of the different residues from the homology reduced training 
set can be seen in Figure~\ref{fig:zdist}. % TODO: add the Z-coordinate distribution figure with label fig:zdist

\subsubsection{Window information Sequence and profile}
We have compared several ways of using the PSI-BLAST profile, using only local 
sequence information and a combination of both with different window sizes. A 
comparison on accuracy can be seen in Figure \ref{fig:pssm_acc} and a comparison on MCC can 
be seen in Figure \ref{fig:pssm_mcc}. All training was done using the linear kernel. A combination of the position-specific scoring matrix (PSSM), the information per position (IPP) and the local sequence information with a window size of 51 scored best. It 
has an MCC value of $0.45109$ and an accuracy of $0.76170$. The PSSM and IPP combination is really close to that score as well at a window size of 61. We also tried to use the `weighted observed percentages rounded down' values of the PSI-BLAST 
profile, but those resulted in very low MCC values. Using the `relative weight of gapless real matches to pseudocounts' (RWGRMP) information did not really improve the predictions either.

\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.65]{PSSM_MCC.png}
\end{center}
\caption{MCC comparison of varying training data}
\label{fig:pssm_mcc}
\end{figure}

\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.65]{PSSM_ACC.png}
\end{center}
\caption{Accuracy comparison of varying training data}
\label{fig:pssm_acc}
\end{figure}

\subsubsection{Different kernels}
To improve the results from the PSSM and IPP combination several kernels were 
tried with different parameters. SVMlight supports a linear kernel, a polynomial 
kernel ($(s a*b + c)^d$), a radial basis function ($-g ||a-b||^2$) and a 
sigmoid/poly kernel ($s a*b + c$).\cite{joachims} Only the polynomial kernel 
improved the results. In Figure~\ref{fig:poly} a comparison can be seen of training on the 
PSSM and IPP combination with a polynomial kernel and variable $d$ values. 
% TODO: add the kernel comparison figure with label fig:poly and fill in the d = 3 results
For $d = 3$ the results are as follows.


\subsection{Discussion}
%Discuss the bidniz.
From all the various features we tried, it turned out that a combination of the local sequence information, the position-specific scoring matrix and the `information per position' gave the best results. Trying different polynomial kernels also improved the predictions significantly. Although the results are quite good there are still plenty of options to be explored. An idea might be to use secondary structure elements as features to be trained on and use PSIPRED to predict those secondary structure elements. Another idea is to use regression instead of binary classification to make a prediction of the actual $Z$-coordinate giving us a metric of how certain the prediction is. If it fits in the middle of the $Z$-coordinate range that identifies the interface region, the chance is higher that it is indeed part of the interface region than if it is on the boundaries of the range.

\subsubsection{Other methods}
% Compared to previous methods it scores quite well.
ZPRED from Granseth {\em et al.} uses the output from a hidden Markov model together 
with a neural network.\cite{elofsson} The average error is 2.55{\AA} and 68.8\% of the residues are predicted
within 3{\AA} of the target $Z$-coordinate in the 5-25{\AA} region. Papaloukas {\em et al.} have presented Zpred2,
an improved version of ZPRED which has average accuracy error of 2.18{\AA} corresponding to an improvement
by 15\% compared to the previous version.\cite{papaloukas} Tseng {\em et al.} created a protein-protein predictor based on a `tree decomposition support vector machine' (TDSVM) which, according to them, speeds up the training process compared to kernel-based support vector machines by a factor of nearly 300.\cite{tseng}

\newpage
\section{Summaries}

\subsection{SVM}
Summary of \cite{shawe}.\\

The approach of using examples to synthesize programs is known as the learning
methodology. There are various types of learning. Supervised learning uses 
input/output pairs. In unsupervised learning there are no output values; the 
learning task is to gain some understanding in the process that generated the 
data. In query learning the learner is allowed to query the environment about 
the value associated with a particular input. In reinforcement learning the 
learner has a range of actions at their disposal which they can take to move 
towards states where they can expect high rewards.\\

In supervised learning the examples used for learning are referred to as the
training data. Sometimes there exists an underlying function called the target
function which maps inputs to outputs. The estimation of the target function is
known as the solution. The solution is chosen from a set of candidate functions
which map from the input space to the output domain. Usually a particular set
or class of candidate functions known as the hypotheses is chosen before trying
to learn the correct function. The algorithm which takes the training data as
input and selects a hypothesis from the hypothesis space is referred to as the
learning algorithm. A learning problem with binary outputs is called a binary
classification problem, one with a finite number of categories a multi-class
classification and for real-valued outputs the problem is known as regression.
Another type of variation in learning models is the way in which the training
data are generated and how they are presented to the learner. With batch
learning all the data are given to the learner at the start of learning and
with on-line learning the learner receives one example at a time and gives
their estimate of the output before receiving the correct value. In on-line
learning the current hypothesis is updated in response to each new example and
the quality of learning is assessed by the total number of mistakes made during
learning.\\

It is difficult to find a verifiable consistent hypothesis, because the
function that we are trying to learn may not have a simple representation and
the training data is frequently noisy. The ability of hypothesis to correctly
classify data not in the training set is known as its generalization. A
hypothesis that becomes too complex in order to become consistent is said to
overfit. Support Vector Machines (SVM) are learning systems that use a
hypothesis space of linear functions in a high dimensional feature space,
trained with a learning algorithm from optimization theory that implements a
learning bias derived from statistical learning theory.\\

Binary classification is frequently performed by using a real-valued function 
$f : X \subseteq \mathbb{R}^{n} \rightarrow \mathbb{R}$. The input $\mathbf{x} 
= (x_1,...,x_n)'$ is assigned to the positive class if $f(\mathbf{x}) \geq 0$ 
and otherwise to the negative class. If $f(\mathbf{x})$ is a linear function it 
can be written as $f(\mathbf{x}) = \langle \mathbf{w} \cdot \mathbf{x} \rangle 
+ b$ where $(\mathbf{w},b) \in \mathbb{R}^n \times \mathbb{R}$ are the 
parameters that control the function. They are called the weight vector and the 
bias respectively. These parameters must be learned from the input data.  
Typically $X$ is used to denote the input space and $Y$ to denote the output 
domain.  Usually $X \subseteq \mathbb{R}^n$ with $Y = \{-1,1\}$ for binary 
classification, $Y = \{1,2,...,m\}$ for $m$-class classification and $Y 
\subseteq \mathbb{R}$ for regression. The training set is denoted by $S = 
((\mathbf{x}_1,y_1),...,(\mathbf{x}_\ell,y_\ell)) \subseteq (X \times Y)^\ell$ 
where $\ell$ is the number of examples.\\

The perceptron algorithm is the first iterative algorithm for learning linear 
classification. It is an on-line and mistake-driven procedure which starts with 
an initial weight vector $\mathbf{w}_0$ and adapts it each time a training 
point is misclassified by the current weights. The procedure is guaranteed to 
converge if the data is linearly separable i.e. there exists a hyperplane that 
correctly classifies the training data. The number of iterations depends on a 
quantity called the (functional) margin which is defined as $\gamma_i = 
y_i(\langle \mathbf{w} \cdot \mathbf{x}_i \rangle + b )$. The functional margin 
distribution of a hyperplane ($\mathbf{w},b$) with respect to a training set 
$S$ is the distribution of the margins of the examples in $S$. Another 
important concept for SVMs is the dual representation. The final hypothesis is 
a linear combination of the training points: $\mathbf{w} = \sum_{i=1}^{\ell} 
\alpha_iy_ix_i$ where $\alpha_i$ are positive values proportional to the number 
of times misclassification of $\mathbf{x}_i$ has caused the weight to be 
updated. The vector $\mathbf{\alpha}$ can be thought of as an alternative 
representation of the hypothesis in dual coordinates. Two common methods used 
in linear regression are the least squares method and the ridge regression 
method.\\

In general complex real-world applications require more expressive hypothesis 
spaces than linear functions. Frequently the target concept cannot be expressed 
as a simple linear combination of given attributes but in general requires that 
more abstract features of the data be exploited. Kernel representations project 
the data into a high dimensional feature space to increase the computational 
power of the linear learning machines. Different approaches to feature 
selection exist. Frequently one seeks to identify the smallest set of features 
that still conveys the essential information contained in the original 
attributes. This is called dimensionality reduction.\\

Non-linear machines are built in two steps: first a fixed non-linear mapping 
transforms the data into a feature space $F$ and then a linear machine is used 
to classify them in the feature space. Using dual representation, the decision 
rule can be evaluated using just inner products between the test point and the 
training points: $f(\mathbf{x}) = \sum_{i=1}^{\ell}\alpha_iy_i 
\langle\mathbf{\phi}(\mathbf{x}_i) \cdot \mathbf{\phi}(\mathbf{x}) \rangle + 
b$. If there is a way to calculate the inner product 
$\langle\mathbf{\phi}(\mathbf{x}_i) \cdot \mathbf{\phi}(\mathbf{x}) \rangle$ in 
feature space directly it is possible to merge the two steps. Such a direct 
computation is called a kernel function. One of the curious facts about using a 
kernel is that we do not need to know the underlying feature map in order to be 
able to learn in the feature space.\\

One way to find a kernel function would be to first create a complicated 
feature space, then work out what the inner product in that space would be, 
and finally find a direct method of computing that value in terms of the 
original input. In practice the kernel function is defined directly, hence 
implicitly defining the feature space. Mercer's theorem provides a 
characterization of when a function $K(\mathbf{x},\mathbf{z})$ is a kernel. By 
using simple kernels that satisfy Mercer's theorem more complicated kernels can 
be formed.

\subsection{Prediction of Protein Secondary Structure at Better than 70\% 
Accuracy}
Summary of \cite{rost}.\\

Rost and Sander present a two-layered feed-forward neural network, trained on a 
non-redundant data base of 130 protein chains, that predicts the secondary 
structure of water-soluble proteins. By using sequence profiles containing 
evolutionary information in real coding and conservation weights the authors 
are able to gain a 6\% increase in accuracy. The jury decision over nine 
networks that they use as the third level of their network system improves the 
accuracy by 2\%. According to them their system is the first to surpass 70\% 
accuracy for soluble chains. The entire system is referred to as PHD.\\

PHD has also been tested on membrane proteins instead of soluble proteins, but 
the results were below average. The secondary structure content prediction 
turns out to be predicted correctly with  an error of below 10\%, which is 
comparable, at least, to an intermediate level of circular dichroism (CD) 
analysis. It is in particular competitive with CD for predicting strand. A 
further consistency check is the use of a reliability index which promises to 
be rather useful in practice: residues predicted more reliable than others can 
be identified. The fifth of all residues with highest reliability is predicted 
with an accuracy > 90\%. Filtering the prediction by substituting one or 
two-residue helices by loops does not affect the overall accuracy.\\

PHD's average prediction accuracy for a set of 26 new proteins having no 
significant sequence similarity to any protein in the training set used was 
72\%. This indicates that the quality of the network system is probably not 
overestimated.

\subsection{Protein Secondary Structure Prediction Based on Position-specific
Scoring Matrices}
Summary of \cite{jones}.\\

In the {\em Protein Secondary Structure Prediction Based on Position-specific 
Scoring Matrices} paper by David T. Jones a two-stage neural network that 
predicts protein secondary structure is presented. It consists of three stages: 
generation of a sequence profile, prediction of initial secondary structure and 
finally the filtering of the predicted structure.\\

The sequence profiles are generated with PSI-BLAST. The values from the 
position-specific scoring matrix are scaled to the 0--1 range using the 
standard logistic function, which are then used as input to the neural network.  
A standard feed-forward back-propagation network architecture with a single 
hidden layer is used for PSIPRED. A second network is used to filter successive 
outputs from the main network. The method has been evaluated with a 
cross-validation procedure. Instead of removing proteins with high sequence 
similarity from the test set (as done in previous works), the proteins with a 
similar fold have been removed.\\

The results indicate that the method is at the top of the range of accuracies 
documented for secondary structure prediction methods. The author gives three 
possible reasons for the success of PSIPRED: the produced alignments are based 
on pairwise local alignments, the use of iterated profiles greatly enhances the 
sensitivity of PSIPRED and the accuracy of PSI-BLAST alignments are 
significantly higher than any other method for automatic multiple sequence 
alignment.

\subsection{Improving the accuracy of transmembrane protein topology prediction 
using evolutionary information}
Summary of \cite{jones2}.\\

Membrane proteins mediate many important biological processes. It is however
hard to determine their structure because they are not water soluble. In the
paper {\em Improving the accuracy of transmembrane protein topology prediction
using evolutionary information} Jones presents a new method for predicting
transmembrane protein topology from sequence profiles (MEMSAT3). The method has
been benchmarked with full cross-validation on a standard data set of 184
transmembrane proteins. It predicts the correct topology and the locations
of transmembrane segments for 80\% of the test set (comparable to 62-72\% for
other popular methods). With the use of a second neural network that
discriminates transmembrane from globular proteins a very low overall false
positive rate (0.5\%) can be achieved in detecting transmembrane proteins.\\

The MEMSAT method was the first prediction method to fully integrate the
prediction of transmembrane topology with the prediction of transmembrane
segments. The method made use of scores compiled from membrane protein data and
a dynamic programming algorithm to search through all possible topological
models by a process of expectation maximization. MEMSAT2 made use of the same
scoring tables as MEMSAT and the same dynamic programming algorithm, but used
sequence profiles to produce a consensus topology score across an aligned
family of sequences.\\

The major new source of information in MEMSAT3 is the evolutionary information
manifest in the PSI-BLAST-derived sequence profiles, from which the neural
network is able to determine much more reliable topogenic scores than could be
obtained through single residue statistics. The benchmarking results clearly
show that MEMSAT3 is an effective method for transmembrane topology prediction,
with higher overall prediction accuracy than other popular methods and the
previous version of the method (MEMSAT2) based on statistical scoring tables.\\

\subsection{A hidden Markov model for predicting transmembrane helices in protein
sequences}
Summary of \cite{sonnhammer}.\\

In the {\em A hidden Markov model for predicting transmembrane helices in
protein sequences} paper by Sonnhammer, Von Heijne and Krogh a novel method to
model and predict the location and orientation of alpha helices in
membrane-spanning proteins is presented (TMHMM). It is based on a hidden Markov
model (HMM) with an architecture that corresponds closely to the biological
system.  There are three main locations of a residue: in the transmembrane
helix core (in the hydrophobic tail region of the membrane), in the
transmembrane helix caps (in the head region of the membrane), and in loops. Due to
different residue distributions on different sides, the authors chose to use 7
different states: one for the helix core, two for caps on either side, one for
loops on the cytoplasmic side, one each for short and long loops on the
non-cytoplasmic side, and one for `globular domains' in the middle of each
loop. The total number of free variables in the entire model is 216, which is a
lot less than the tens of thousands usually used for neural networks.\\

The training happened in three stages. In the first stage, the model was
estimated by the Baum-Welch reestimation procedure, which is the standard
method for maximum likelihood estimation of HMMs. In the second stage, the
first model was used to relabel the data and in the third stage the second
model was trained further by a method for `discriminative' training.\\

Two datasets were used for testing to compare the method with other methods.
One test set consisted of 38 multi-spanning and 45 single-spanning with
experimentally determined topologies, the other of 108 multi-spanning and 52
single-spanning proteins. The accuracy of the TMHMM is high compared to MEMSAT,
particularly on dataset 2. Notable was that despite TMHMM being based on single
sequences it obtained about the same single TM accuracy as PHDtmh using
multiple alignments. For overall topology PHDtmh performed better.

\subsection{ZPRED: Predicting the distance to the membrane center for residues in
$\alpha$-helical membrane proteins}
Summary of \cite{elofsson}.\\

Granseth, Viklund and Elofsson describe a novel challenge for the prediction of
$\alpha$-helical membrane proteins: to predict the distance between a residue
and the center of the membrane, a measure they define as the $Z$-coordinate.
They argue that it is advantageous to have a measure that is based on a more
``physical'' property since it implicitly contains information about re-entrant
helices, interfacial helices, the tilt of a transmembrane helix and loop
lengths. A significant difference between their Z-coordinate predictor and a
topology predictor is that the former only predicts the distance from the
center of the membrane and not the direction of this distance, i.e.\ there is
no distinction between the cytosolic (``inside'') and extracellular (``outside'')
sides of the membrane. This simplifies the problem somewhat but is possible
since the membrane is symmetric to a large degree. A further simplification is
that all residues that are between 0-5{\AA} are defined to be in a central
hydrophobic region and hence set to 5{\AA}. All residues that are outside the lipid
bilayer, \textgreater25{\AA} are in a similar manner defined to be in a non-membrane
environment and set to 25{\AA}. This means that the predictor focuses the
predictions on the region where the environment inside the membrane changes
most.\\

The $Z$-coordinates were predicted using various HMM-based methods. The HMM
based method with best $Z$-coordinate accuracy is PRODIV-TMHMM with an average
error of 2.83{\AA}. It predicts 65.9\% of the residues within 3{\AA} from their target
Z-coordinate. Using artificial neural network based methods for $Z$-coordinate
prediction was less effective. It is interesting to note however that the
$Z$-coordinate can be predicted using the sequence alone. The use of
evolutionary profiles also gave a significant increase. The $Z$-coordinate was
also predicted using a combination of neural networks and hidden Markov models.
Some HMM-based method predictions significantly improved when using a trained
neural network as additional input, but PRODIV-TMHMM did not. Using the output
from PRODIV-TMHMM and evolutionary profiles as input to a neural network (ZPRED)
produced the method of choice for predicting the $Z$-coordinate. The average
error was 2.55{\AA}, around one half turn of a transmembrane helix, and more than
two-thirds of the residues were predicted to be within 3{\AA} from the target
$Z$-coordinate.

\subsection{A comprehensive assessment of sequence-based and template-based
methods for protein contact prediction}
Summary of \cite{wu}.\\

Ab initio protein structure predictions by folding simulations almost always
fail for the medium/large size proteins approximately \textgreater120-150
residues. The pair-wise residue contact information can be used to constrain
the simulation search within a smaller phase space which can also improve the
minimum of the landscape funnel of the overall energy function.\\

The methods of protein contact prediction can be categorized into 3 classes:
statistical methods using correlated mutations, machine learning and threading
template-based voting. There are also other combinations of the first two
methods. Wu and Zhang have developed two methods for protein contact
prediction: SVM-SEQ, a sequence-based learning approach, and SVM-LOMETS, a
threading template-based voting method. SVM-SEQ is trained on a training set
with non-contacted and contacted residue pairs at a ratio of 4:1 (normally
\textgreater20:1) to prevent bias. It trains on a combination of local window
features and in-between segment features. SVM gave better results than all
tested neural networks. LOMETS uses nine threading programs to make a
prediction, which gives it a combination of sequence profile alignments,
structural profile alignments, pair-wise potentials and hidden Markov-models.
The problem is that LOMETS has a coarse-grained distance cut-off and the
alignment quality is not considered. SVM-LOMETS trains on the distance cut-off
and the alignment quality.\\

SVM-SEQ performs slightly better than other machine-learning methods, while
SVM-LOMETS showed significant improvement compared to LOMETS. When compared to
each other SVM-SEQ scores better in short range targets (6-11) and SVM-LOMETS
better in medium (12-24) and long (\textgreater24) targets. The targets were
also split in `Easy', `Medium', `Hard' and `Very Hard' targets. It turned out
that SVM-SEQ scored comparable or better for the `Hard' and `Very Hard'
targets and SVM-LOMETS scored better for the `Easy' and `Medium' targets.

\subsection{Protein homology detection by HMM-HMM comparison}
Summary of \cite{soding}.\\

S\"{o}ding presents a new method for detecting distant homologous relationships
between proteins in the paper {\em Protein homology detection by HMM-HMM comparison}.
The method is based on a generalization of the alignment of protein sequences
with a profile hidden Markov Model (HMM) to the case of pairwise alignment of
profile HMMs. The log-odds score for sequence-profile or sequence-HMM
comparison has proven to be highly successful in homology recognition. This is
underscored by the fact that virtually all sequence-profile and sequence-HMM
comparison methods are based on it. The log-odds score is a measure for how
much more probable it is that a sequence is emitted by an HMM rather than by a
random null model. S\"{o}ding has generalized the log-odds score for
sequence-HMM comparison to the case of HMM-HMM comparison. He also presents an algorithm
that maximizes the log-sum-of-odds score.\\

The HHsearch homology detection tool from S\"{o}ding is based on two methods. A
novel correlation score which increases the sensitivity by 5-10\% and a
statistical method to score predicted versus known secondary structure as well
as predicted versus predicted secondary structure that exploits the confidence
values of the secondary structure prediction. The method was benchmarked
against BLAST and PSI-BLAST as popular representatives of sequence-sequence and
profile-sequence methods, the HMM-sequence comparison package HMMER and the
profile-profile alignment tools PROF7\_SIM and COMPASS. The used dataset had a
sequence similarity below the twilight zone (20\% sequence identity). HHsearch
represents a significant improvement over the other methods, both in terms of
sensitivity and alignment quality. However, preliminary results from the
CAFASP4 structure prediction contest were below expectations. The author thinks
that the method could be improved by using separate alignment databases for structure
prediction and homology detection.

\addcontentsline{toc}{section}{References}
\begin{thebibliography}{1}

\bibitem{altschul} Altschul,S.F., Gish,W., Miller,W., Myers,E.W., Lipman,D.J. (1990) Basic local alignment search tool. {\em J Mol Biol}, {\bf 215}, 403-410.

\bibitem{berman} Berman,H.M., Westbrook,J., Feng,Z., Gilliland,G., Bhat,T.N.  
and Weissig,H. (2000) The protein data bank. {\em Nucleic Acids Res.}, {\bf 
28}, 235-242.

\bibitem{boser} Boser,B., Guyon,I., Vapnik,V. (1992) A training algorithm for 
optimal margin classifiers. {\em Proceedings of the fifth annual workshop on 
Computational learning theory}, 144-152.

\bibitem{chenetall} Chen,C.P., Kernytsky,A. and Rost,B. (2002) Transmembrane 
helix predictions revisited. {\em Protein Sci.}, {\bf 11}, 2774-2791.

\bibitem{chenandrost} Chen,C.P. and Rost,B. (2002) State-of-the-art in membrane 
protein prediction. {\em Appl. Bioinformatics}, {\bf 1}, 21-35.

\bibitem{gransetha} Granseth,E., Daley,D.O., Rapp,M., Melen,K. and von 
Heijne,G.  (2005a) Experimentally constrained topology models for 51,208 
bacterial inner membrane proteins. {\em J. Mol. Biol.}, {\bf 352}, 489-494.

\bibitem{gransethb}  Granseth,E. et al. (2005) A study of the membrane-water interface
region of membrane proteins. {\em J. Mol. Biol.}, {\bf 346}, 377-385.

\bibitem{elofsson} Granseth,E., Viklund,H., Elofsson,A. (2006) ZPRED: 
Predicting the distance to the membrane center for residues in $\alpha$-helical 
membrane proteins. {\em Bioinformatics}, {\bf 22}, 191-196.

\bibitem{joachims}  Joachims,T. (1999) Making large-Scale SVM learning practical. {\em MIT-Press}, 169-184.

\bibitem{jones}  Jones,D.T. (1999) Protein secondary structure prediction based on position specific scoring 
matrices. {\em J Mol Biol 1999}, {\bf 292}, 195-202.

\bibitem{jones2} Jones,D.T. (2007) Improving the accuracy of transmembrane protein topology prediction using evolutionary information. {\em Bioinformatics}, {\bf 23}, 538-544.

\bibitem{kallandsonnhammer} K\"{a}ll,L. and Sonnhammer,E.L. (2002) Reliability 
of transmembrane predictions in whole-genome data. {\em FEBS Lett.}, {\bf 532}, 
415-418.

\bibitem{krogh} Krogh,A., Larsson,B., von Heijne,G. and Sonnhammer,E.L.  (2001) 
Predicting transmembrane protein topology with a hidden Markov model: 
Application to complete genomes. {\em J. Mol. Biol.}, {\bf 305}, 567-580.

\bibitem{matthews}  Matthews,B.W. (1975) Comparison of the predicted and observed secondary  structure of T4 
phage lysozyme. {\em Biochim Biophys Acta}, {\bf 405}, 442-451.

\bibitem{papaloukas} Papaloukas,C., Granseth E., Viklund H., Elofsson A. (2008) Estimating the length of transmembrane helices using Z-coordinate predictions. {\em Protein Sci.}, {\bf 17}, 271--278.

\bibitem{rost} Rost,B., Sander,C. (1993) Prediction of protein secondary structure at better than 70\% accuracy. {\em J. Mol. Biol.}, {\bf 232}, 584-599.

\bibitem{shawe} Shawe-Taylor,J., Cristianini,N. (2000) Support Vector Machines and other kernel-based learning methods. {\em Cambridge University Press}.

\bibitem{sonnhammer} Sonnhammer,E.L., von Heijne,G. and Krogh,A. (1998) A 
hidden markov model for predicting transmembrane helices in protein sequences.  
{\em Proc. Int. Conf. Intell.  Syst. Mol. Biol.}, {\bf 6}, 175-182.

\bibitem{soding} S\"{o}ding,J. (2005) Protein homology detection by HMM-HMM comparison. {\em Bioinformatics}, {\bf 21}, 951-60.

\bibitem{tseng} Tseng,T.-S., Guo,C.-Y., Hsu,W.-L., Chang,F. (2010) Protein-Protein Interface Prediction based on a Novel SVM Speedup, Technical Report, Number TR-IIS-10-001, Institute of Information Science, Academia Sinica.

\bibitem{tusnadyandsimon} Tusn\'{a}dy,G.E., Simon,I. (1998) Principles 
governing amino acid composition of integral membrane proteins: Application to 
topology prediction. {\em J. Mol. Biol.}, {\bf 283}, 489-506.

\bibitem{vonheijne} Von Heijne,G. (1992) Membrane protein structure prediction.  
Hydrophobicity analysis and the positive-inside rule. {\em J. Mol. Biol.}, {\bf 
225}, 487-494.

\bibitem{wu} Wu,S., Zhang,Y. (2008) A comprehensive assessment of sequence-based and template-based methods for protein contact prediction. {\em Bioinformatics}, {\bf 24}, 924-31.

\end{thebibliography}

\end{document}
