%% This is file `elsarticle-template-3-num.tex',
%%
%% Copyright 2009 Elsevier Ltd
%%
%% This file is part of the 'Elsarticle Bundle'.
%% ---------------------------------------------
%%
%% It may be distributed under the conditions of the LaTeX Project Public
%% License, either version 1.2 of this license or (at your option) any
%% later version.  The latest version of this license is in
%%    http://www.latex-project.org/lppl.txt
%% and version 1.2 or later is part of all distributions of LaTeX
%% version 1999/12/01 or later.
%%
%% The list of all files belonging to the 'Elsarticle Bundle' is
%% given in the file `manifest.txt'.
%%
%% Template article for Elsevier's document class `elsarticle'
%% with numbered style bibliographic references
%%
%% $Id: elsarticle-template-3-num.tex 165 2009-10-08 07:58:10Z rishi $
%% $URL: http://lenova.river-valley.com/svn/elsbst/trunk/elsarticle-template-3-num.tex $
%%

% SUBMISSION: USE PREPRINT, FOR AN IDEA OF FINAL FORMAT USE FINAL
% \documentclass[preprint,12pt]{elsarticle}
\documentclass[final,3p,times,twocolumn]{elsarticle}

%% Use the option review to obtain double line spacing
%% \documentclass[preprint,review,12pt]{elsarticle}

%% Use the options 1p,twocolumn; 3p; 3p,twocolumn; 5p; or 5p,twocolumn
%% for a journal layout:
%% \documentclass[final,1p,times]{elsarticle}
%% \documentclass[final,1p,times,twocolumn]{elsarticle}
%% \documentclass[final,3p,times]{elsarticle}
%% \documentclass[final,3p,times,twocolumn]{elsarticle}
%% \documentclass[final,5p,times]{elsarticle}
%% \documentclass[final,5p,times,twocolumn]{elsarticle}

%% if you use PostScript figures in your article
%% use the graphics package for simple commands
%% \usepackage{graphics}
%% or use the graphicx package for more complicated commands
%% \usepackage{graphicx}
%% or use the epsfig package if you prefer to use the old commands
%% \usepackage{epsfig}

%% The amssymb package provides various useful mathematical symbols
\usepackage{amssymb}
%% The amsthm package provides extended theorem environments
%% \usepackage{amsthm}

%% The numcompress package shortens the last page in references.
%% `nodots' option removes dots from firstnames in references.
\usepackage[nodots]{numcompress}

%% The lineno packages adds line numbers. Start line numbering with
%% \begin{linenumbers}, end it with \end{linenumbers}. Or switch it on
%% for the whole article with \linenumbers after \end{frontmatter}.
%% \usepackage{lineno}

\usepackage{subfig}
\usepackage{linguex}
\usepackage{color}

\usepackage{multirow}

%% natbib.sty is loaded by default. However, natbib options can be
%% provided with \biboptions{...} command. Following options are
%% valid:

%%   round  -  round parentheses are used (default)
%%   square -  square brackets are used   [option]
%%   curly  -  curly braces are used      {option}
%%   angle  -  angle brackets are used    <option>
%%   semicolon  -  multiple citations separated by semi-colon
%%   colon  - same as semicolon, an earlier confusion
%%   comma  -  separated by comma
%%   numbers-  selects numerical citations
%%   super  -  numerical citations as superscripts
%%   sort   -  sorts multiple citations according to order in ref. list
%%   sort&compress   -  like sort, but also compresses numerical citations
%%   compress - compresses without sorting
%%
%% \biboptions{comma,round}

% \biboptions{}


\journal{Journal of Biomedical Informatics}

\hyphenation{bio-lexi-con}

\begin{document}

\begin{frontmatter}

%% Title, authors and addresses

%% use the tnoteref command within \title for footnotes;
%% use the tnotetext command for the associated footnote;
%% use the fnref command within \author or \address for footnotes;
%% use the fntext command for the associated footnote;
%% use the corref command within \author for corresponding author footnotes;
%% use the cortext command for the associated footnote;
%% use the ead command for the email address,
%% and the form \ead[url] for the home page:
%%
%% \title{Title\tnoteref{label1}}
%% \tnotetext[label1]{}
%% \author{Name\corref{cor1}\fnref{label2}}
%% \ead{email address}
%% \ead[url]{home page}
%% \fntext[label2]{}
%% \cortext[cor1]{}
%% \address{Address\fnref{label3}}
%% \fntext[label3]{}

\title{An investigation of challenges in the automatic acquisition of verb subcategorization information in the biomedical literature}

%% use optional labels to link authors explicitly to addresses:
%% \author[label1,label2]{<author name>}
%% \address[label1]{<address>}
%% \address[label2]{<address>}

\author{Author1}
\ead{a1@cl.cam.ac.uk}

\author{Author2}
\ead{a2@cl.cam.ac.uk}

\author{Author3}
\ead{a3@cl.cam.ac.uk}

\author{Author4}
\ead{a4@ucdenver.edu}

\author{Author5}
\ead{a5@ucdenver.edu}

\address{Cambridge, UK}
\address{Denver, CO, USA}

\begin{abstract}
%% Text of abstract

Information about verb subcategorization frames (SCFs) is important to
many tasks in natural language processing (NLP).  Biomedical text, a
major target for applications such as information extraction, is
composed of subdomains with different textual characteristics, and is
growing at an exponential rate.  Given the difficulty of manual domain
adaptation, it is essential to understand the issues involved in
automatic SCF acquisition for biomedicine and its subdomains.  This
paper considers three fundamental issues that arise: the performance of current
acquisition technology on biomedical text, the definition of
subcategorization in biomedical text, and the degree of SCF
variation at the level of biomedical subdomains.

First, we produce an SCF gold standard and evaluate two SCF lexicons, one from an existing
automatically-acquired biomedical resource 
% developed with NLP tools
% adapted for particular biomedical subdomains 
and the other automatically acquired for this paper from the entire PubMed Open Access collection (PMC OA) using SCF acquisition methods developed for general
language.
We find that a resource built using tools tuned for a subdomain of
biomedicine has greater precision, but a resource built using
general-language tools with a large biomedical corpus has better
coverage of SCFs that may be important for information extraction.  We
release the full unfiltered lexicon acquired from PMC OA as a
resource. 
Second, we investigate the implications of two definitions of
subcategorization, based on whether the argument-adjunct distinction
is maintained, by 
producing
% constructing 
and comparing two gold standards.
Evaluation of an acquired SCF lexicon against these gold standards reveals major performance
differences depending on the definition of subcategorization.
% automatically
% acquire an SCF lexicon from the entire PubMed corpus (OpenPMC) using
% current SCF acquisition methods developed for general language, and
% evaluate it along with an existing biomedical SCF resource.  
Finally, we quantify and present variation in SCF behavior
between subdomains of biomedicine.  We find significant variation,
which implies that resources targeted to a narrow subset of
biomedicine will not generalize to the entire domain. From the results
of these investigations we argue for minimally-supervised automatic
SCF acquisition methods as a way forward.


\end{abstract}

\begin{keyword}
%% keywords here, in the form: keyword \sep keyword

%% MSC codes here, in the form: \MSC code \sep code
%% or \MSC[2008] code \sep code (2000 is the default)

\end{keyword}

\end{frontmatter}

%%
%% Start line numbering here if you want
%%
% \linenumbers

%% main text
\section{Introduction}
  \label{intro}

Natural language processing (NLP) has an ever-increasing importance in
biomedical informatics due to the exponential growth in research
publications \citep{Hunter:2006}.  NLP is essential for managing vast
amounts of unstructured text, and facilitates access to information and data extraction
that would be intractable as a manual task.  A number of core NLP
technologies used in biomedical informatics could benefit from
knowledge of {\it verb subcategorization}, i.e.~the tendency of verbs
to ``select'' the syntactic phrase types they co-occur with: for
example, the fact that the verb {\it decrease} can be intransitive
({\it The contribution decreased}), while {\it compare} cannot ({\it
We compared the predictions}, but not simply {\it We
compared}). Technologies such as syntactic and semantic parsing, event
identification, relation extraction, and entailment detection all have
the potential to make use of subcategorization information.  For
example, \citet{ananiadou:10,rupp:10} used {\it subcategorization
frames} (SCFs) in event extraction from UK PubMed Central documents.

While subcategorization resources and techniques are relatively
well-developed for general
text \citep{brent:91,brent:93,ushioda:93,manning:93,briscoe:97,korhonen:02,valex,preiss:07},
this is not yet the case for biomedicine, and the field of biomedical
informatics currently lacks a baseline understanding of how well SCF
acquisition technology performs in different subdomains of
biomedicine, and where domain adaptation efforts should be
targeted.  Studies of the
lexical characteristics of biomedical subdomains 
\citep{lippincott:10,Lippincott:2011} have shown
substantial variation, both between general and biomedical text and
across subdomains, which points to a need for accurate, comprehensive,
automatically-acquired lexical resources, since it is impractical to develop them manually for each subdomain.
Automatic methods
also facilitate the gathering of statistical information on the frequency
of terms and linguistic contexts, which can be put to further use in
NLP systems.  So far, however, fully automatic methods are more common for
nouns than verbs \citep{yu:02,mccrae:08,widdows:06}, despite the fact that verbs
are central to recovering the meaning and structure of sentences, and to
discovering relations between biomedical entities.  
In addition, there has as yet been no standard evaluation for SCF acquisition in
biomedicine, a prerequisite for understanding how to move forward in
this area.

A small number of resources that have been built to support NLP in
biomedicine do contain verb SCF information, including
BioFrameNet \citep{dolbey:06} and the UMLS SPECIALIST
Lexicon \citep{mccray:94}, but the SCF information is manually
constructed.  The BioLexicon \citep{biolexicon,sasaki:08} is the only
such resource containing an automatically constructed SCF
lexicon. However, the BioLexicon includes data from the {\it E.~coli}
subdomain alone, and each component used in acquisition of the
BioLexicon -- for example, the part-of-speech tagger, named entity
recognizer, and parser -- has been manually adapted to the subdomain
of molecular biology. Moreover, the definitions of subcategorization
used in different resources are not always uniform and sometimes
differ from the definitions used for general language systems.  The
implications of these different definitions for biomedical SCF
acquisition have not previously been investigated.

Before the field can develop state-of-the-art SCF acquisition methods
for biomedicine, it is necessary to investigate a number of topics
which will help define what such a system should look like. In this
paper we explore the following questions: How does existing
subcategorization acquisition technology, which is either developed
for general language or adapted to biomedicine by means of
manually-built resources focusing on individual subdomains, perform in
the biomedical domain? Among the various
definitions of subcategorization, what are the implications for
biomedicine of choosing one over another? And is there meaningful variation in
subcategorization behavior across subdomains of biomedicine?  The goal
of the paper is to review the state of the art and suggest ways
forward with regard to the challenges in the automatic acquisition of
biomedical SCF lexicons.

To answer our questions we undertake investigations in three main
areas. First, we evaluate the performance of current SCF acquisition
technology on the biomedical domain.  For this purpose we manually
annotate an SCF gold standard, comprising 30 verbs with
subcategorization frames and frequencies, using data from across the PubMed Open Access collection (PMC
OA) \citep{PMC:09}. This is the first gold standard for biomedical SCF in the literature. We compare two automatically acquired SCF lexicons against this gold standard.  The first is
the BioLexicon, which, as described above, was acquired using
components individually adapted to subdomains of biomedicine, and
applied to a corpus representing a particular subdomain. The second is BioVALEX, a new lexicon acquired using a system 
developed at the University of Cambridge \citep{preiss:07}, which was
previously used for general language, and which we have applied to a
large biomedical corpus without any further adaptation. This paper
represents the first such evaluation of biomedical SCF systems and
allows us to gain insight into how current technology performs and how
SCF information can best be acquired.

Second, we explore the meaning of subcategorization in biomedicine,
particularly in terms of the argument-adjunct distinction and the role
of highly selected adjuncts in biomedical subcategorization. We
manually annotate two SCF gold standards using two different
definitions of subcategorization which are common in NLP, to determine
their impact on the overall shape of the gold standard and the
accuracy of a subcategorization acquisition system.

Finally, we consider how SCFs vary at the level of biomedical
subdomains.  We use the Cambridge system to acquire BioVALEX, a large SCF
lexicon from the PMC OA corpus, representing the largest such corpus
to be used in automatic SCF acquisition.\footnote{Although e.g.~the
SPECIALIST lexicon has broad coverage of subdomains, it is less
comprehensive due to being manually compiled.} We use this new lexicon
to explore subdomain variation in biomedicine by measuring the
difference in subcategorization behavior across subdomains, providing
a new perspective on subdomain variation. We present a detailed
picture of subdomain variation in the behavior of six representative
verbs. The complete, unfiltered SCF lexicon acquired for this investigation will
be made publicly available.

Overall, our investigation seeks to provide the field of biomedical
information processing with a much-needed baseline representing the
current state of the art in SCF acquisition for biomedical text.
Previous work with large datasets for SCF acquisition has focused on
general language, so this investigation contributes towards our
knowledge of how to build domain-specific systems. We find that 
existing SCF systems suffer performance degradation in
biomedicine compared to general language, with the more
labor-intensive manually adapted system suffering most in recall, and
the unadapted general language system applied to biomedical text
suffering most in precision. 
In addition, we find that 
the
treatment of the argument-adjunct distinction has a major effect on
the ultimate shape of the resulting lexicon, and consequently on
measured performance of SCF acquisition systems. 
Finally, we find major variation between
SCF behavior in biomedical subdomains.  Taken together, these points
suggest the need for domain adaptation, potentially involving
minimally supervised approaches, and at a finer-grained level than
``biomedical text''.


\section{Background}
  \label{background}
  \subsection{Subcategorization Frames in Biomedicine}
    \label{subcat}


The traditional linguistic notion of subcategorization refers to the
syntactic arguments of a verb, that is, the syntactic phrase types
which occur obligatorily or with high probability for any given verb.
Some common syntactic phrase types which can serve as
arguments to a verb include noun phrases, prepositional phrases,
subordinate clauses, adjectives and adverbs.

Some basic examples of subcategorization frames (SCFs) can be seen in
Table~\ref{t:basic}. For the SCF names we use COMLEX Syntax notation
\citep{grishman:94}, which includes an abbreviation for each phrase type in the SCF. Thus the SCF for a transitive verb (taking one direct object noun phrase) is NP, and for a verb taking a direct object and a prepositional phrase NP-PP.  Note that we do not specify the subject NP as part of the SCF, since subjects are obligatory in English. Most verbs take several SCFs. In Table~\ref{t:basic}, it can be seen that {\it decrease} may occur with the following SCFs: NP, NP-PP, or $\oslash$ (intransitive). On the other hand, {\it compare} occurs with the first two but not as an intransitive.

\begin{table}
\begin{tabular}{|l|p{0.8\columnwidth}|}
\hline
SCF & Example \\
\hline
\hline
NP & The retraction screw and blade \underline{decreased} [$_{NP}$the risks of vessel injuries]. \\
NP-PP & Heterozygosity for twine also \underline{decreases} [$_{NP}$the frequency of precocious NEB] [$_{PP}$to less than 10\%]. \\
$\oslash$ & The contribution of cardiovascular diseases as cause of death \underline{decreased}. \\
\hline
NP & We \underline{compared} [$_{NP}$the performance of the Charlson and the Elixhauser comorbidity measures]. \\
NP-PP & We \underline{compared} [$_{NP}$the predictions] [$_{PP}$to the known interaction signs]. \\
$*$ $\oslash$ & $*$ We \underline{compared}. \\
\hline
\end{tabular}
\caption{Sample SCFs for {\it decrease} and {\it compare}. Note that {\it compare} does not occur as an intransitive, represented by the asterisk. All examples adapted from the PMC OA corpus.}
\label{t:basic}
\end{table}


Additional examples of SCFs are shown in
Table~\ref{t:complex_scfs}. Here the COMLEX SCF names include
mnemonics for some additional information beyond the simple phrasal
types. For example, the frame NP-AS-NP is a subclass of NP-PP, where
the preposition is lexicalized as {\it as}. The frame NP-TOBE
represents a direct object and a predicate using {\it to be}. The
frame THAT-S represents a sentential complement introduced by the
complementizer {\it that}, and TO-INF is an infinitival complement that uses the {\it to} form of the verb in the lower clause.

\begin{table}
\begin{tabular}{|l|p{0.75\columnwidth}|}
\hline
SCF & Example \\
\hline
\hline
NP-AS-NP & Perception of complex stimuli occurs too rapidly to \underline{support} rate coding as a reliable mechanism. \\
NP-TOBE & The larger, unsaturated propyne group has been \underline{shown} to be a useful modification for antisense oligonucleotides. \\
PP-PP & Threshold values \underline{ranged} from 0.01 to 0.99. \\
THAT-S & Experiments with PTEN-null PGCs in culture \underline{revealed} that these cells had greater proliferative capacity.\\
TO-INF & Administration of DA agonists to the rat PFC \underline{acts} to enhance working memory in these animals.\\
\hline
\end{tabular}
\caption{Sample SCFs. All examples adapted from the PMC OA corpus.}
\label{t:complex_scfs}
\end{table}


Comparing SCFs to another argument structure representation sometimes
used in biomedicine, SCFs are more general than Predicate-Argument
Structures (PASs), which have been used in Semantic Role
Labeling \citep{wattarujeekrit:04,tsai:05,tsai:08}. PASs include very
specific per-verb roles such as, for the verb {\it delete}, ``entity
doing the removing'', ``thing being removed'', and ``removed
from''. SCFs also do not identify thematic roles such as Agent and
Patient nor functional roles such as Subject and Object (though these
types of roles can often be inferred from the SCF), but simply the
syntactic phrase types that are selected by the verb (NP, PP,
etc.). SCFs thus provide a basic level of argument structure information which can
aid in event identification, but are general enough to be
automatically acquired for a large number of verbs, compared to PASs
which must be defined on a per-verb basis and thus can only
practically be identified for a small number of very frequent
biomedical verbs.

An important notion for subcategorization is that of the {\it
argument-adjunct} distinction, with the linguistic notion of subcategorization -- and the one typically used in general language -- involving only arguments.  The hallmark of a syntactic {\it argument}
is that it is obligatory or very strongly selected by the
verb.\footnote{Recall, however, that most verbs take multiple SCFs
which may involve different obligatory arguments. Therefore, the
argument is properly considered to be obligatory with regard to the
verb-SCF pair, not just the verb.} Arguments are distinguished from
{\it adjuncts}, which are phrases that elaborate on an event and are
generally optional. This distinction is often relevant for classifying
prepositional phrases.
In particular, PPs describing location, manner, or
time tend to be adjuncts. 
In Figure~\ref{f:argadj}, the PP
{\it on Sunday} is 
optional, elaborating on the event description by describing the time at
which the cooking event 
took place. 
The PP {\it on the patient} is obligatory
and 
exhibits a special, idiomatic meaning in the context of the verb {\it operate}.
The argument-adjunct distinction is sometimes fuzzy, because the judgement of optionality can be difficult to make, especially when a phrase type occurs with high frequency for a given verb. However, Figure~\ref{f:argadj} illustrates another criterion, namely that the meaning of arguments often depends on the particular verb, while
adjuncts maintain their interpretation (e.g.~temporal, locative, manner) across a wide variety of verbal
heads \citep{grimshaw:90,pollard:87}. 
See
\citep{merlo:06,abend:10} for computational approaches to the argument-adjunct distinction.

\begin{figure}
\begin{tabular}{|l|}
\hline
ADJUNCT:\\ 
The chef cooked a good lunch on Sunday.\\[5pt]
ARGUMENT: \\
The surgeon operated on the patient. \\
\hline
\end{tabular}
\caption{Example adjunct and argument PPs.}
\label{f:argadj}
\end{figure}


In biomedicine, subcategorization 
is often defined more broadly,
to include adjuncts that are less strongly
selected but 
nevertheless
important for 
the complete
description of an
event,
especially
 from the point of view of Information Extraction. \citet{cohen:06} state that ``knowledge representation in this
      [biomedical] domain requires that we {\it not} make a
      distinction between adjuncts and core arguments''. As they note,
      the tradeoff is 
a loss of some ability to generalize about adjuncts across verbs,
but they argue that
      this loss is outweighed by the ``biological integrity in the
      knowledge representation''.
Within a PAS
annotation scheme, for example, \citet{wattarujeekrit:04} include the
location PP in sentence \ref{ex:loc} and the manner adverb in sentence \ref{ex:manner}
as core arguments, neither of which would be considered arguments in general
language. Note that even under the broader definition, not every phrase type that co-occurs with the verb is an argument; \cite{wattarujeekrit:04} still consider aspectual or frequency adverbs such as {\it
  still} or {\it always} to be adjuncts.

\ex.\label{ex:loc} Apparently HeLa cells either initiate transcription \underline{at multiple sites within RPS14 exon 1} . . . \citep{wattarujeekrit:04}

\ex.\label{ex:manner} Mice have previously been shown to develop \underline{normally} . . . \citep{wattarujeekrit:04}


We will call these two views on subcategorization the ``syntactic''
view, which emphasizes syntactic obligatoriness, and the ``semantic''
view, which emphasizes a complete semantic description of an
event. Note that it is possible to use the same inventory of SCFs for
annotation on either view; the difference will be that more complex frames, e.g.~those involving PPs and adverbs, will be used more frequently on the 
semantic view. 
Put another way, the semantic view
implies using a lower 
co-occurrence frequency threshold for considering a phrase to
be an argument of a verb; \citet{wattarujeekrit:04} consider a frequency of 20\% relative to the predicate to be sufficient, for
example.  In this paper we perform the first investigation of SCF
acquisition 
that explicitly compares the two definitions of subcategorization.






  \subsection{Previous Verb Resources}
    \label{prev_subcat}


There are several existing verb lexicons for
general language that include a degree of syntactic information.  FrameNet \citep{framenet} and
VerbNet \citep{verbnet} are general language resources that indicate a
verb's predicate argument structure and the possible semantic roles of its
arguments.  The VALEX \citep{valex} general language lexicon was
produced using an earlier version of the system used in this study,
and is the largest SCF resource available for general language,
containing SCF and frequency information for some 6,400 verbs learned
from up to 10,000 sentences per verb.  PropBank \citep{propbank} is an
extension of the Penn TreeBank \citep{penntreebank} with information
about predicate-argument relationships.

A small number of verb lexicons already exist for biomedicine.  BioFrameNet~\citep{dolbey:06} extends FrameNet with links to biomedical resources (e.g.~gene ontologies).  The UMLS SPECIALIST Lexicon \citep{mccray:94} includes verb subcategorization information for some 11,000 verbs, but is manually built from a variety of biomedical and general language dictionaries.  BioProp \citep{tsai:05} adds PropBank-style annotation to 500 abstracts from the GENIA corpus.  PASBio \citep{wattarujeekrit:04} is an inventory of predicate-argument structure frames for 30 verbs, focused on molecular biology.  The frames were constructed through expert examination of MEDLINE sentences, using guidelines similar to those of PropBank. The resource most relevant to this study is the BOOTStrep
BioLexicon \citep{biolexicon}, which produces verb subcategorization
data automatically. This system is described in full in Section~\ref{biolexicon}.

To the best of our knowledge there are no previous gold standards
available that are suitable for evaluation of automatically-acquired SCF lexicons for biomedical text.  As a general
principle, manually developed resources tend not to be sufficiently
comprehensive in their SCF coverage to serve as gold standards, due to
the rarity of many SCF types, which may be missed during the
introspective process of resource creation.  For example, the majority
of verbs considered in PASBio have just two attested frames, compared
to 9 for general language verbs in the gold standard associated with
VALEX, and 6 in the gold standards we produce here.  Manual resources
also lack the statistical information that is naturally gathered
during automatic production.  The BioLexicon, on the other hand, while produced from a
corpus, is unsuitable to be used as a gold standard because the output
has not been manually corrected. Moreover, the filtering used (a
relative frequency cutoff of 0.03), while suitable for removing noise
from a lexicon, is unsuitable for gold standard creation because many
SCFs are genuinely rare.




  \subsection{Domain variation}
    \label{variation}


Domain variation is widely recognized as a major factor in deploying and extending NLP systems.  A basic approach to automatically producing NLP resources is to use machine learning to train models on annotated data, with the assumption that the model will then be applied to data drawn from a similar distribution.  The degree to which this assumption holds determines how successful the model will be, and in practice this often leads to major performance degradation.  Different models and applications will be sensitive to different types of linguistic variation: for example, a document classifier using a bag-of-words representation will be sensitive to lexical variation but not to syntactic variation, while a lexicalised parser will be sensitive to both.  Previous studies \citep{Gildea:01,Clark:Curran:07} have demonstrated significant drops in accuracy for a parser trained on the Wall Street Journal section of the Penn Treebank (newswire) and tested on the Brown corpus section of the Treebank (mixed genre).  

When considering the transfer of NLP tools and techniques to biomedical text processing applications, the distance between source and target domains is far greater than that between the WSJ and Brown corpora.  Moreover, it is an important question whether linguistic variation between specific fields of biomedicine can severely impact performance.  We refer to this type of variation as ``subdomain variation'' to emphasize that we are considering further subdivisions of 
domain-specific 
data.
Previous studies of biomedical subdomains range from extensions of traditional linguistic theory \citep{Friedman:EtAl:02} to performance evaluation of typical NLP tasks such as parsing \citep{Rimell:Clark:09}.  Similar motivations have driven work into variation along other dimensions, such as publication format \citep{Verspoor:EtAl:09}.  These studies have all emphasized the need to consider linguistic variation at a finer level than ``biomedical text'', hence our focus on subdomains.

There are many examples of corpora constructed to facilitate the implementation and evaluation of tools for specific problems in biomedical language processing, for example the BioScope corpus \citep{Vincze:EtAl:08} for speculative language detection and the BioCreative I and II gene normalisation corpora \citep{Colosimo:EtAl:05,Morgan:EtAl:08}. There are also text collections that have been annotated for multiple tasks, most notably GENIA \citep{Kim:EtAl:03}, PennBioIE \citep{Kulick:EtAl:04} and BioInfer \citep{Pyysalo:EtAl:07}.
One common feature of these corpora is that they have been compiled from just one or two specific areas of biomedical text, typically molecular biology. GENIA consists of 2,000 abstracts dealing with transcription factors in human blood cells. PennBioIE is also a corpus of abstracts, in this case covering topics in cancer genomics and the behaviour of enzymes affecting a particular family of proteins. BioInfer contains 1,100 sentences that relate to protein-protein interactions. While these are without a doubt extremely valuable resources for application building, their limited coverage 
means we cannot assume
that a system that performs well on one will also perform well on biomedical text in general. 
Recent experiments  with a significantly larger full-text biomedical corpus
confirm that language processing tools trained on one specific corpus often do
not generalize well \citep{verspoor:inpress}.


\section{Gold Standards}
    \label{gold}


In this section we describe the set of gold standards we produced for
our investigations into current SCF technology in biomedicine
(Section~\ref{investigation_tech}) and the role of different
definitions of subcategorization 
(Section~\ref{investigation_subcat}). To our knowledge, these are the
first SCF gold standards which have been produced for biomedicine.

We created a gold standard for 30 verbs using the ``semantic''
definition of subcategorization traditionally favored for
biomedicine. We refer to this gold standard as SEM-30. The verbs were
chosen based on frequency\footnote{Verbs needed to be frequent enough
to ensure enough data to annotate for the gold standard, as well as
enough raw corpus data for the SCF acquisition system to produce a comprehensive
lexicon.}, occurrence across both biomedical and general
language text, and the fact that they are known to take multiple SCFs in
biomedical text. Among such verbs, we favored ones that we believed may
have developed specialized senses in biomedicine -- e.g. {\it
activate} -- since this makes it more likely that they may have specialized
SCFs as well.
Table~\ref{t:verblist} shows the verbs in all the gold standards, with
SEM-30 on the left. In Section~\ref{investigation_tech} we will
introduce SEM-26, which is a subset of SEM-30 consisting of the 26
verbs in SEM-30 that also appear in the BioLexicon.

\begin{table}
\begin{tabular}{|l|c|c|c|c|}
\hline
\multirow{3}{*}{verb} & \multirow{3}{*}{SEM-30} & \multirow{3}{*}{SEM-26} & SYN-10 & Overlap \\ 
 & & & and & with \\
 & & & SEM-10 & \citep{preiss:07} \\
\hline
\hline
activate & $\bullet$ & $\bullet$ & $\bullet$ &  \\
analy(z/s)e & $\bullet$ & $\bullet$ & $\bullet$ & $\bullet$ \\
associate & $\bullet$ & $\bullet$ & $\bullet$ & \\
cause & $\bullet$ & $\bullet$ & & $\bullet$ \\
compare & $\bullet$ & $\bullet$ & $\bullet$ & $\bullet$ \\
contain & $\bullet$ & $\bullet$ & & \\
decrease & $\bullet$ & $\bullet$ & $\bullet$ & \\
detect & $\bullet$ & $\bullet$ & & \\
develop & $\bullet$ & $\bullet$ & & \\
enhance & $\bullet$ & $\bullet$ & & \\
examine & $\bullet$ & & & \\
express & $\bullet$ & $\bullet$ & $\bullet$ & \\
fail & $\bullet$ & & & \\
follow & $\bullet$ & $\bullet$ & & \\
generate & $\bullet$ & $\bullet$ & & \\
improve & $\bullet$ & $\bullet$ & $\bullet$ & \\
increase & $\bullet$ & $\bullet$ & & \\
induce & $\bullet$ & $\bullet$ & & $\bullet$ \\
inhibit & $\bullet$ & $\bullet$ & & \\
modify & $\bullet$ & $\bullet$ & & \\
mutate & $\bullet$ & $\bullet$ & $\bullet$ & \\
occur & $\bullet$ & $\bullet$ & $\bullet$ & \\
perform & $\bullet$ & & & \\
predict & $\bullet$ & & $\bullet$ & \\
produce & $\bullet$ & $\bullet$ & & $\bullet$ \\
recogni(z/s)e & $\bullet$ & $\bullet$ & & \\
reduce & $\bullet$ & $\bullet$ & & \\
regulate & $\bullet$ & $\bullet$ & & \\
transcribe & $\bullet$ & $\bullet$ & & \\
treat & $\bullet$ & $\bullet$ & & \\
\hline
\end{tabular}
\caption{Verbs in the gold standards SEM-30 (our full gold standard), SEM-26 (overlap of SEM-30 with verbs in the BioLexicon), SYN-10 and SEM-10 (our comparative syntactic and semantic gold standards), and those that overlap with the general language gold standard of \citet{preiss:07}.}
\label{t:verblist}
\end{table}


For annotation we chose to use the SCF inventory of \citet{preiss:07},
a rich, manually-developed inventory for general language. It consists
of 163 frames obtained by manually merging the frames exemplified in
the COMLEX Syntax \citep{grishman:94} and ANLT \citep{boguraev:87}
dictionaries, along with a small number of additional frames
identified by inspection of general language data.  We refer to this
inventory as the ``Cambridge inventory''.

Although the Cambridge inventory was developed for general language,
we found that it was suitable for biomedical text as well. During
annotation of SEM-30, only 20 sentences, or about 0.3\% of the 6,473
total annotated sentences, were identified by the annotator as
involving an SCF not included in the Cambridge inventory. Since the number of examples was so small, 
we chose to discard these sentences rather than change the
inventory.
For the semantic annotation, 
the 
annotator was guided by PropBank roles, and instructed to include in the SCF 
all phrases attached to the verbal head which were important for
biomedicine.  

Sentences for annotation were randomly selected from across all
subdomains of PMC OA. The annotator annotated between 100--250 
sentences per verb. A
custom-built tool was used which highlighted the target verb in each sentence 
and allowed the annotator to select an SCF from a drop-down menu, as
well as add comments.  A screen shot of the annotation tool is shown
in Figure~\ref{f:annointerface}. The SEM-30 gold standard was derived directly from these annotations,
and includes the SCFs and their relative frequencies for each verb. A
sample entry from SEM-30 for the verb {\it transcribe} is shown in
Figure~\ref{f:sample-gs}.

\begin{figure*}
\includegraphics[height=.4\textheight]{figures/bw_annotation.png}
\caption{Annotation interface.}
\label{f:annointerface}
\end{figure*}

\begin{figure}
\begin{tabular}{|lr|}
\hline
\multicolumn{2}{|c|}{transcribe}\\
NP & 0.719424 \\
NP-PP & 0.215827 \\
NP-as-NP & 0.021583 \\
NP-PP-PP & 0.014388 \\
PP & 0.014388 \\
INTRANS & 0.007194 \\
ADVP & 0.007194 \\
\hline
\end{tabular}
\caption{Sample gold standard entry for {\it transcribe}. Column 1 shows the SCF and column 2 shows the relative frequency across sentences annotated for the gold standard.}
\label{f:sample-gs}
\end{figure}

In order to investigate the difference between semantic and syntactic
annotation, we chose ten verbs from SEM-30 to annotate according to
the syntactic definition of subcategorization. For this comparison, we
inspected the corpus data and chose verbs that appeared to occur with
a relatively large number of highly selected adjuncts, making them
more likely to exhibit variations in subcategorization behavior across
the two definitions of subcategorization.  We call this gold standard
SYN-10, and the corresponding set of semantically annotated 
verbs SEM-10.  See Table~\ref{t:verblist} for the list of verbs in
SYN-10 and SEM-10.


The Cambridge inventory of SCFs was used for the syntactic as well as
semantic annotation. Note, however, that the syntactic and semantic
interpretations for any given SCF in the inventory may differ. For
example, according to the syntactic annotation guidelines, the frame NP-ADVP would only be used
for certain obligatory adverbs such as {\it there} as in the sentence {\it She put it
there}. In semantic annotation, the use of this frame would be extended, e.g.~for
adverbs such as {\it significantly} or {\it normally}.

A 
second 
annotator performed the syntactic annotation for SYN-10, and was given different guidelines from those given to the semantic annotator. 
The
syntactic
 annotator was instructed to use the
traditional criterion of optionality when deciding on the SCF.  The
same annotation tool was used (Figure~\ref{f:annointerface}), and the
annotator again annotated between 100--250 instances of each verb.  Any
sentence which had not been annotated by both annotators (due to
differences in opinion about whether the sentence was a valid
instance) was discarded. The total number of sentences for each verb
in SYN-10 and SEM-10 
can be seen in Table~\ref{t:agreement} in Section~\ref{investigation_subcat}. 

\begin{table}
\begin{tabular}{|l|c|c|c|c|}
\hline
 & SEM-30 & SEM-10 & SYN-10 & Gen \citep{preiss:07} \\
\hline
\hline
Low & 1 &  4 & 3 & 1 \\
High & 10 & 10 & 9 & 25 \\
Avg & 5.4 & 6.6 & 5.9 & 9.4 \\
\hline
\end{tabular}
\caption{Number of SCFs per verb in the different gold standards.}
\label{t:gs_stats}
\end{table}

It is instructive to compare some characteristics of SEM-30, SEM-10,
and the general language gold standard of
\citet{preiss:07}; see Table~\ref{t:gs_stats}. The average number of SCFs per verb is slightly higher for SEM-10 than SEM-30, which may reflect the fact that the ten verbs were manually selected based on having wide
varieties of adjuncts, which may lead to additional frames. More interestingly, the average number and the maximum number of SCFs per verb is much lower for SEM-30 than for the general language gold standard. 
This observation suggests that verb usage becomes specialized in
biomedical text and that the range of SCFs observed is only a limited
subset of that observed in general language. Interestingly, this was
the case even though the semantic definition of subcategorization was
used for SEM-30, and the syntactic for the general language gold
standard.

\section{Investigation of Current SCF Technology in Biomedicine}
  \label{investigation_tech}

Our first investigation looked at how well current SCF acquisition
technology performs in the field of biomedicine. We chose two
different SCF lexicons to evaluate against SEM-30, constituting the
first evaluation of SCF acquisition in biomedicine. We conclude that
each type of 
lexicon
has strengths and weaknesses, but overall the
accuracy in biomedicine is lower than in general language.




  \subsection{Subcategorization Frame Acquisition Systems}

Automatic SCF acquisition systems typically consist broadly of two
major components: hypothesis generation and hypothesis selection. As a
pre-processing step, a corpus of text is processed with a natural
language parser to produce a syntactic analysis for each sentence. The
hypothesis generator uses the parser output to decide which SCF is
taken by each verb in each sentence. These hypotheses are then
amalgamated into a lexicon, which consists of a set of hypothesized SCFs for each
verb appearing in the corpus.

The larger the corpus, the more likely
it is that the lexicon will capture a comprehensive set of SCFs for
each verb. However, the output of the hypothesis generation step
is typically noisy, due to the difficulty of the task: ideally the SCF acquisition system does not make use of lexical information such as SCF dictionaries, since this is 
what is being acquired, although such dictionaries are routinely used in other NLP tasks.
Thus a filtering step is required to select from among the hypotheses those that are most reliable. Filtering is a challenging task,
since some SCFs are inherently rare; 
infrequent attestation does not mean an SCF should be filtered out of the lexicon.

Within these broad outlines, approaches vary along several dimensions; 
see \cite{schulteimwalde:09} for an overview. Hypothesis generation
may involve a shallow parser (chunker) or a deep grammatical
parser. The SCF inventory may be manually defined, in which case the
task of hypothesis generation involves matching the syntactic analyses
to the pre-defined SCFs; or the SCF inventory may be learned directly from the corpus.
The size of SCF
inventories can vary widely between systems, from only a few
to
some two hundred 
SCFs, although more recent state of the art systems for general language tend to use
relatively large inventories. 
There are a number of mechanisms
for generating hypotheses, as well, using a variety of cues in the
parsed text to identify the SCFs.

A number of SCF acquisition systems have been developed for general
language (usually newswire)
text \citep{korhonen:02,valex,preiss:07}. Very good accuracy has been
obtained, although the best results use sophisticated methods such as
smoothing the SCF distributions based on the semantic
classes of the verbs \cite{korhonen:02}.  Here, we wish to see how
state of the art methods translate to biomedicine.  We evaluated two
automatically acquired lexicons:
the BioLexicon \cite{biolexicon} and 
a new lexicon which we call BioVALEX.
The BioLexicon was built using tools adapted to a subdomain of biomedicine. 
BioVALEX was built using tools developed for general language \cite{briscoe:97,preiss:07}, and we have adapted them 
only by applying 
them
to a biomedical rather than a general domain corpus.




    \subsubsection{The BioLexicon}
       \label{biolexicon}

The BOOTStrep BioLexicon \citep{biolexicon} contains automatically-produced verb subcategorization data.  Six million words of MEDLINE E. Coli
abstracts and articles are parsed with the Enju deep
parser \citep{enju}, which has been adapted to the biomedical domain as
described in \citet{hara:06}.  No SCF inventory is assumed in advance; rather,
the set of grammatical relations for each verb instance is considered
as a potential SCF.  These are filtered at a relative frequency
threshold of 0.03, i.e. for any given verb, all SCFs with a relative frequency less than 0.03 are discarded. Filtering leads to an 
inventory of 136 SCFs.  Further arguments and strongly-selected adjuncts are
chosen according to their log-likelihood with respect to the verb.  It
is important to note that the BioLexicon draws on a single subdomain of
biomedical literature.  
Moreover, 
the parsing model
used in SCF discovery
is lexicalized, with a built-in notion of subcategorization, and is
tuned for biomedical data using a variety of external resources such
as GENIA \citep{Kim:EtAl:03}.  While there are immediate benefits to these
approaches in terms of accuracy in SCF acquisition within the same
domain as the training data, the model's reliance on manual annotation is costly, and its preconception of subcategorization may introduce
bias against new subdomain behaviors.

The BioLexicon is 
publicly available through ELRA (\texttt{http://catalog.elra.info}). 
We used the BioLexicon exactly as provided without additional training or
adaptation.  Our intention here was to see how a system trained using bio-specific tools, but only on a single subdomain, could perform against a gold standard constructed from a wider variety of subdomains.



    \subsubsection{BioVALEX}
       \label{subcat_system}


We produced BioVALEX using an updated version of the tools in 
\cite{preiss:07}, which we will refer to as the Cambridge system, or Cambridge tools.\footnote{The updating consisted of a more recent unpublished version of the SCF classifier, which re-implemented the original classifier rules in a different programming language.} In this system an input corpus is first parsed with 
RASP
\cite{briscoe:06}. A classifier consisting of manually-defined rules 
then matches the RASP output to the SCFs in the Cambridge inventory (see Section~\ref{gold}), and 
the resulting lexicon is filtered. The intention here was to evaluate how well an SCF system designed for general language and consisting only of general-language tools 
could perform against a biomedical SCF gold standard 
when applied to a large biomedical corpus.

For our corpus we used the PubMed Open Access Subset (PMC OA), which
is the largest publicly available corpus of full-text articles in the
biomedical domain \cite{PMC:09}.  PMC OA comprises 169,338 articles
drawn from 1,233 medical journals indexed by the Medline citation
database, totalling approximately 400 million words.  Articles are
formatted according to a standard XML tag set \cite{PMC:XML:09}. The
National Institutes of Health (NIH) maintains a one-to-many mapping
from journals to 122 subdomains of biomedicine \cite{PMC:Subjects:09}.
The mapping covers about a third of the PMC OA journals, but these
account for over 70\% of the total data by word count.  Journals are
assigned up to five subdomains, with the majority assigned one (69\%)
or two (26\%). We used the same dataset as \cite{Lippincott:2011},
composed of journals that are assigned a single subdomain, and
discarding subdomains with less than one million words of data. The
resulting dataset contains a total of 342 journals in 37 biomedical
subdomains, with Genetics and Medical Informatics being the largest,
and Complementary Therapies and Ethics the
smallest. See \cite{Lippincott:2011} (Figure 4) for the distribution
of PMC OA data by subdomain.
 It has been shown that the open access collection is representative of the broader biomedical literature \citep{Verspoor:EtAl:09}.


RASP is a modular statistical parsing system which
includes a tokenizer, tagger, lemmatizer, and a wide-coverage
unification-based tag-sequence parser. We used the standard scripts
supplied with RASP to output the set of grammatical relations (GRs)
for the most probable analysis returned by the parser or, in the case
of parse failures, the GRs for the most likely sequence of
subanalyses. 
In contrast to Enju, RASP is an unlexicalized parser, meaning that
it does not have access to a lexicon of information about the behavior
of specific words (as opposed to classes of words, e.g.~words with particular part-of-speech tags), and thus does not
already embody a notion of subcategorization.\footnote{We
performed an experiment using output of the unlexicalized Stanford
parser \citep{klein:03} as input to the subcategorization steps and found that accuracy was the same on the SCF evaluation as for RASP.}
In the Cambridge system, a rule-based classifier incrementally matches GRs with the
corresponding SCFs.
The rule set was an updated version of that used in \citet{preiss:07}; note that it was developed for general language and not adapted for biomedical text.
From the classifier output, preliminary lexical entries are
constructed for each verb, containing the raw and relative frequencies
of SCFs found for each verb in the data. Finally, the entries are
filtered to obtain a more accurate lexicon. 

We used two filtering methods.  The first method was simple relative
frequency filtering, as in the BioLexicon.  Here, an empirically
determined threshold is set on the relative frequencies of SCFs,
filtering out SCFs whose relative frequency for a given verb is lower
than the threshold. This simple method has been shown to have more
accurate results than more complex statistical hypothesis tests
\citep{korhonen:02}. 
Previous work on SCF acquisition for general language using similar
SCF systems found a threshold of 0.02 to give the most accurate
results. In development experiments we found 0.02 and 0.03 to give
the most accurate results under different conditions, and we chose to
use a threshold of 0.03 to match the threshold used by the BioLexicon.

Second, we used a novel method which we call SCF-specific
filtering. The intuition behind this method is that the appropriate
reliability threshold for each SCF may be different, since some SCFs
are inherently much more frequent than others.  We did not have
information about the overall frequency of the different SCFs in
biomedical text, so we used information about their overall frequency
in general language from the COMLEX and ANLT dictionaries, along with
empirical information about high and low frequencies from the
unfiltered lexicon acquired for biomedicine, to set a specific
threshold for each SCF.  Although this method gives more accurate
results overall, it uses information about general language which may
or may not be applicable to the biomedical domain.

\subsubsection{Release of BioVALEX lexicon}

Along with this paper we release BioVALEX, the lexicon obtained by
using the Cambridge tools on the PMC OA data.
The archive contains a file for each subdomain we studied, as well as
an ``overall'' file for the entire PMC OA data set (note that the
latter is more than just the union of the former, since not all
journals are assigned a subdomain).  Each file has raw counts of each
verb/SCF combination in lines of the format ``VERB SCF COUNT''.  These
counts are unfiltered, and so include low-occurrence verbs: practical
use of the resource will likely require filtering appropriate to the
task at hand.  The file ``subcat\_frames.xml'' describes the SCF
inventory used in this study, with cross-references to other common
inventories (e.g.~COMLEX, the BioLexicon, etc).



  \subsection{Lexicon Evaluation and Results}
    \label{evaluation_methods}


To better understand the strengths and weaknesses of the BioLexicon
and 
BioVALEX,
we evaluated them against the SEM-30 and SEM-26 gold standards
(see 
Section~\ref{gold}).

First we evaluated 
BioVALEX
against SEM-30, using
the two filtering methods described in Section~\ref{subcat_system}. We
used the same evaluation measures as previous SCF evaluations for
general language \citep{korhonen:02,preiss:07}, namely type precision,
type recall, and F-score (the harmonic mean of precision and
recall). We also looked at the number of gold standard SCFs unseen in
the system output; that is, false negatives which were not detected at
all by the classifier (rather than being filtered out). 

The accuracy of 
BioVALEX 
on SEM-30 is shown in
Table~\ref{t:mainresult}. 
With the relative frequency threshold we see an overall
F-score of approximately 45, with recall favored over precision. Using
SCF-specific thresholds we obtain an F-score of nearly 60 with
precision slightly favored over recall. This improvement shows that
knowledge about general language SCFs can be useful for filtering in
biomedicine. The high score of 60 is about 9 points lower than can be
obtained for general language, e.g.~\citep{preiss:07}. This result is
respectable considering that no adaptations were made to the
acquisition system besides applying it to a large biomedical corpus, but
it shows that there is scope for adaptation to the biomedical
domain.  Note also that no SCFs are unseen in the unfiltered lexicon,
showing that the system is capable of finding all the SCFs in the gold
standard.

\begin{table}
\begin{tabular}{| l | r | r | r | r |}
  \hline
  Filtering     &  F-score & Prec & Rec & Unseen\\
  \hline
  \hline
  0.03 thresh & 44.96 & 39.37 & 52.41 & 0\\
  SCF-specific  & 59.94   & 60.87     & 59.04 & 0 \\
  \hline
\end{tabular}
\caption{Accuracy of the Cambridge SCF acquisition system on the 30-verb semantic gold standard.}
\label{t:mainresult}
\end{table}

Second, we performed a comparative evaluation of 
BioVALEX
with the BioLexicon. Since the BioLexicon includes only 26 of the 30
verbs in SEM-30 (see Table~\ref{t:verblist}), we used SEM-26, a subset of SEM-30 consisting of those 26 verbs.

The comparative evaluation was not straightforward since 
the BioLexicon SCF inventory is quite different from the Cambridge
inventory, and the mapping between the two inventories is
many-to-many. Recall that the BioLexicon inventory is induced from the
parsed corpus, whereas the Cambridge inventory is pre-defined. Each
inventory is more fine-grained in certain areas. For example, the
Cambridge inventory includes multiple frames for various constructions
that are distinguished in linguistics, such as predicate nominals
({\it He seemed a fool}, where {\it fool} is predicated of {\it He})
as opposed to direct object nominals ({\it He saw a fool}). On the
other hand, the BioLexicon inventory differentiates SCFs with PP
arguments according to preposition, so that NP-PP-{\it through},
NP-PP-{\it from}, and NP-PP-{\it for}, etc. are different frames,
while the Cambridge inventory has only two frames with an NP-PP
configuration.  Since SEM-26 is annotated using the Cambridge
inventory, we had two options: map the BioLexicon inventory to the
Cambridge inventory and evaluate the BioLexicon directly on SEM-26, or
modify SEM-26 to use a common intermediate representation and map both
inventories to this representation for evaluation. We used both these options.

We call the manual mapping from the BioLexicon inventory to the Cambridge inventory
``best match''. Here we manually examined each SCF in the BioLexicon
inventory and chose which single SCF in the Cambridge inventory it was most
likely to correspond to. Following the example above, the BioLexicon
frame NP-PP-{\it through}, NP-PP-{\it from}, etc. would map to the Cambridge
frame NP-PP. Similarly, the BioLexicon frame NP-PP-{\it into}-PP-{\it on}
would map to the Cambridge frame NP-PP-PP.  This process
resulted in a set of 22 SCF types for the BioLexicon. This is far lower
than the 97 SCF types reported in \citep{biolexicon:2008}\footnote{We found 136 SCF types when querying the BioLexicon.}, 
since we collapse the SCFs that are lexicalized for preposition.

After performing the ``best match'' mapping, we evaluated the
BioLexicon and 
BioVALEX
directly on SEM-26. A simple
relative frequency threshold of 0.03 was used for filtering in both
the BioLexicon and 
BioVALEX.
Although we have demonstrated that general language statistics can be
successfully used for SCF-specific filtering in biomedicine, we used relative
frequency thresholds for the comparative evaluation since it provides
a level playing field for the BioLexicon and BioVALEX.

We again report type precision, type recall, and F-score. We also
report the number of SCFs missing from the {\it filtered} lexicon.
Note that an SCF may be missing from the filtered lexicon either because it was not in the lexicon at all, or because it had low frequency and was filtered out.\footnote{We do not report SCFs unseen in the unfiltered lexicon here, because we did not have access to the unfiltered BioLexicon.}

The accuracy of BioVALEX
and the BioLexicon using ``best match'' on SEM-26 is shown
in Table~\ref{t:best-match}.\footnote{Note that the figures for BioVALEX with the 0.03
threshold differ from those in Table~\ref{t:mainresult} because they
are for only 26 verbs.}
We can see that the BioLexicon has a much higher
F-score even with simple filtering, approaching the F-score achieved
by 
BioVALEX
with SCF-specific filtering. Interestingly, we can also
see that the BioLexicon strongly favors precision over recall, while 
BioVALEX
is stronger on recall. The high precision of the BioLexicon is a result of the fact that it is produced with a deep, lexicalized parser already
adapted to the biomedical domain, including a POS tagger trained on biomedical text. This means that the output of the parsing stage already took into account some subcategorization information specific to the biomedical domain. This results in a high-precision system for biomedical text, but relies on up-front domain adaptation, whereas the Cambridge system is less precise but can be ported to new domains as long as there is a large corpus of raw data available. 

The lower recall likely reflects the
fact that the BioLexicon is based on only a single subdomain of
biomedicine, while 
BioVALEX 
is built from across PMC OA. It can be seen that the Cambridge system is able to hypothesize SCFs which are likely to be important for interpretation of the text. The trade-off is that the Cambridge system hypothesizes more frames overall, resulting in relatively low precision. This can be overcome in the future, however, with more sophisticated filtering methods, as suggested by the results in Table~\ref{t:mainresult}.

The importance of the higher recall for information extraction can be
seen when we look at the SCFs in SEM-26 which are not included in the BioLexicon. The sentences
in Figure~\ref{f:missing-fine} are examples of frames which appear in 
BioVALEX
but not in the BioLexicon. Note that the BioLexicon may include these frames for other verbs, but at least for the verbs in SEM-26 they were either filtered out or not present to begin with.

\begin{figure}
\begin{tabular}{|p{0.9\columnwidth}|}
\hline
 NP-ING-SC:\\ 
This study indicates that all treatment protocols seemed to be sufficiently effective and safe and that cheyletiellosis in rabbits can be successfully \underline{treated} using ivermectin or selamectin in clinical practice.\\[2pt]
While the AT immunologic activity is normal in this deficiency , plasma AT functional activity is markedly \underline{reduced} leading to risk of thrombosis.\\[5pt]
NP-PP-PP:\\ 
This phenomenon is dose-dependently \underline{inhibited} by leukotriene receptor antagonism with FPL 55712, SK\&F 104353 and montelukast.\\[2pt]
Thus , unlike the Tetrahymena ribozyme , the changes \underline{induced} in precursor RNA by incubation in the absence of divalent cations result in activation of the ribozyme.\\
\hline
\end{tabular}
\caption{Examples of SCFs in SEM-26 and BioVALEX but missing from the BioLexicon.}
\label{f:missing-fine}
\end{figure}



\begin{table}
\begin{tabular}{| l | r | r | r | r | }
  \hline
  Lexicon             & F-score & Prec & Rec & Missing \\
  \hline
  \hline
  BioVALEX    & 46.20   & 40.00     & 54.68 & 11\\
  BioLexicon          & 58.37   & 87.14     & 43.88 & 20 \\
  \hline
\end{tabular}
\caption{Accuracy of BioVALEX (threshold 0.03) and the BioLexicon, using best-match, on the 26 verbs in the intersection of the semantic gold standard and the BioLexicon.}
\label{t:best-match}
\end{table}

Because of the many-to-many nature of the mapping, we were concerned
that the ``best match'' mapping might be unfair to the BioLexicon,
since it only captured one SCF in the Cambridge inventory for each SCF
in the BioLexicon, even though there might be more than one legitimate
choice. Therefore, we also pursued another method to handle the
different SCF inventories.

We created from our gold standard a new gold standard with a much
coarser-grained SCF inventory. We did this by semi-manually creating
equivalence classes of SCFs based on types that both the BioLexicon
and Cambridge inventory could differentiate. First, we expanded the
best match by manually defining a more ``inclusive'' match, listing
all the Cambridge inventory SCFs which could be a match to a
BioLexicon SCF (accounting for the one-to-many aspect of the
BioLexicon-Cambridge inventory mapping). Then we created a bipartite
graph in which one set of nodes represented the Cambridge SCFs and the
other set represented the BioLexicon SCFs. Edges represented
``inclusive'' mapping rules. Each connected component was then considered a
coarse SCF. We call this mapping to coarse-grained SCFs semi-manual,
because the inclusive mapping rules were manually defined, but the
equivalence classes were found automatically.

The resulting coarse-grained inventory contained 14 broad SCFs. We
evaluated both BioVALEX
and the BioLexicon against a version of the gold
standard which had been mapped to this coarser inventory. We again
report type precision, type recall, F-score, and missing SCFs.


\begin{table}
\begin{tabular}{| l | r | r | r | r |}
  \hline
  Lexicon        &     F-score & Prec & Rec & Missing\\
  \hline
  \hline
  BioVALEX   & 65.38   & 55.43     & 79.69 & 2\\
  BioLexicon  & 69.23   & 90.00     & 56.25 & 4\\
  \hline
\end{tabular}
\caption{Accuracy of BioVALEX (threshold 0.03) and the BioLexicon using coarse-grained inventory, on the 26 verbs in the intersection of the semantic gold standard and the BioLexicon.}
\label{t:coarse}
\end{table}

The results using the coarse-grained inventory are shown in
Table~\ref{t:coarse}. As expected, both lexicons show higher accuracy
when evaluated using this more forgiving inventory. The general trends found earlier
still hold, however, with the BioLexicon favoring precision while 
BioVALEX
favors recall.

Note that even on the coarse-grained SCF inventory, the BioLexicon is
missing more SCFs post-filtering than 
BioVALEX.
The sentences
in Figure~\ref{f:missing-coarse} show examples of frames that were missing
from the BioLexicon for the verbs in our gold standard, but not from the Cambridge
lexicon.

\begin{figure}
\begin{tabular}{|p{0.9\columnwidth}|}
\hline
THAT-S:\\ 
Additionally , our image analysis allowed us to \underline{detect} that FTG mice also ventured further into the open arm compared to FNTG controls.\\[2pt]
All the caregivers \underline{expressed} that the feeling of safety for the patient and the caregiver was essential, emphasizing that professional back-up 24 hours a day was important.\\[5pt]
ING:\\ 
All of these stimuli \underline{activated} signaling through the MAP kinase/ERK pathway and led to the induction of P-YB-1S102.\\[2pt]
Although none of the mutations \underline{increased} binding to the same degree as removing the entire USH, they had little effect on the solubility of the protein compared to removal of the entire USH.\\

\hline
\end{tabular}
\caption{Examples of SCFs in the coarse-grained version of SEM-26 and BioVALEX but missing from the BioLexicon.}
\label{f:missing-coarse}
\end{figure}


Overall, it can be seen that the accuracy of both BioVALEX and the
BioLexicon against a biomedical gold standard is lower than for
general language SCF acquisition against general language SCF gold standards.
We believe 
the lower accuracy
arises from different sources for the two
lexicons. 
BioVALEX is insufficiently adapted to biomedical text, and hypothesizes a
wide variety of SCFs inappropriate for the domain, resulting in low precision. The BioLexicon, on the other hand, suffers from lower recall, which may mean that
a system whose components
have been manually adapted 
to a single subdomain does not generalize well enough to the
variety of
subdomains in PMC OA.
Domain adaptation for SCF acquisition is clearly needed
in order to create accurate, scalable SCF lexicons
to help with downstream NLP tasks, but 
a less-supervised approach is required to avoid over-adaptation to a single subdomain.


\section{Investigation of Definitions of Subcategorization in Biomedicine}
    \label{investigation_subcat}

Our second investigation looked at the notion of subcategorization in
biomedicine, and how it affects manual and automatic development of
lexical resources.  In Section~\ref{subcat} we introduced two
definitions of subcategorization: the ``syntactic'' definition defines
verbal arguments as those syntactic phrases which occur obligatorily
with a verb, while the ``semantic'' definition defines verbal
arguments as those syntactic phrases which are important for a full
description of a biomedical event. Section~\ref{gold} describes how we manually annotated a set of
sentences using both definitions, resulting in the SYN-10 and SEM-10 gold standards. In this section we describe how we used these resources to 
make comparisons
between the two definitions 
of subcategorization,
and also to investigate the effects on accuracy of an automatic SCF acquisition
system.
Both of these comparisons are novel in the literature.

We first look at some basic statistics about SYN-10 and
SEM-10. Table~\ref{t:gs_stats} in Section~\ref{gold} shows that SEM-10
had an average of 6.6 SCFs per verb, ranging from a low of 4 to a high
of 10; while SYN-10 had an average of 5.9 SCFs per verb, ranging from
a low of 3 to a high of 9. This observation suggests that the
syntactic method results in annotating slightly fewer frames per verb
than the semantic method, which is not surprising, since the
semantic method takes into account a broader range of phrases that the
syntactic method might consider adjuncts. However, the difference is
not very great.

We next compared SYN-10 and SEM-10 directly using the kappa
measure \citep{cohen:60}. Kappa is typically used to measure
inter-annotator agreement, when multiple annotators perform the same
task on the same data, and serves as a measure of the difficulty of
the task and the reliability of the human annotation. However, in this
case, we do not have a typical inter-annotator agreement scenario,
since the two annotators were given different instructions. Rather,
kappa here measures the difference between the two {\it methods}
of annotation, corresponding to the two definitions of
subcategorization. Results are presented in
Table~\ref{t:agreement}. The overall kappa was 0.58, well below the
threshold of 0.67 which is considered a minimum for moderate agreement
on NLP annotation tasks \citep{krippendorff:80}, indicating the
definitions have an effect on the resulting gold
standards.\footnote{We could have attempted to rule out an effect of different individuals doing the annotation by asking both annotators to annotate the data twice, once with each set of guidelines, but the time and effort required would have been prohibitive.}
The kappa for some verbs was well below 0.5; recall, however, that the
ten verbs in SYN-10 and SEM-10 were chosen in part for their large
number of adjuncts, so the agreement between the syntactic and semantic methods of annotation might be lower for this set of verbs than others in SEM-30.

As examples of sentences that the annotators treated differently,
consider sentences \ref{ex:activate-1} and \ref{ex:activate-2}. In
\ref{ex:activate-1}, the semantic annotator chose the frame NP-ADVP,
treating the adverb {\it significantly} as part of the SCF, while the
syntactic annotator chose the simple transitive frame NP (the NP is
{\it transcription}). In \ref{ex:activate-2}, the semantic annotator
chose the frame NP-PP, treating the PP {\it through the canonical and
noncanonical pathways} as part of the SCF, while the syntactic
annotator again chose NP (the NP is {\it NF-kB}). In both cases the
syntactic annotator was judging by what was syntactically obligatory,
and the semantic annotator by what was important to an understanding
of the event.

\ex.\label{ex:activate-1} The p53 mutant, which contains a disabled DNA-binding domain, does not \underline{activate} transcription significantly.

\ex.\label{ex:activate-2} Both receptors \underline{activate} NF-kB through the canonical and noncanonical pathways, with RANK specifically requiring TRAF6.

\begin{table}
\begin{tabular}{| l | l | l |}
  \hline
  verb & Kappa score & instances\\
  \hline
  \hline
  activate & 0.204022 & 152 \\
  analy(s$|$z)e & 0.214015 & 227 \\
  associate & 0.803061 & 203 \\
  compare & 0.390602 & 224 \\
  decrease & 0.498399 & 173 \\
  express & 0.479512 & 223 \\
  improve & 0.619959 & 239 \\
  mutate & 0.548926 & 108 \\
  occur & 0.044539 & 242 \\
  predict & 0.311750 & 172 \\
  \hline
  overall & 0.586751 & 1963 \\
  \hline
\end{tabular}
\caption{Agreement between methods using instructions for syntactic and semantic gold standards.}
\label{t:agreement}
\end{table}

Finally, we evaluated BioVALEX on SYN-10 and SEM-10.
The results are shown in
Table~\ref{t:syngs}.  Note that the F-score for SEM-10 is lower than for the full set of 30 verbs in SEM-30 (Table~\ref{t:mainresult}); precision in particular is much lower. This is
because the small number of verbs provides insufficient evidence
across SCFs for the SCF-specific filtering to perform at its best
(although it still slightly out-performs threshold filtering). An
interesting result emerges, however, which is that 
BioVALEX
is more accurate on SEM-10 than SYN-10, despite the fact that it uses
strictly syntactic information (parser output) as the input to
hypothesis generation. This reflects the fact that the Cambridge
system hypothesizes a wide variety of phrases as parts of the SCFs,
including some that are considered adjuncts by the linguistic definition of subcategorization, but not by the biomedical definition.

\begin{table}
\begin{tabular}{| l | r | r | r |}
  \hline
  Gold standard & F-score & Prec & Rec \\
  \hline
  \hline
  Semantic      & 53.19   & 40.98     & 75.76 \\
  Syntactic     & 47.67   & 36.28     & 69.49 \\
  \hline
\end{tabular}
\caption{Accuracy of BioVALEX (with SCF-specific filtering) on semantic and syntactic gold standards, for the ten verbs in the syntactic gold standard.}
\label{t:syngs}
\end{table}






\section{Investigation of Subdomain Variation}
   \label{investigation_variation}
   \subsection{Subdomain Variation Methods}
    \label{subdomain_variation_methods}


This section describes our approach to quantifying differences in verb
subcategorization behavior 
across
subdomains of biomedicine.  The primary type of data that we
investigate is a verb's {\it SCF distribution}, that is, the
probability distribution representing the relative frequency 
of the
verb appearing in each SCF in the Cambridge inventory.
Our goal is to discover
the presence or absence of significant differences between a verb's
SCF distribution in different subdomains. By investigating whether
individual verbs exhibit specialized behavior across subdomains, we build up
an overall picture of subdomain variation in verb subcategorization.

To obtain the SCF distributions we use the Cambridge system, applied
to 37 input corpora consisting of the PMC OA data from the individual
subdomains, to produce a per-verb SCF distribution for each subdomain.
We compute a distance metric between the per-verb SCF distributions for each pair of subdomains. 
We use clustering and graphical methods to illustrate the results.

\subsubsection{Measuring divergence}
To measure the distance between two SCF distributions we use the Jensen-Shannon divergence (JSD) \citep{Grosse:02}, a finite and symmetric measurement of divergence between probability distributions, defined as:
\[
\mathit{JSD}(X, Y) = H\!\left(\frac{X + Y}{2}\right) - \frac{H(X) + H(Y)}{2}
\]
where \( H \) is the Shannon entropy of a distribution,
\[
H(X) = -\sum_x p(x) \log p(x).
\]
JSD values range between 0 (identical distributions) and 1 (disjoint distributions); the measure is closely related to the familiar, but asymmetric, Kullback-Leibler divergence \citep{cover:91}.  We calculate the JSD between a given verb's SCF distributions for each pair of subdomains.

\subsubsection{Presentation}
We present detailed results for six verbs: {\it develop}, {\it express}, {\it
perform}, {\it predict}, {\it recognize} and {\it treat}.  These verbs
were chosen because they exemplify one or more interesting
characteristics, such as sharp divergence in a single subdomain or a
wide variety across all subdomains; there was a wide variety in the amount of variation exhibited 
(see Section~\ref{subdomain_subcat_behavior}). For each of the six verbs there are four
different views of the data, described below.  For a given verb, we
only show subdomains in which it occurs a minimum of 200 times.

Heat maps present pairwise calculations of a metric between a set of objects: cell \( \langle x, y \rangle \) is shaded according to the value of \( \mathit{metric}(x, y) \).  Our heat maps show the JSD values between pairs of subdomains for a given verb: the cells are shaded from white (JSD value of 1, maximum divergence) to black (JSD value of 0, identity).  The actual values are inscribed in each cell.

Dendrograms present the results of hierarchical clustering performed directly on the JSD values.  The algorithm begins with each instance (in our case, subdomains) as a singleton cluster, and repeatedly joins the two most similar clusters until all the data is clustered together.  The order of these merges is recorded as a tree structure that can be visualised as a dendrogram in which the length of a branch represents the distance between its child nodes.  Similarity between clusters is calculated using average cosine distance between all members, known as ``average linking''.  The tree leaves represent data instances (subdomains) and the paths between them are proportional to the pairwise distance.  This allows visualization of multiple potential clusterings, as well as a more intuitive sense of how distinct clusters truly are.  Rather than choosing a set number of flat clusters, the trees mirror the nested structure of the data.

Scatter plots project the optimal K-Means clustering onto the first two principal components of the data.  The optimal clustering was determined via the Gap Statistic \citep{Tibshirani:01}, which increases the cluster count and runs K-Means until the improvement in error on the data is within a small range of the improvement on randomly-generated data with similar statistical properties.  The principal components are normalised, and points coloured according to cluster membership, with the subdomain written immediately above.  The clustering is performed using the full SCF distributions, while the principal component analysis relies on decomposing the distributions into two optimal dimensions.

Finally, tables show the top three SCFs for each subdomain, along with their relative frequencies.  The SCFs are shown in their equivalent COMLEX forms, which reflect the complements involved, as described in Section \ref{investigation_subcat}.




   \subsection{Discussion}
    \label{subdomain_variation_discussion}


\subsubsection{Background}


In previous studies \cite{Lippincott:2011} biomedical subdomains have been compared in terms of basic lexical features (verb, noun, adverb and adjective lemmas, part-of-speech tags, etc) and using topic and selectional preference modeling methods.  The results often contrast with those of the current paper, and we briefly review them here for easier comparison.

It was found that subdomains formed stable clusters in terms of basic lexical behavior, and several recurrent clusters were identified, shown in Table \ref{clusters}.  The first cluster includes subdomains dealing primarily with microscopic processes and can be further subdivided into groupings of biochemical (\emph{Biochemistry, Genetics}) and cellular (\emph{Cell Biology}, \emph{Embryology}) study.  The second cluster includes subdomains focused on specific anatomical systems (\emph{Endocrinology}, \emph{Pulmonary Medicine}).  The third cluster includes subdomains focused on clinical medicine (\emph{Psychiatry}) or specific patient-types (\emph{Geriatrics}, \emph{Pediatrics}).  The fourth and final cluster includes subdomains focused on social and ethical aspects of medicine (\emph{Ethics}, \emph{Education}).

\begin{table*}[h]
  \begin{tabular}{|p{0.2\textwidth}|p{0.2\textwidth}|p{0.2\textwidth}|p{0.2\textwidth}|p{0.2\textwidth}|}
    \hline
    \multicolumn{2}{|c|}{\emph{Microscopic}} & & & \\
\cline{1-2}
    \emph{Cellular}     & \emph{Biochemical}        & \emph{System-specific}    & \emph{Clinical}           & \emph{Social} \\
    \hline 
    Cell Biology & Biochemistry       & Endocrinology      & Geriatrics         & Ethics \\
    Virology     & Molecular Biology  & Rheumatology       & Pediatrics         & Education \\
    Microbiology & Genetics           & Pulmonary Medicine & Psychiatry         & \\
    Embryology   &                    &                    & Obstetrics         & \\
    \hline
  \end{tabular}
  \caption{Common subdomain clusters when considering lexical features.}
  \label{clusters}
\end{table*}

Almost all variation was significant at a high (\textgreater .99) level, supporting the intuition that lexical features such as vocabulary are primary aspects of different subdomains.  It was also noted that the handful of syntactic features considered, such as average sentence length and grammatical relation types, did not necessarily align with the more stable lexical clusters.  Verbs showed a mixture of the syntactic and lexical variation, reflecting their combined semantic and structural roles.

\subsubsection{Verb subcategorization behavior}
\label{subdomain_subcat_behavior}

There was great variety in the amount of variation each verb exhibited between subdomains.  For example, the verb {\it induce} has a maximum JSD of .07 (between Botany and Physiology), while {\it develop} has a maximum of .62 (between Embryology and Therapeutics).  Similarly, some verbs shift behavior in just one or two subdomains (e.g.~{\it activate} in Molecular Biology and Biochemistry) while others are broadly heterogeneous (e.g.~{\it predict}).

In contrast to the lexical results, verb subcategorization tends to show small pockets of specialized behavior, and the distinction between microscopic, systemic, clinical and social subdomains is less consistent.  Instead, there are specific cases where verbs have taken on a specific usage in a single subdomain.  The clearest example of this is {\it develop} (Figure \ref{develop:hm}), which has the distinct emphasis on intransitive usage INTRANS in Embryology (``The fetus develops''), compared to its typical transitive usage NP in other subdomains (``He developed a tumor'').  A similar example is the verb {\it express}, which takes NP-AS-NP-SC (``He expressed X as Y'') frequently in most subdomains, but not in Genetics and Cell Biology, where the simple transitive NP is unusually common.

Sometimes the reasons for specialized behavior are not so obvious: {\it perform} behaves differently in Medical Informatics and Education.  Both subdomains show unusually high usage of NP-PRED-RS, and Education is unique in its frequent use of TRANS.  

Not all verb behavior follows the pattern of extreme specialization in one or two subdomains: the heatmap for {\it predict} (Figure \ref{predict:hm}), for example, is extremely diverse.  Looking at the corresponding dendrogram (Figure \ref{predict:dend}) shows a clear distinction between system-specific and clinical subdomains in the top half, and the microscopic subdomains in the bottom half.  The top SCFs (Table \ref{predict:table}) show that the microscopic subdomains use {\it predict} in conjunction with infinitival forms (e.g.~NP-TOBE, ``We predicted it to be'').  {\it Recognize}, like {\it predict}, shows a diverse set of JSD values.  It is unclear why some subdomains prefer e.g.~THAT-S or NP-AS-NP, except perhaps that diagnosis-oriented subdomains prefer the latter.

Some verbs may have more than one specialized behavior: {\it treat} is
generally either used in a clinical sense (NP-FOR-NP, ``We
treat the patient for concussion'') or with raising
(NP-AS-NP-SC, ``We treat the infection as a separate issue'').
The most distinct subdomain, Public Health, appears as an outlier
because of its unique combination of both usages.  This is an example
of a heterogeneous subdomain merging SCF behaviors into a third,
unique distribution.





\section{Conclusions}


Our study has provided some insights into the current state of verb
subcategorization frame acquisition for biomedicine.  First, using a
system unadapted for biomedicine but applied to a large biomedical
corpus, we achieved acceptable results using a simple relative
frequency threshold similar to previously reported optimum
thresholds. A new method of SCF-specific filtering was found to offer
improved accuracy even though it depended on SCF frequency information
from general language. Still, performance drops off considerably
compared to general language, losing more than 10 points on F-score,
indicating that there is room for adaptation of SCF systems to
biomedicine.

Second, we compared two biomedical SCF lexicons, each representing a
different aspect of the state of the art in SCF acquisition. We found
that the BioLexicon, built with an SCF acquisition system in which
each component has been adapted to biomedical text using manually
annotated data in the molecular biology subdomain, favored precision
over recall when evaluated against our SCF gold standard drawn from
across PMC OA. On the other hand, BioVALEX, built using a state of the
art system for general language SCF acquisition and unadapted to
biomedical text save for the input corpus, favored recall over
precision. Neither type of system is ideal, and the contrast between
the two highlights the need for domain adaptation techniques that can
cover a broader range of subdomains.

Next, we observed that using two different definitions of
subcategorization -- the ``semantic'' definition, which collapses the
argument-adjunct distinction, and the ``syntactic'' definition, which
retains it -- results in very different styles of annotation, and
therefore different evaluation results for an SCF system depending on
the definition used in the gold standard.  Interestingly, because the
Cambridge system readily hypothesizes many phrase types co-occurring
with verbs as part of the SCF, it is more consistent with the semantic
definition of subcategorization and achieved higher accuracy on the
semantic gold standard than the syntactic one. This behavior may or
may not be desirable depending on the application, but needs to be
taken into consideration.

Finally, we found significant variation in SCF behavior between
biomedical subdomains, with different properties than in previously
studied lexical variation.  Most notably, subdomain clusters produced
from the subcategorization behavior of individual verbs did not align
well with clusters based on simple lemma
frequencies \cite{Lippincott:2011}, and often were not readily
interpretable in terms of major subdomain-spanning topics. Some verb
behavior occurred in discrete pockets, just one or two subdomains,
rather than in one of the major clusters identified in lexical
studies.  One factor in this result is that 
we considered individual verbs, whereas lexical studies average 
variation across all lexical items of a given class.  
Another potential factor is that distinct senses of a verb, e.g.~general and specialized, may create confounding effects when the SCF behavior of the two senses is overlaid in a subdomain.
Future work could involve broadening
the set of verbs considered and averaging the divergence in their SCF
distributions to determine whether there is a correlation with the
lexical results.  This would require a principled way of combining the
distributions, beyond simple equal weighting, because the proportion
of verbs that change SCF behavior is small and would be overwhelmed by
noise.


Our results suggest several drawbacks to current methods of SCF
acquisition for biomedicine.  First, differences in the definition of
subcategorization, and consequently different SCF inventories and gold
standards, make performance comparison difficult.  Second, the large
performance drop when applying an unadapted system to biomedical text
demonstrates the need for domain-specific adaptation.  Third, the
significant subdomain variation suggests that adaptation based on a
small subset of biomedical text does not address biomedical text in
general.  Current biomedical lexicons rely either directly on manual
annotation, or indirectly on other resources that do.  Because of the
difficulty in producing such resources, it is not feasible to rebuild
them for new subdomains.

Task-based evaluation uses the output of a system to augment performance on a downstream task that is easier to assess \citep{vlachos:2011}.
For example, an unlexicalized parser or relationship extractor could be augmented with SCF
probabilities, and then re-evaluated to determine improvement.  In this setup, the definition of subcategorization and the SCF inventories used by each system would not need to be reconciled: the candidate parses would simply be reranked based on the new probabilities from the lexicon. Some promising results in this direction have already been obtained for general language \cite{Carroll:98}.

By decoupling evaluation from a particular definition and inventory, unsupervised methods, such as 
clustering and graphical models, could be evaluated alongside supervised and rule-based methods.
Unsupervised approaches have a particular advantage in domain adaptation, since they do not
rely on domain-specific resources, and because their definitions and inventories emerge from
their domain-specific training data.  Ideally, this would also involve moving away from features 
that are domain-sensitive, such as parser output, to shallower and more robust features like 
parts-of-speech or phrase chunking.  There are a range of semi-supervised methods between these
extremes, such as self-training and hybrid graphical modeling \citep{zhu:2006}.  A potential area for future work is determining an optimal middle ground.

Finally, some of the best results on SCF acquisition for general
language have used information about verb semantic classes to smooth
conditional SCF distributions \citep{korhonen:02}, based on the
linguistic fact that semantically similar verbs tend to have
syntactically similar behavior. This avenue needs further exploration
in biomedicine. Incorporating word-sense disambiguation may also
improve accuracy and understanding of subcategorization in
biomedicine, especially since we see that verb behavior in different
subdomains may involve overlays of general and 
specialized
senses.

%\appendix


\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/develop_heatmap.png}
  \caption{Heat map of Jensen-Shannon divergence between subdomains for the SCF distributions of {\it develop}.}
  \label{develop:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/develop_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average-linking for the SCF distributions of {\it develop}.}
  \label{develop:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/develop_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of {\it develop}.}
  \label{develop:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{| c |}{Top three SCFs} \\
    \hline
    Psychiatry & NP & 0.399905 & NP-PRED-RS & 0.141902 & NP-FOR-NP & 0.137602 \\
Education & NP & 0.328025 & NP-FOR-NP & 0.140127 & INTRANS & 0.121019 \\
Environmental Health & NP & 0.309671 & INTRANS & 0.138097 & NP-FOR-NP & 0.128797 \\
Pharmacology & NP & 0.441249 & NP-FOR-NP & 0.118324 & NP-PRED-RS & 0.115859 \\
Geriatrics & NP & 0.390192 & NP-PRED-RS & 0.140725 & NP-FOR-NP & 0.115139 \\
Public Health & NP & 0.361242 & NP-FOR-NP & 0.158063 & NP-PP-PRED & 0.101749 \\
Biotechnology & NP & 0.356888 & NP-FOR-NP & 0.173096 & NP-PRED-RS & 0.098217 \\
Biomedical Engineering & NP & 0.385159 & NP-FOR-NP & 0.169611 & NP-PP-PRED & 0.111307 \\
Medical Informatics & NP & 0.410649 & NP-FOR-NP & 0.168911 & NP-PP-PRED & 0.083231 \\
Obstetrics & NP & 0.315455 & INTRANS & 0.152435 & NP-PRED-RS & 0.120678 \\
Medicine & NP & 0.345473 & NP-PRED-RS & 0.137849 & NP-PP-PRED & 0.091899 \\
Genetics, Medical & NP & 0.303856 & NP-PRED-RS & 0.143445 & NP-FOR-NP & 0.114139 \\
Tropical Medicine & NP & 0.345211 & INTRANS & 0.116705 & NP-PRED-RS & 0.114743 \\
Microbiology & NP & 0.293089 & NP-FOR-NP & 0.127123 & NP-PRED-RS & 0.095342 \\
Neoplasms & NP & 0.304064 & NP-PRED-RS & 0.147233 & NP-PP-PRED & 0.099857 \\
Critical Care & NP & 0.340197 & NP-PRED-RS & 0.182325 & INTRANS & 0.099528 \\
Molecular Biology & NP & 0.245846 & NP-FOR-NP & 0.156345 & NP-PP-PRED & 0.100831 \\
Physiology & NP & 0.366467 & NP-FOR-NP & 0.131138 & NP-PRED-RS & 0.100599 \\
Veterinary Medicine & NP & 0.287117 & NP-PRED-RS & 0.117791 & INTRANS & 0.099387 \\
Science & NP & 0.263721 & INTRANS & 0.128445 & NP-PP-PRED & 0.109314 \\
Genetics & NP & 0.261829 & NP-FOR-NP & 0.142401 & INTRANS & 0.107713 \\
Neurology & NP & 0.231093 & INTRANS & 0.207683 & NP-PP-PRED & 0.103842 \\
Cell Biology & NP & 0.223591 & INTRANS & 0.200704 & PP-PRED-RS & 0.084507 \\
Therapeutics & NP & 0.350314 & NP-FOR-NP & 0.155172 & NP-PRED-RS & 0.101097 \\
Endocrinology & NP & 0.273525 & INTRANS & 0.161085 & NP-PRED-RS & 0.137959 \\
Communicable Diseases & NP & 0.287262 & NP-PRED-RS & 0.149480 & INTRANS & 0.144714 \\
Pediatrics & NP & 0.361596 & NP-PRED-RS & 0.194514 & INTRANS & 0.124688 \\
Biochemistry & NP & 0.285505 & INTRANS & 0.231332 & NP-FOR-NP & 0.120059 \\
Botany & NP & 0.281346 & INTRANS & 0.189602 & NP-FOR-NP & 0.128440 \\
Virology & NP & 0.379412 & NP-PRED-RS & 0.136275 & NP-FOR-NP & 0.109804 \\
Gastroenterology & NP & 0.334848 & NP-PRED-RS & 0.210606 & NP-PP-PRED & 0.127273 \\
Pulmonary Medicine & NP & 0.300429 & NP-PRED-RS & 0.158798 & NP-PP-PRED & 0.115880 \\
Ethics & NP & 0.274298 & INTRANS & 0.228942 & NP-PP-PRED & 0.155508 \\
Vascular Diseases & NP & 0.318367 & NP-PRED-RS & 0.155102 & INTRANS & 0.101224 \\
Rheumatology & NP & 0.306562 & NP-PRED-RS & 0.159647 & NP-PP-PRED & 0.119491 \\
Ophthalmology & NP & 0.245421 & NP-PRED-RS & 0.146520 & INTRANS & 0.124542 \\
Embryology & INTRANS & 0.510504 & INTRANS-RECIPSUBJ-PL & 0.172269 & NP & 0.120798 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for {\it develop}.}
\label{develop:table}
\end{figure*}
\clearpage

\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/express_heatmap.png}
  \caption{Heat map of Jensen-Shannon divergence between subdomains for the SCF distributions of {\it express}.}
  \label{express:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/express_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average-linking for the SCF distributions of {\it express}.}
  \label{express:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/express_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of {\it express}.}
  \label{express:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{| c |}{Top three SCFs} \\
    \hline
    Genetics & NP & 0.484719 & NP-PP-PRED & 0.088202 & NP-PRED-RS & 0.077303 \\
Cell Biology & NP & 0.436123 & NP-PRED-RS & 0.256388 & NP-PP-PRED & 0.183260 \\
Genetics, Medical & NP & 0.445434 & NP-PRED-RS & 0.084633 & NP-PP-PRED & 0.082405 \\
Biochemistry & NP & 0.320611 & NP-PRED-RS & 0.122137 & NP-AS-NP-SC & 0.113700 \\
Botany & NP & 0.457393 & NP-PRED-RS & 0.107769 & PP-PRED-RS & 0.084586 \\
Molecular Biology & NP & 0.401806 & NP-PP-PRED & 0.151806 & NP-PRED-RS & 0.125282 \\
Microbiology & NP & 0.393716 & NP-PRED-RS & 0.192811 & NP-PP-PRED & 0.152821 \\
Tropical Medicine & NP & 0.362590 & NP-AS-NP-SC & 0.152518 & NP-AS-NP & 0.152518 \\
Pharmacology & NP & 0.300459 & NP-AS-NP-SC & 0.181193 & NP-AS-NP & 0.181193 \\
Physiology & NP & 0.320866 & NP-AS-NP-SC & 0.140748 & NP-AS-NP & 0.140748 \\
Endocrinology & NP & 0.389426 & NP-PRED-RS & 0.131325 & NP-AS-NP-SC & 0.117112 \\
Neoplasms & NP & 0.439103 & NP-PP-PRED & 0.200038 & NP-PRED-RS & 0.171003 \\
Biotechnology & NP & 0.416469 & NP-PRED-RS & 0.182106 & NP-PP-PRED & 0.165479 \\
Rheumatology & NP & 0.435431 & NP-PRED-RS & 0.136413 & NP-PP-PRED & 0.132412 \\
Neurology & NP & 0.384721 & NP-PP-PRED & 0.137646 & NP-PRED-RS & 0.135582 \\
Communicable Diseases & NP & 0.336735 & NP-AS-NP-SC & 0.204082 & NP-AS-NP & 0.204082 \\
Virology & NP & 0.388041 & NP-PRED-RS & 0.227216 & NP-PP-PRED & 0.185567 \\
Science & NP & 0.392503 & NP-PRED-RS & 0.172770 & NP-PP-PRED & 0.138302 \\
Medicine & NP & 0.396785 & NP-PRED-RS & 0.167203 & NP-PP-PRED & 0.154984 \\
Vascular Diseases & NP-AS-NP-SC & 0.281022 & NP-AS-NP & 0.281022 & NP & 0.253650 \\
Pulmonary Medicine & NP & 0.328225 & NP-AS-NP-SC & 0.186462 & NP-AS-NP & 0.186462 \\
Environmental Health & NP & 0.281679 & NP-AS-NP-SC & 0.167877 & NP-AS-NP & 0.167877 \\
Public Health & NP & 0.266667 & NP-PP-PRED & 0.183333 & NP-PP & 0.126190 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for {\it express}.}
\label{express:table}
\end{figure*}
\clearpage

\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/perform_heatmap.png}
  \caption{Heat map of Jensen-Shannon divergence between subdomains for the SCF distributions of {\it perform}.}
  \label{perform:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/perform_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average-linking for the SCF distributions of {\it perform}.}
  \label{perform:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/perform_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of \emph{perform}.}
  \label{perform:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{c|}{Top three SCFs} \\
    \hline
    Medical Informatics & NP & 0.361941 & NP-PP-PRED & 0.177756 & NP-PRED-RS & 0.084217 \\
Education & NP & 0.442718 & NP-PRED-RS & 0.116505 & INTRANS & 0.100971 \\
Molecular Biology & NP & 0.248283 & NP-ING-SC & 0.124142 & NP-ING-OC & 0.124142 \\
Genetics, Medical & NP & 0.262342 & NP-ING-SC & 0.120675 & NP-ING-OC & 0.120675 \\
Pharmacology & NP & 0.304765 & NP-PP-PP PFORM & 0.146923 & NP-ING-SC & 0.102581 \\
Critical Care & NP & 0.441001 & NP-PP-PP PFORM & 0.080182 & NP-ING-SC & 0.075064 \\
Communicable Diseases & NP & 0.431208 & NP-ING-SC & 0.075201 & NP-ING-OC & 0.075201 \\
Gastroenterology & NP & 0.484485 & NP-PP-PP PFORM & 0.070187 & NP-ING-SC & 0.069537 \\
Therapeutics & NP & 0.511537 & NP-FOR-NP & 0.065702 & NP-ING-SC & 0.057880 \\
Ophthalmology & NP & 0.536599 & NP-PP-PRED & 0.057788 & NP-ING-SC & 0.055036 \\
Obstetrics & NP & 0.454327 & NP-PP-PRED & 0.079327 & NP-ING-SC & 0.066106 \\
Biomedical Engineering & NP & 0.393035 & NP-PP-PRED & 0.074627 & NP-TO-INF-OC & 0.073383 \\
Pulmonary Medicine & NP & 0.374464 & NP-PP-PP PFORM & 0.094271 & NP-ING-SC & 0.092366 \\
Medicine & NP & 0.362900 & NP-ING-SC & 0.099518 & NP-ING-OC & 0.099518 \\
Physiology & NP & 0.394495 & NP-ING-SC & 0.083524 & NP-ING-OC & 0.083524 \\
Neoplasms & NP & 0.382559 & NP-PP-PP PFORM & 0.091148 & NP-ING-SC & 0.083187 \\
Rheumatology & NP & 0.333756 & NP-PP-PP PFORM & 0.106480 & NP-ING-SC & 0.089181 \\
Neurology & NP & 0.331288 & NP-PP-PP PFORM & 0.105171 & NP-ING-SC & 0.088721 \\
Tropical Medicine & NP & 0.370042 & NP-ING-SC & 0.105513 & NP-ING-OC & 0.105513 \\
Psychiatry & NP & 0.381216 & NP-PRED-RS & 0.092344 & NP-PP-PRED & 0.092344 \\
Environmental Health & NP & 0.300141 & NP-PP-PRED & 0.103796 & NP-ING-SC & 0.091065 \\
Pediatrics & NP & 0.450953 & NP-PRED-RS & 0.073572 & NP-PP-PRED & 0.062320 \\
Veterinary Medicine & NP & 0.407389 & NP-ING-SC & 0.099351 & NP-ING-OC & 0.099351 \\
Vascular Diseases & NP & 0.444747 & NP-ING-SC & 0.089117 & NP-ING-OC & 0.089117 \\
Geriatrics & NP & 0.457423 & NP-PRED-RS & 0.080250 & NP-TO-INF-VC & 0.072671 \\
Virology & NP & 0.312346 & NP-ING-SC & 0.115070 & NP-ING-OC & 0.115070 \\
Embryology & NP & 0.260802 & NP-ING-SC & 0.135802 & NP-ING-OC & 0.135802 \\
Microbiology & NP & 0.276414 & NP-ING-SC & 0.126016 & NP-ING-OC & 0.126016 \\
Botany & NP & 0.249518 & NP-ING-SC & 0.131218 & NP-ING-OC & 0.131218 \\
Biochemistry & NP & 0.264828 & NP-ING-SC & 0.134100 & NP-ING-OC & 0.134100 \\
Science & NP & 0.255107 & NP-ING & 0.130580 & NP-ING-OC & 0.130580 \\
Genetics & NP & 0.305055 & NP-ING & 0.114337 & NP-ING-OC & 0.114337 \\
Biotechnology & NP & 0.337702 & NP-ING & 0.107471 & NP-ING-OC & 0.107471 \\
Cell Biology & NP & 0.297386 & NP-ING & 0.153232 & NP-ING-OC & 0.153232 \\
Public Health & NP & 0.338684 & NP-PRED-RS & 0.097372 & NP-TO-INF-VC & 0.081143 \\
Endocrinology & NP & 0.352185 & NP-ING & 0.141674 & NP-ING-OC & 0.141674 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for \emph{perform}.}
\label{perform:table}
\end{figure*}
\clearpage

\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/predict_heatmap.png}
  \caption{Heat map of Jensen--Shannon divergence between subdomains for the SCF distributions of \emph{predict}.}
  \label{predict:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/predict_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average linkage for the SCF distributions of \emph{predict}.}
  \label{predict:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/predict_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of \emph{predict}.}
  \label{predict:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{c|}{Top three SCFs} \\
    \hline
    Vascular Diseases & NP-PP-PRED & 0.319039 & NP & 0.259005 & NP-PRED-RS & 0.197256 \\
Psychiatry & NP & 0.296053 & NP-PP-PRED & 0.265351 & NP-PRED-RS & 0.155702 \\
Public Health & NP & 0.313056 & NP-PP-PRED & 0.258160 & NP-PRED-RS & 0.143917 \\
Medicine & NP & 0.333758 & NP-PP-PRED & 0.249682 & NP-PRED-RS & 0.152866 \\
Communicable Diseases & NP-PP-PRED & 0.272923 & NP & 0.242837 & NP-PRED-RS & 0.139685 \\
Physiology & NP & 0.297170 & NP-PP-PRED & 0.266509 & NP-PRED-RS & 0.127358 \\
Neoplasms & NP-PP-PRED & 0.301850 & NP & 0.252678 & NP-PP & 0.176241 \\
Critical Care & NP & 0.321659 & NP-PP-PRED & 0.291244 & NP-PRED-RS & 0.185253 \\
Pulmonary Medicine & NP & 0.610138 & NP-PP-PRED & 0.117051 & NP-PRED-RS & 0.073733 \\
Rheumatology & NP & 0.287570 & NP-PP-PRED & 0.257885 & NP-PRED-RS & 0.150278 \\
Environmental Health & NP & 0.356804 & NP-PP-PRED & 0.259309 & NP-PRED-RS & 0.119838 \\
Neurology & NP & 0.239140 & NP-PP-PRED & 0.174610 & HAT-S & 0.115141 \\
Biotechnology & NP & 0.304348 & NP-PP-PRED & 0.214393 & NP-TOBE & 0.143928 \\
Virology & NP & 0.176289 & NP-TOBE & 0.139175 & NP-PP-PRED & 0.126804 \\
Biochemistry & NP-PP-PRED & 0.190345 & NP & 0.167586 & NP-TOBE & 0.124138 \\
Tropical Medicine & NP & 0.261468 & NP-PP-PRED & 0.133486 & NP-PRED-RS & 0.104587 \\
Molecular Biology & NP & 0.212812 & NP-PP-PRED & 0.185082 & NP-TOBE & 0.105761 \\
Microbiology & NP & 0.211287 & NP-TOBE & 0.165237 & NP-TO-INF-OC & 0.125508 \\
Botany & NP & 0.265457 & NP-TOBE & 0.139535 & NP-TO-INF-OC & 0.119682 \\
Genetics & NP & 0.258138 & NP-PP-PRED & 0.137358 & NP-TOBE & 0.103301 \\
Genetics, Medical & NP & 0.277823 & NP-PP-PRED & 0.187652 & NP-TO-INF-OC & 0.130788 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for \emph{predict}.}
\label{predict:table}
\end{figure*}
\clearpage

\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/recognize_heatmap.png}
  \caption{Heat map of Jensen--Shannon divergence between subdomains for the SCF distributions of \emph{recognize}.}
  \label{recognize:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/recognize_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average linkage for the SCF distributions of \emph{recognize}.}
  \label{recognize:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/recognize_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of \emph{recognize}.}
  \label{recognize:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{c|}{Top three SCFs} \\
    \hline
    Public Health & NP & 0.257610 & NP-PP-PRED & 0.125464 & NP-AS-NP & 0.096511 \\
Environmental Health & NP & 0.302128 & HAT-S & 0.093617 & NP-PP-PRED & 0.093617 \\
Medicine & NP & 0.413386 & NP-PP-PRED & 0.118110 & NP-PRED-RS & 0.100394 \\
Medical Informatics & NP & 0.332331 & NP-PP-PRED & 0.169925 & IT-PASS-SFIN & 0.075188 \\
Tropical Medicine & NP & 0.423986 & NP-S & 0.108108 & IT-PASS-SFIN & 0.104730 \\
Vascular Diseases & NP & 0.251641 & IT-PASS-SFIN & 0.157549 & NP-AS-NP-SC & 0.135667 \\
Pulmonary Medicine & NP & 0.362429 & IT-PASS-SFIN & 0.132827 & NP-S & 0.121442 \\
Neoplasms & NP & 0.447775 & NP-PP-PRED & 0.117166 & NP-AS-NP-SC & 0.101726 \\
Neurology & NP & 0.396584 & NP-PP-PRED & 0.146110 & NP-PRED-RS & 0.104364 \\
Rheumatology & NP & 0.505841 & NP-PP-PRED & 0.156542 & NP-PRED-RS & 0.096963 \\
Genetics & NP & 0.491974 & NP-PP-PRED & 0.130016 & NP-PRED-RS & 0.108347 \\
Microbiology & NP & 0.505447 & NP-PP-PRED & 0.159041 & NP-PRED-RS & 0.100218 \\
Virology & NP & 0.525084 & NP-PP-PRED & 0.158863 & NP-PRED-RS & 0.107023 \\
Science & NP & 0.530660 & NP-PP-PRED & 0.136792 & NP-PRED-RS & 0.106132 \\
Communicable Diseases & NP & 0.463087 & NP-AS-NP-SC & 0.194631 & NP-AS-NP & 0.194631 \\
Biochemistry & NP & 0.465596 & NP-PP-PRED & 0.135321 & NP-PRED-RS & 0.080275 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for \emph{recognize}.}
\label{recognize:table}
\end{figure*}
\clearpage

\newpage
\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/treat_heatmap.png}
  \caption{Heat map of Jensen--Shannon divergence between subdomains for the SCF distributions of \emph{treat}.}
  \label{treat:hm}
\end{figure*}

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/treat_dendrogram.png}
  \caption{Hierarchical clustering of subdomains via average linkage for the SCF distributions of \emph{treat}.}
  \label{treat:dend}
\end{figure*}

\newpage

\begin{figure*}
  \centering
  \includegraphics[height=.4\textheight]{figures/treat_pca.png}
  \caption{Two-dimensional PCA reduction with Gap-statistic-optimal clustering for the SCF distributions of \emph{treat}.}
  \label{treat:pca}
\end{figure*}

\begin{figure*}
\centering
\scriptsize
\begin{tabular}{| l | l l | l l | l l |}
    \hline
    Subdomain & \multicolumn{6}{c|}{Top three SCFs} \\
    \hline
    Pulmonary Medicine & NP & 0.337748 & NP-PP-PP PFORM & 0.167770 & NP-NP-PRED & 0.129139 \\
Pharmacology & NP & 0.274845 & NP-NP-PRED & 0.184783 & NP-NP & 0.184783 \\
Veterinary Medicine & NP & 0.360000 & NP-FOR-NP & 0.120000 & NP-PP-PP PFORM & 0.106667 \\
Vascular Diseases & NP & 0.388060 & PP & 0.099502 & PP-PRED-RS & 0.099502 \\
Tropical Medicine & NP & 0.425547 & NP-NP-PRED & 0.103035 & NP-NP & 0.103035 \\
Medicine & NP & 0.355288 & NP-PP-PP PFORM & 0.126160 & NP-PRED-RS & 0.080705 \\
Communicable Diseases & NP & 0.353806 & NP-PP-PP PFORM & 0.173010 & NP-FOR-NP & 0.121107 \\
Neoplasms & NP & 0.314900 & NP-PP-PP PFORM & 0.219662 & PP-PRED-RS & 0.094470 \\
Biochemistry & NP & 0.252427 & NP-PP-PP PFORM & 0.200647 & PP-PRED-RS & 0.101942 \\
Endocrinology & NP & 0.240283 & NP-PP-PP PFORM & 0.207303 & PP-PP & 0.089517 \\
Rheumatology & NP & 0.283192 & NP-PP-PP PFORM & 0.203390 & PP & 0.133475 \\
Science & NP-PP-PP PFORM & 0.224299 & NP & 0.190314 & PP-PP & 0.115548 \\
Neurology & NP & 0.260030 & NP-PP-PP PFORM & 0.228826 & NP-NP-PRED & 0.123328 \\
Virology & NP-PP-PP PFORM & 0.300000 & NP & 0.209524 & NP-NP-PRED & 0.102381 \\
Microbiology & NP-PP-PP PFORM & 0.322925 & NP & 0.201828 & PP-PRED-RS & 0.105864 \\
Cell Biology & NP-PP-PP PFORM & 0.389027 & NP & 0.182045 & PP-PP & 0.114713 \\
Botany & NP & 0.214421 & NP-PP-PP PFORM & 0.204934 & NP-NP & 0.100569 \\
Physiology & NP & 0.358191 & NP-PP-PP PFORM & 0.107579 & NP-NP-PRED & 0.074572 \\
Environmental Health & NP & 0.385877 & NP-PP-PP PFORM & 0.091298 & NP-AS-NP-SC & 0.077746 \\
Genetics & NP & 0.211664 & NP-PP-PP PFORM & 0.189040 & NP-AS-NP-SC & 0.096531 \\
Molecular Biology & NP-PP-PP PFORM & 0.281690 & NP & 0.170775 & PP-PP & 0.070423 \\
Geriatrics & NP & 0.346975 & NP-PRED-RS & 0.097865 & NP-PP-PRED & 0.088968 \\
Critical Care & NP & 0.413424 & NP-PP-PP PFORM & 0.108949 & PP-PRED-RS & 0.090467 \\
Gastroenterology & NP & 0.546099 & NP-PP-PP PFORM & 0.148936 & PP-PRED-RS & 0.083333 \\
Public Health & NP & 0.342735 & NP-FOR-NP & 0.124786 & NP-AS-NP-SC & 0.101709 \\

\hline
\end{tabular}
\caption{Top three SCFs, by subdomain, for \emph{treat}.}
\label{treat:table}
\end{figure*}
\clearpage






% \section{Theory/Calculations}

%
%% The Appendices part is started with the command \appendix;
%% appendix sections are then done as normal sections
%% \appendix

%% \section{}
%% \label{}

% \section{Appendices}



%% References
%%
%% Following citation commands can be used in the body text:
%% Usage of \cite is as follows:
%%   \cite{key}          ==>>  [#]
%%   \cite[chap. 2]{key} ==>>  [#, chap. 2]
%%   \citet{key}         ==>>  Author [#]

%% References with bibTeX database:

%\bibliographystyle{model3-num-names}
\bibliographystyle{plain}
\bibliography{jbmi_bib}

%% Authors are advised to submit their bibtex database files. They are
%% requested to list a bibtex style file in the manuscript if they do
%% not want to use model3-num-names.bst.

%% References without bibTeX database:

% \begin{thebibliography}{00}

%% \bibitem must have the following form:
%%   \bibitem{key}...
%%

% \bibitem{}

% \end{thebibliography}


\end{document}

%%
%% End of file `elsarticle-template-3-num.tex'.

