\documentclass[prodmode,acmtist]{acmsmall}

% Package to generate and customize Algorithm as per ACM style
\usepackage[ruled]{algorithm2e}
\usepackage{verbatim}
\newcommand{\BoldGreek}[1] {\mbox{\boldmath${#1}$}}
\renewcommand{\algorithmcfname}{ALGORITHM}
\SetAlFnt{\small}
\SetAlCapFnt{\small}
\SetAlCapNameFnt{\small}
\SetAlCapHSkip{0pt}
\IncMargin{-\parindent}

% Metadata Information
%\acmVolume{9}
%\acmNumber{4}
%\acmArticle{39}
%\acmYear{2010}
%\acmMonth{3}

% Document starts
\begin{document}

% Page heads
\markboth{L. Hong and B.D. Davison}{Empirical Study of Topic Modeling in Twitter}

% Title portion
\title{Empirical Study of Topic Modeling in Twitter}
\author{Liangjie Hong
\affil{Dept. of Computer Science and Engineering, Lehigh University}
Brian D. Davison
\affil{Dept. of Computer Science and Engineering, Lehigh University}}

\begin{abstract}
Micro-blogging services (MBS), such as Twitter, Tumblr and Sina Weibo, have been a crucial source of information for a wide spectrum of users. Popular information that is deemed important by communities propagates through these websites. In a series of significant natural disasters occurring in recent years, MBS, especially Twitter, became a platform for users to exchange information effectively and efficiently. More recently, Twitter messages also played an instrumental role in facilitating the political upheavals in the Middle East. In addition, many organizations, celebrities and politicians are using micro-blogging services to connect to their customers, fans and supporters. Due to the importance of these services, research communities and industrial companies have demonstrated great interest in understanding and discovering patterns in these newly emerged tools. Indeed, studying the characteristics of content in short messages posted by users becomes vital for a number of tasks, such as breaking news detection, personalized message recommendation, friend recommendation, sentiment analysis and others. While many researchers wish to use standard text mining tools, especially topic models, to understand messages on MBS, the restricted length of those messages prevents them from being employed to their full potential. Furthermore, in order to take characteristics of MBS into account, a number of heuristics have been applied to standard topic models without any theoretical justification or even empirical comparison. Additionally, the large scale nature of MBS greatly hinders the use of conventional sampling methods to train topic models, limiting the power of this kind of model.

In this paper, we address the problem of using standard topic models in MBS environments by studying how the models can be trained on the dataset efficiently. We also consider two simple extensions to existing models, which consider some important characteristics of MBS. In addition, we propose several schemes to train a standard topic model and compare their quality and effectiveness through a set of carefully designed experiments from both qualitative and quantitative perspectives. By using recently developed online learning techniques for topic models, we show that standard topic models and their variants can be easily applied to millions of messages without employing large scale computing power such as the MapReduce framework. For standard topic models, we show that by training a topic model on aggregated messages we can obtain a higher-quality learned model which results in significantly better performance in two real-world classification problems. For simple extensions, we demonstrate that they can perform consistently better in terms of topic modeling and classification results.
\end{abstract}

\category{H.3.3}{Information Storage and Retrieval}{Information Search and Retrieval}

\terms{Algorithms, Design, Experimentation}

\keywords{Twitter, Topic models, Social media, Online learning}

%\acmformat{Zhou, G., Wu, Y., Yan, T., He, T., Huang, C., Stankovic, J. A., and Abdelzaher, T. F.  2010. A multifrequency MAC specially designed for  wireless sensor network applications.}

\begin{comment}
\begin{bottomstuff}
This work is supported by the National Science Foundation, under
grant CNS-0435060, grant CCR-0325197 and grant EN-CS-0329609.

Author's addresses: G. Zhou, Computer Science Department,
College of William and Mary; Y. Wu  {and} J. A. Stankovic,
Computer Science Department, University of Virginia; T. Yan,
Eaton Innovation Center; T. He, Computer Science Department,
University of Minnesota; C. Huang, Google; T. F. Abdelzaher,
Computer Science Department, University of Illinois at Urbana-Champaign.
\end{bottomstuff}
\end{comment}

\maketitle

%===========================================================
\section{Introduction}\label{sec:introduction}
%===========================================================
Micro-blogging environments, such as Twitter, Tumblr\footnote{http://www.tumblr.com} and Sina Weibo\footnote{http://www.weibo.com}, have become important communication tools for on-line users. The platform of micro-blogging services is increasingly used for communicating breaking news, eyewitness accounts and connecting large groups of people worldwide. Users of these websites have become accustomed to receiving timely updates on important events, both of personal and global value. For instance, Twitter was used to propagate information in real-time in many crisis situations such as the Iran election in 2009, the Haiti earthquakes in 2010, and the tsunami in Japan in 2011. More recently, Twitter messages played an instrumental role in facilitating the political upheavals in the Middle East. In addition to being used for tracking events, micro-blogging websites are vital for many organizations, celebrities and politicians to connect to their customers, fans and supporters.

A variety of research areas have shown increasing interest in micro-blogging services, especially Twitter, even from their early stages. Early work mainly focused on qualitative studies on a number of aspects and characteristics of Twitter. For example, Java et al.~\cite{Java2007} studied the topological and geographical properties of Twitter's social network in 2007 and found that the network has high degree correlation and reciprocity, indicating close mutual acquaintances among users. Krishnamurthy et al.~\cite{Krishnamurthy2008} studied the geographical distribution of Twitter users and their behaviors among several independent crawls. The authors mostly agree with the classification of user intentions presented by Java et al., but also point out evangelists and miscreants (spammers) that are looking to follow anyone. Shamma et al.~\cite{Shamma2009} studied the practice of sharing Twitter messages during the U.S. 2008 Presidential Debates where they found that the level of Twitter activity serves as a predictor of changes in topics in the media event and conversational cues can identify the key players in the media object.

Later work starts to investigate a wider range of questions, primarily focusing on how information propagates and how users interact with each other on Twitter. One direction of research is to identify influential users from Twitter. For instance, Weng et al.~\cite{Weng2010} studied the problem of identifying influential users on Twitter by proposing an extension of the PageRank algorithm to measure the influence taking both the topical similarity between users and the link structure into account. They also presented evidence to support the existence of homophily in Twitter. In their work, they utilized topic models (described below) to understand users' interests. Pal et al.~\cite{Pal2011} utilized a Gaussian Mixture Model with features including topical signal, retweet impact and mention impact. A ranking mechanism for users was proposed by using the CDF of Gaussian function for each feature. The authors found that their method can discover users who are more interesting and more authoritative. Bakshy et al.~\cite{Bakshy2011} analyzed how users influence their followers on Twitter for a particular type of messages, the messages containing URLs. First, by using several simple features, they found that the past influence provides the most informative features as well as the number of followers. Then, the authors asked Amazon Mechanical Turk workers to classify messages into topical categories (including spam). Another line of research is to investigate the factors that contribute to the process of information diffusion on Twitter. In particular, researchers are interested in identifying messages that are potentially popular to be retweeted. Suh et al.~\cite{Suh2010} studied a number of factors that might be critical for messages to be retweeted without building any predictive models. Hong et al.~\cite{Hong2011} built classifiers with a wide range of features to predict how likely a message will become popular in the future.

Among the research mentioned above and others, researchers wish to use messages posted by users to infer users' interests, model social relationships, track news stories and identify emerging topics. However, several natural limitations of messages prevent some standard text mining tools from being employed to their full potential. First, messages on Twitter are restricted to 140 characters. This is substantially different from traditional information retrieval and web search where documents are usually much longer. Second, within the short length, users invented many techniques to expand the semantics that are carried by the messages. For example, when posting external URLs, users may use URL shortening services (e.g., http://www.bit.ly). In addition, users heavily use self-defined hash tags starting with ``\#'' to identify certain events or topics. Therefore, from the perspective of length (e.g., in characters), the content in messages is limited while it may convey rich meanings.

Topic models \cite{Blei2009} are powerful tools to identify latent text patterns in the content. By building low dimensional representations, semantic gaps among terms are narrowed and contextual structures in text corpus are captured. Topic models are applied in a wide spectrum of areas including recent work on Twitter (e.g., \cite{Ramage2010,Weng2010,Pennacchiotti2011}). Social media differs from other standard text domains (e.g., citation network, web pages), where topic models are usually utilized, in a number of ways. One important fact is that there is no standard method to train a topic model on Twitter. In particular, the unit of ``document'' on Twitter is not as clear as in conventional information domains, yielding different strategies to form ``documents''. The choice of strategies is conditioned on the nature of applications. For example, on Twitter, we can treat each message as a document and obtain topics associated with messages. We can also group all messages per user as a single document and train a topic model on these virtual documents. Researchers typically adopt one strategy without any clear elaboration and experimental comparison. For instance, Weng et al. and Pennacchiotti et al.~\cite{Weng2010,Pennacchiotti2011} trained a topic model on aggregated users' messages while Ramage et al.~\cite{Ramage2010} used a slightly modified topic model on individual messages. Neither of them mentioned the other possibility. Indeed, to our knowledge, there is no empirical or theoretical study to show which method is more effective, or whether there exists some more powerful way to train the models. In addition, the basic assumption behind standard topic models, where a document is a mixture of topics, may not be appropriate for Twitter messages, since the length of individual messages is limited. A more meaningful assumption might be that each message is only from one topic. However, no study has been done to show which assumption is more effective. 
Furthermore, the number of Twitter messages is huge. Any serious study on Twitter may easily face millions of messages, which are much larger than the standard text corpus where topic models are applied. Thus, it is an important practical issue to have scalable models on Twitter.

In this paper, we want to address the problem of how to effectively train a standard topic model in short text environments. Although our experiments are solely based on Twitter, we believe that some of the discussions can be also applied to other scenarios, such as chat logs, discussion boards and blog comments. More specifically, we want to answer these questions in the paper:
\begin{itemize}
\item If we use different aggregation strategies to train topic models, do we obtain similar topics, or are the learned topics totally different?
\item Can we learn a topic model faster yet still useful, without any modifications to standard models?
\item Can we shed some light on how we can build new models to fully utilize the structure of short text environments?
\item How can we train topic models efficiently on Twitter?
\end{itemize}With a set of carefully designed experiments from both quantitative and qualitative perspectives and two real-world classification problems, in this paper, we make the following contributions:
\begin{itemize}
\item Topics learned by using aggregation strategies of the data are substantially different from each other.
\item Training a standard topic model on aggregated user messages would lead to a faster training process and result in better quality.
\item Topic mixture distributions learned by topic models can be a good set of supplementary features in classification problems, yielding significant improvement in overall classification performance.
\end{itemize}
The paper is organized as follows. In Section \ref{sec:related_work}, we outline some related work on the topic. In Section \ref{sec:methodology}, we introduce several methods to learn topic models on Twitter. Section \ref{sec:experiments} details our experiments and major conclusions. In Section \ref{sec:conclusion}, we summarize our contributions.

%===========================================================
\section{Related Work}\label{sec:related_work}
%===========================================================
In this section, we mainly review two lines of research, the state-of-the-art approaches in topic modeling related to this work and new topic models developed particularly for Twitter. In addition, we also review some attempts to model short text messages in other domains.

Topic modeling is gaining increasing attention in different text mining communities. Since the seminal work done by Blei et al.~\cite{Blei2003}, where they proposed Latent Dirichlet Allocation ({\tt LDA}), topic models have become standard tools in text mining, information retrieval and machine learning.  As a result, {\tt LDA} has been extended in a variety of ways, in particular for social networks and social media. For example, Chang et al.~\cite{Chang2009} proposed a novel probabilistic topic model to analyze text corpora and infer descriptions of the entities and of relationships between those entities on Wikipedia. McCallum et al.~\cite{McCallum2007} proposed a model to simultaneously discover groups among the entities and topics among the corresponding text. Zhang et al.~\cite{Zhang2007} introduced a model to incorporate {\tt LDA} into a community detection process. Similar work can be found in \cite{Liu2009} and \cite{Nallapati2008}.

Related to this work, where we need to obtain topic mixtures for both messages and authors, Rosen-Zvi et al.~\cite{Rosen-Zvi2004,Rosen-Zvi2010} introduced an author-topic model, which can flexibly model authors and their corresponding topic distributions. In their experiments, they found that the model outperforms {\tt LDA} when only a small number of words are observed in the test documents, which is quite similar to micro-blogging environments. Ramage et al.~\cite{Ramage2009,Ramage2010} extended {\tt LDA} to a supervised form and studied its application in micro-blogging environments. However, no user topic distributions are obtained in this work. Recent work has studied the difference between Twitter and traditional media. Zhao et al.~\cite{Zhao2011} tried to obtain latent topics from Twitter and New York Times (NYT) news articles by using topic models. Two different topic models were used to learn the topics from the two sources separately and some heuristics were then applied to obtain both common and local topics. Rather than using heuristics, Hong et al.~\cite{Hong2011a} proposed a time-dependent topic model for news articles and Twitter messages and analyzed them simultaneously. Their model sheds light on how information propagates from news to Twitter and triggers peaks. Weng et al. and Pennacchiotti et al.~\cite{Weng2010,Pennacchiotti2011} trained an {\tt LDA} model on aggregated users' messages to further obtain users' authority scores. Their work demonstrates success stories using simple aggregation strategies, compared to dedicated models, which are usually more complicated.

For short text message modeling, two basic approaches have been proposed. One is to utilize topic models. For instance, Phan et al.~\cite{Phan2008} studied the problem of modeling short text through {\tt LDA}, although their work mainly focused on how to apply it to Wikipedia and they did not provide any discussion of whether there are other ways to train the same model. Another approach is to employ search engines directly. The basic idea is to use the rich information conveyed by search results to augment sparse short text. For example, Sahami et al.~\cite{Sahami2006} introduced a kernel function based on search engine results to compare the similarity of short text snippets. Later, Yih et al.~\cite{Yih2007} further extended the method by using relevance weighted vectors to represent short text and exploiting machine learning techniques to learn the weights.

Traditionally, topic models are applied to text corpora with hundreds or thousands of documents. Efforts have been made to scale models to millions of documents. Two directions have been pursued. The first direction is to parallelize one of the current inference algorithms, usually Gibbs sampling, in a multi-core and multi-machine fashion. Examples of this method include \cite{Newman2009,Smola2010,Liu2011}. The other direction is to develop intrinsically fast learning algorithms, usually taking advantage of existing online learning achievements. For instance, Hoffman et al.~\cite{Hoffman2010} proposed an online {\tt LDA} algorithm that is analogous to stochastic gradient descent. A similar method was developed for the Hierarchical Dirichlet Process by Wang et al.~\cite{Wang2011}. A slightly different approach is to employ sequential Monte Carlo sampling techniques, such as \cite{Canini2009} and \cite{Ahmed2011}. However, this approach usually requires sophisticated data structures.

%==============================================================
\section{Topic Modeling on Twitter}\label{sec:methodology}
%==============================================================
In this section, we will introduce several methods to train topic models on Twitter and discuss their technical details. We first review two basic models, {\tt LDA} and the Author-Topic model. Although both models are normally implemented by using Gibbs sampling, we discuss how these models can be trained more efficiently through online learning techniques. In addition, we discuss simple extensions to these two models where characteristics of Twitter have been considered. Finally, we compare several aggregation strategies when training basic models.

%==============================================================
\subsection{Basic Models}\label{sec:basic_model}
%==============================================================
We briefly review {\tt LDA} and Author-Topic model here. Detailed descriptions of models are in \cite{Blei2003} and \cite{Rosen-Zvi2010}. For Gibbs sampling of {\tt LDA}, please refer to \cite{Griffiths2004}.
%==============================================================
\subsubsection{Latent Dirichlet Allocation}\label{sec:lda}
%==============================================================
Latent Dirichlet Allocation ({\tt LDA})~\cite{Blei2003} is an unsupervised machine learning technique which identifies latent topic information in document collections. It uses a ``bag of words'' approach, which treats each document $d$ as a vector of words $\mathbf{w}_{d} =\{w_{d,1}, w_{d,2}, \cdots , w_{d,N_{d}}\}$. Each document $d$ in the collection is associated with a multinomial distribution over $T$ topics, which is denoted as $\theta_{d}$. Each topic $k$ is associated with a multinomial distribution over a fixed size of vocabulary, denoted as $\phi_{k}$. Both $\BoldGreek{\theta}$ and $\BoldGreek{\phi}$ have Dirichlet prior with hyper-parameters $\BoldGreek{\alpha}$ and $\BoldGreek{\beta}$ respectively. An imaginary generative process of the whole data collection is as follows. For each token in one document $d$, a topic index $z$ is
sampled from the multinomial distribution $\theta_{d}$ and a word $w$ from the multinomial distribution $\phi$
associated with topic $z$ is sampled consequently. This generative process is repeated $N_{d}$ times and $D$ times, where $N_{d}$ is the total number of words in the document $d$ and $D$ is the number of documents in the collection.
%==============================================================
\subsubsection{Author-Topic Model}\label{sec:at_model}
%==============================================================
The Author-Topic model ({\tt AT} model) is an extension of {\tt LDA}, which was first proposed in \cite{Rosen-Zvi2004} and further expanded in \cite{Rosen-Zvi2010}. The basic assumption behind the {\tt AT} model is that each author $a$ is characterized by a distribution over $T$ topics, denoted as $\BoldGreek{\theta}_{a}$, and each topic is associated with a multinomial distribution over words, denoted as $\BoldGreek{\phi}$. Each document is a mixture of words generated from different co-authors of the document. In other words, every word $w$ is associated with two latent variables: an author, $a$, and a topic, $z$.  Here, differing from {\tt LDA}, the observed variables for an individual document are the set of authors and the words in the document. Since the original {\tt AT} model assumes that each document is written by multiple authors, we only need a simplified version of the {\tt AT} model for Twitter where each message has an explicit author. Therefore, the generative process of the modified version of the {\tt AT} model is as follows:
\begin{enumerate}
\item For each document, given the author $a_{d}$.
\item For each word position $i$ in the document $d$
\item Conditioned on $\BoldGreek{\theta}_{a_{d}}$, choose a topic $z_{di}$.
\item Conditioned on $\BoldGreek{\phi}_{z_{di}}$, choose a word $w_{di}$.
\end{enumerate}For {\tt AT} model, no topic proportion distributions are obtained for each individual document, differing from {\tt LDA}. On the other hand, {\tt LDA} cannot have topic proportion distributions on the user level. Therefore, if we want to model documents and authors simultaneously, certain extension or special treatment is required.

%==============================================================
\subsection{Online Learning for {\tt LDA} and {\tt AT}}\label{sec:online_learning}
%==============================================================
% Algorithm
\begin{algorithm}[t]
\SetAlgoNoLine
%\KwIn{Node $\alpha$'s ID ($ID_{\alpha}$), and node $\alpha$'s neighbors' IDs within two communication hops.}
%\KwOut{The frequency number ($FreNum_{\alpha}$) node $\alpha$ gets assigned.}
Initialize all corpus level parameters, such as topic distributions\;
\Repeat{$\mbox{Stopping criterion is met}$}{
        \For{each document $d$}{
            // The E-step \;
            Update document and word level parameters until convergence\;
            // The M-step \;
            Re-calculate the learning rate by Equation \ref{eq:learning_rate}\;
            Update corpus level parameters by using the new learning rate (e.g., Equations \ref{eq:online_lda} and \ref{eq:online_at})\;
        }
}
\caption{A Basic Framework of Online Learning for Topic Models}
\label{alg:one}
\end{algorithm}
Both {\tt LDA} and {\tt AT} models are usually trained through Gibbs sampling. However, for larger datasets, conventional Gibbs sampling becomes prohibitively slow. Fast sampling algorithms have been proposed, such as \cite{AlSumait2008} and \cite{Canini2009}. These algorithms are either based on heuristics or require complex data structures. For multi-core and multi-machine alternatives (e.g., \cite{Newman2009,Smola2010,Liu2011}), they are difficult to implement since most of them are not based on existing parallel computational platforms like MapReduce. In addition, large clusters are not always available to researchers. Thus, we do not explore these alternatives in this work. In order to train models on large scale corpora, we adopt online learning algorithms, which are intrinsically fast. The basic idea of online learning for topic models is to process data sequentially and update parameters as data comes in. Similar to standard online learning algorithms such as stochastic gradient descent, online learning algorithms for topic models can also converge to a local optimum much faster than batch algorithms. Online learning algorithms for topic models can be viewed as special cases of online EM algorithms \cite{Liang2009}.

Here, we derive online learning algorithms for both {\tt LDA} and {\tt AT} based on variational inference. For {\tt LDA}, the true posterior distribution is approximated by a simpler distribution $q(\mathbf{z}, \BoldGreek{\theta}, \BoldGreek{\phi})$, which is indexed by a set of free parameters. We choose a fully factorized distribution $q$ of the form
\begin{eqnarray}
q(\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi}) = \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \Bigl\{\prod_{d=1}^{D} q(\BoldGreek{\theta}_{d} \, |\, \BoldGreek{\eta}_{d}) \prod_{i=1}^{N_{d}} q(z_{i} \, | \, \rho_{i})\Bigr\} \nonumber
\end{eqnarray}where the posterior over the per-word topic assignments $\mathbf{z}$ is parameterized by $\BoldGreek{\rho}$, the posterior over the per-document topic proportions $\BoldGreek{\theta}$ is parameterized by $\BoldGreek{\eta}$ and the posterior over the topics $\BoldGreek{\phi}$ is parameterized by $\BoldGreek{\lambda}$. The updating equations for the model can be derived as follows (details see \cite{Blei2003,Hoffman2010}):
\begin{eqnarray}
\rho_{i,k} & \propto & \exp \Bigl[\Psi(\eta_{d,k}) - \Psi(\sum_{j=1}^{K} \eta_{d,j}) \Bigr] \exp \Bigl[ \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigr] \nonumber \\
\eta_{d,k} & = & \alpha_{k} + \sum_{i=1}^{N_{d}} \rho_{i,k}  \,\, ; \,\, \lambda_{k,v} = \beta_{v} + \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} \mathbb{I}(w_{i} == v)\rho_{i,k}
\end{eqnarray}The updates can be partitioned into an E step---iteratively updating $\BoldGreek{\rho}$ and $\BoldGreek{\eta}$ for each document $d$ until convergence, holding $\BoldGreek{\lambda}$ fixed---and an M step---updating $\BoldGreek{\lambda}$ given the newly updated $\BoldGreek{\rho}$ for all documents. For the online version, the E step is identical to the batch mode. In the M step, we update $\BoldGreek{\lambda}$ as follows:
\begin{equation}\label{eq:online_lda}
\widetilde{\lambda}_{k,v} = \beta_{v} + Dn_{d,v}\rho_{d_{v},k} \,\, ; \,\, \BoldGreek{\lambda} = (1-\nu_{d})\BoldGreek{\lambda} + \nu_{d}\widetilde{\BoldGreek{\lambda}}
\end{equation}where $D$ is the total number of documents and $\nu_{d}$ is the learning rate, which is defined as follows:
\begin{equation}\label{eq:learning_rate}
\nu_{d} = (\tau_{0} + d)^{-\kappa}
\end{equation}Here, $\tau_{0}$ and $\kappa$ are two parameters to be tuned. Note, the condition that $\kappa \in (0.5, 1]$ is needed to guarantee convergence \cite{Hoffman2010,Wang2011}. The intuitive explanation of the updating rule is that new $\BoldGreek{\lambda}$ values are a weighted average of old values and updates from current documents. The weights are governed by an epoch-dependent learning rate function. A common practice in online learning is to consider multiple documents per update to reduce noise, rather than just updating parameters based on a single observation. %Here, this means computing $\widetilde{\BoldGreek{\lambda}}$ using $S > 1$ documents like $\widetilde{\lambda}_{k,v} = \beta_{v} + \frac{D}{S}\sum_{s} n_{s,v}\rho_{s_{v},k}$.%The outline of the whole algorithm is shown in Figure.

For {\tt AT} model, we also adopt variational inference rather than Gibbs sampling introduced in their original paper \cite{Rosen-Zvi2004,Rosen-Zvi2010}. We introduce the following fully factorized variational distribution over all parameters:
\begin{equation}
q(\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi}) = \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \prod_{a=1}^{A} q(\BoldGreek{\theta}_{a}\,|\,\BoldGreek{\eta}_{a})\prod_{d=1}^{D}\prod_{i=1}^{N_{d}} q(z_{d,i} \, | \, \rho_{d,i})
\end{equation}where $\BoldGreek{\phi}_{k}$ is a Dirichlet distribution, $\BoldGreek{\theta}_{a}$ is a Dirichlet distribution, and $\rho_{d,i}$ is a multinomial distribution. The lower bound we optimize is:
\begin{equation}
\log P(\mathbf{w} \, | \, \BoldGreek{\alpha}, \BoldGreek{\beta}, \mathcal{A}) \geq \mathbb{E}_{q}[\log P(\mathbf{w},\mathbf{z}, \BoldGreek{\Theta},\BoldGreek{\Phi} \, | \, \BoldGreek{\alpha} , \BoldGreek{\beta}, \mathcal{A})] - \mathbb{E}_{q} [\log q(\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi} \, | \, \BoldGreek{\rho} , \BoldGreek{\eta}, \BoldGreek{\lambda})]
\end{equation}After expanding the expectations, we obtain the following updating equations (detailed derivations omitted due to space issue):
\begin{eqnarray}
\rho_{i,k} & \propto & \exp \Bigl[\Psi(\eta_{a_{d},k}) - \Psi(\sum_{j=1}^{K} \eta_{a_{d},j}) \Bigr] \exp \Bigl[ \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigr] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \mathbb{I}(a_{d} = a) \sum_{i=1}^{N_{d}} \rho_{i,k} \,\, ; \,\, \lambda_{k,v} = \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v) \rho_{i,k} \nonumber
\end{eqnarray}Similar to {\tt LDA}, we can also apply online learning techniques to the {\tt AT} model. More specifically, in the E step, we iteratively update $\BoldGreek{\xi}_{i}$ and $\BoldGreek{\rho}_{i}$ for each word position $i$ in document $d$. In the M step, we pretend that document $d$ appears $D$ times in the corpus and have the following updating equations:
\begin{eqnarray}\label{eq:online_at}
\widetilde{\eta}_{a,k} &=& \alpha_{k} + D \sum_{i=1}^{N_{d}} \rho_{i,k} \,\, ; \,\, \BoldGreek{\eta} = (1-\nu_{d})\BoldGreek{\eta} + \nu_{d}\widetilde{\BoldGreek{\eta}} \nonumber \\
\widetilde{\lambda}_{k,v} &=& \beta_{v} + D \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v) \rho_{i,k} \,\, ; \,\, \BoldGreek{\lambda} = (1-\nu_{d})\BoldGreek{\lambda} + \nu_{d}\widetilde{\BoldGreek{\lambda}}
\end{eqnarray}The last line is identical to that of online {\tt LDA}. The basic framework for online learning is shown in Algorithm \ref{alg:one}.

%==============================================================
\subsection{Simple Extensions to Basic Models}\label{sec:extensions}
%==============================================================
In this sub-section, we explore some extensions to {\tt LDA} and {\tt AT}, which take some characteristics of Twitter into consideration. The first extension is to relax the assumption that a document is a mixture of topics. In Twitter, since the length of each message is very limited, it is quite reasonable to assume that each message is only about one topic. Thus, relaxing this assumption, our models might better reflect the real scenario of Twitter. For {\tt LDA}, this means that each document has a single topic assignment and thus $\BoldGreek{\theta}_{d}$ for document $d$ becomes useless. Therefore, we essentially have a single multinomial distribution $\BoldGreek{\theta}$ over $T$ topics for the whole corpus. Each document belongs to a topic. Under this assumption, we have the following update equations:
\begin{eqnarray}
\rho_{d,k} & \propto & \exp \Bigl[ \Psi(\eta_{k}) - \Psi(\sum_{j=1}^{K} \eta_{j}) \Bigr] \exp \Bigl[ \sum_{i=1}^{N_{d}} \Bigl( \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigr) \Bigr] \nonumber \\
\eta_{k} & = & \alpha_{k} + \sum_{d=1}^{D} \rho_{d,k} \,\, ; \,\, \lambda_{k,v} = \beta_{v} + \sum_{d=1}^{D} \rho_{d,k} \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v)  \nonumber
\end{eqnarray}We denote this model as {\tt CL} (Document Clustering model). For the {\tt AT} model, this relaxation leads to the following update equations:
\begin{eqnarray}
\rho_{d,k} & \propto & \exp \Bigl[ \Psi(\eta_{a_{d},k}) - \Psi(\sum_{j=1}^{K} \eta_{a_{d},j}) \Bigr] \exp \Bigl[ \sum_{i=1}^{N_{d}} \Bigl( \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigr) \Bigr] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \mathbb{I}(a_{d} = a ) \rho_{d,k} \,\, ; \,\, \lambda_{k,v} = \beta_{v} + \sum_{d=1}^{D} \rho_{d,k} \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v) \nonumber
\end{eqnarray}We denote this model as {\tt AT-CL} (Author Topic Document Clustering model). This extension to the original models results in much smaller parameter spaces.

In addition to the single topic assignment extension discussed above, for the {\tt AT} model, we could further relax the original assumptions made in the model and thereby obtain new extensions. For instance, not all the messages generated by a user can reflect the user's characteristics. A common practice to reduce such noise is to introduce a ``background'' language model where terms in messages are either generated by topics or by the background language model. Conventionally, a word-level variable is introduced into the model to indicate where the word is generated from. Here, we follow the idea of the first extension where a message is only generated by a single topic, not a mixture of topics. Thus, for a given document $d$, we first decide whether this document is from topics or a background language model, by drawing a Bernoulli variable $x_{d}$ from a Beta distribution $\BoldGreek{\pi}$. If $x_{d}$ is equal to $0$, we draw the whole document from the background language model $\BoldGreek{\phi}^{B}$. If $x_{d}$ is equal to $1$, we choose $z_{d}$ from the author's topic proportion $\BoldGreek{\theta}$ and then generate terms from the corresponding topic. Based on this assumption, we introduce the following fully factorized variational distribution over all parameters:
\begin{eqnarray}
q(\mathbf{z},\mathbf{x}, \BoldGreek{\Theta}, \BoldGreek{\Phi}, \BoldGreek{\phi}^{B}, \BoldGreek{\pi}) = q(\BoldGreek{\pi} \, | \, \BoldGreek{\gamma}) q(\BoldGreek{\phi}^{B} \, | \, \BoldGreek{\lambda}^{B}) \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \prod_{a=1}^{A} q(\BoldGreek{\theta}_{a}\,|\,\BoldGreek{\eta}_{a})\prod_{d=1}^{D} q(x_{d} \, | \, \xi_{d}) q(z_{d} \, | \, \rho_{d}) \nonumber
\end{eqnarray}where $\BoldGreek{\phi}_{k}$ is a Dirichlet distribution, $\BoldGreek{\theta}_{a}$ is a Dirichlet distribution, $\BoldGreek{\xi}_{d}$ is a multinomial distribution (two-dimensional), $\BoldGreek{\lambda}^{B}$ is a multinomial distribution, $\BoldGreek{\gamma}$ is a Dirichlet distribution (two-dimensional) and $\BoldGreek{\rho}_{d}$ is a multinomial distribution. The updating equations are as follows (detailed derivations omitted due to space limitations):
\begin{eqnarray}
\xi_{d,0} & \propto & \exp \Bigl[\Psi(\gamma_{0}) - \Psi(\gamma_{0}+\gamma_{1})\Bigr] \exp \Bigl[ \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} w_{i,v} (\Psi(\lambda_{v}^{B})-\Psi(\sum_{j=1}^{V} \lambda_{j}^{B})) \Bigr] \nonumber \\
\xi_{d,1} & \propto & \exp \Bigl[\Psi(\gamma_{1}) - \Psi(\gamma_{0}+\gamma_{1})\Bigr] \exp \Bigl[ \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \sum_{v=1}^{V} \rho_{d,k} w_{i,v} (\Psi(\lambda_{k,v})-\Psi(\sum_{j=1}^{V} \lambda_{k,j})) \Bigr] \nonumber \\
\rho_{d,k} & \propto & \exp \Bigl[ \Psi(\eta_{a_{d},k})-\Psi(\sum_{j=1}^{K} \eta_{a_{d},j}) \Bigr] \exp \Bigl[ \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} \xi_{d,1} w_{i,v} (\Psi(\lambda_{k,v})-\Psi(\sum_{j=1}^{V} \lambda_{k,j})) \Bigr] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \mathbb{I}(a_{d} = a) \sum_{i=1}^{N_{d}} \rho_{i,k} \,\, ; \,\, \pi_{s} = \gamma_{s} + \sum_{d=1}^{D} \xi_{d,s} \nonumber \\
\lambda_{k,v} & = & \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \xi_{d,1} \rho_{i,k}\mathbb{I}(w_{i} = v)  \,\, ; \,\, \lambda_{v}^{B} = \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \xi_{d,0} \mathbb{I}(w_{i} = v)  \nonumber 
\end{eqnarray}where the first line is the updating rule for the case where the document $d$ is generated from the background language model and the second line is the updating rule for the case where $d$ is generated from topics. We denote this model as the {\tt BAT} model (Background Author Topic). Note, online learning techniques discussed in sub-section \ref{sec:online_learning} can also be applied to both extensions introduced in this sub-section.

%==============================================================
\subsection{Topic Modeling Schemes}\label{sec:tm_schemes}
%==============================================================
Recall that our goal is to infer topic proportional parameters (or topic indicator parameters) for both messages and authors in the corpus. Since all models introduced before cannot achieve this goal directly, in this sub-section, we discuss several methods to tackle the problem.

For {\tt LDA} and its variants, three schemes can be used to obtain topic proportional parameters for both messages and authors. First, we discuss a very natural choice of training models. The process is as follows: 1) Train a model on all training messages, 2) Aggregate all training messages generated by the same user into a training profile for that user, 3) Aggregate all testing messages generated by the same user into a testing profile for that user, and 4) Taking training user profiles, testing user profiles and testing messages as ``new documents'', use the trained model to infer a topic mixtures for each of them. We denote this method as the {\tt MSG} scheme. Note that we do not combine all user profiles into a single set of user profiles simply because some users may be part of the training set, and thus the aggregation of all user profiles may give an unfair advantage to the model to achieve better performance. The second scheme is to train the model on aggregated user profiles, which leads to the following process: 1) Train a model on aggregated user profiles, each of which combines all training messages generated by the same user, 2) Aggregate all testing messages generated by the same user into testing user profiles, and 3) Taking training messages, testing user profiles and testing messages as ``new documents'', use the trained model to infer a topic mixture for each of them. We denote the method as the {\tt USER} scheme. The third scheme, which we denote as the {\tt TERM} scheme, is more unusual. The process is as follows: 1) For each term in the training set, aggregate all the messages that contain this term into a training term profile, 2) Train a model on all training term profiles, 3) Build user profiles in training and testing set respectively, and 4) Taking training messages, training user profiles, testing user profiles and testing messages as ``new documents'', use the trained model to infer a topic mixture for each of them. 
The rationale for this scheme is that on Twitter, users often use self-defined hash tags (i.e., terms starting with ``\#'') to identify certain topics or events. Building term profiles may allow us to obtain topics related to these hash tags directly.

These schemes each have their own advantages. For {\tt MSG}, it is straightforward and easily understandable but the training process is based on individual messages, whose content is very limited. The model may not have enough information to learn the topic patterns. More specifically, the occurrences of terms in one message play a less discriminative role compared to lengthy documents (e.g., aggregated user profiles or term profiles) where the model has enough term counts to know how terms are related. For the {\tt USER} and {\tt TERM} schemes, the models have enough content and might provide a more ``accurate'' result.

For {\tt AT} and its variants, we obtain user-level topic proportional parameters directly from the training process. Thus, in order to obtain document-level topic proportional parameters, we need to treat all documents as ``test documents'' and apply the trained model to them. Note, the {\tt USER} scheme is essentially equivalent to the {\tt AT} model. However, due to the nature of online learning, for the {\tt USER} scheme, we always process one author per iteration while for {\tt AT}, different documents by the same author are processed in different iterations. Therefore, the results by {\tt USER} and {\tt AT} might vary a lot. Our later experiments also validate this phenomenon.

There is another issue related to the different schemes. Usually, the number of users is several orders of magnitude smaller than the number of messages. Therefore, it would take significantly less time to train a model with the {\tt USER} scheme rather than the {\tt MSG} scheme. The same argument can be made for the {\tt TERM} scheme as well. In addition, the topic mixture assumption of topic models might eventually lead to different optimal choices of $T$ (the number of topics) for different schemes. For the {\tt MSG} scheme, we are modeling the number of topics existing in messages. Since a message is short and the number of messages is huge, we usually need a larger number of topics to obtain a reasonable model. On the other hand, for the {\tt USER} scheme, we are modeling the number of topics for users. We can arguably say that each user may only have a relatively small number of topics that they are interested in and the total number of users is comparatively smaller than the volume of messages. Hence, through our experiments, the optimal number of topics is usually smaller than that in the {\tt MSG} scheme.

%==============================================================
\section{Experiments}\label{sec:experiments}
%==============================================================
\begin{table}
%\centering
\tbl{Users From Twitter Suggestions\label{table:twitter_suggestion}}{
\begin{tabular}{|c|c|c|} \hline
\textbf{Category ID} & \textbf{Category Name} & \textbf{\# of Users} \\ \hline \hline
0 & Art \& Design & 3 \\ \hline
1 & Books  &  3 \\ \hline
2 & Business & 8 \\ \hline
3 & Charity & 15 \\ \hline
4 & Entertainment & 42 \\ \hline
5 & Family &  4 \\ \hline
6 & Fashion &  5 \\ \hline
7 & Food \& Drink & 19 \\ \hline
8 & Funny & 23 \\ \hline
9 & Health & 9 \\ \hline
10 & Music & 43 \\ \hline
11 & News & 16 \\ \hline
12 & Politics & 27 \\ \hline
13 & Science & 4 \\ \hline
14 & Sports & 39 \\ \hline
15 & Technology & 22 \\ \hline
\end{tabular}}
\end{table}
In this section, we present the experimental evaluation of all methods discussed on Twitter data. We begin by describing some preprocessing steps of our data. %Then, we test a variety of schemes discussed in the previous section on two realistic tasks. By studying the results, we will show that topic modeling is a powerful tool for short text messages. In our experiments, 
Then, we demonstrate how these methods perform in terms of topic modeling. In particular, we focus on whether similar topics can be discovered or not and the quality of the topics obtained. In addition, we have two application tasks, whose performance can be potentially enhanced by topic modeling techniques:
\begin{itemize}
\item Predicting popular Twitter messages
\item Classifying Twitter users and corresponding messages into topical categories
\end{itemize}For the first task, we consider the number of times a message has been retweeted as a measure of popularity. Therefore, we convert the problem into predicting whether a message will be retweeted in the \textbf{future}. Since we only have an incomplete set of Twitter messages and we cannot directly recover complete retweet patterns, we need to construct a reasonable dataset from our sample. Consider a collection of messages, some of which are duplicates of others. Before we measure if two messages are ``similar'', we remove links, any word starting with the ``@'' character and non-Latin characters from the messages. We convert all characters to lower case and calculate the hash value of all the messages. We group messages with the same hash values together and sort them by time, forming different chains of messages. For all messages in the chain except the first, we further filter out those messages without ``RT''. In other words, it does not matter if the first message is a retweet, but all subsequent messages in the chain must be retweets. For all filtered chains, if there are $n$ messages in a particular chain, we take the first $n-1$ messages as ``positive instances'', meaning that they will be retweeted in the future, and the last one as a ``negative instance''. In addition, all other messages which are not in any chains are also considered as ``negative instances''. Our task is to correctly predict all ``positive instances'' in the dataset. Our experiments employ the data from Twitter's APIs\footnote{http://dev.twitter.com/}. We collected messages from the first and second week of November 2009. After the preprocessing steps mentioned above, our dataset contains 1,992,758 messages and 514,130 users. In our experiments, we neither remove stop words nor perform stemming on the words. We replace all URLs with the word ``link'' and keep all hash tags. Therefore, we have 3,697,498 distinct terms for the two weeks of data.

The second task is more straightforward. In several Twitter directories (e.g., http://www.wefollow.com) and in the official Twitter site, lists of users with categories associated with them are provided. We take more than 250 verified users from the official Twitter Suggestions categories\footnote{http://twitter.com/invitations/suggestions} under the assumption that these verified accounts are recognized as valid by real people and organizations. The categories do not overlap. We monitored the latest 150 messages generated by these users and try to classify the messages and the account into their corresponding categories which we obtained from Twitter Suggestions, under the assumption that these verified users adhere to their corresponding categories so strongly that most of the messages generated by them are on the same topic. Prior to attempting the two tasks, we also studied the topics learned by the models empirically, mainly from two aspects: 1) Whether the topics obtained by different schemes are similar or not; and, 2) What is the quality of the topics. We compare the topics in both qualitative and quantitative ways. For the second task, we crawled 274 verified users of 16 categories from Twitter Suggestions and their last 150 messages if available. In order to classify users, we aggregate all the messages generated by the same user into a giant document, denoted as a ``user profile''. Similarly, we do not remove stop words and do not perform stemming. Thus, the dataset contains 52,606 distinct terms and 50,447 messages in total. The detailed number of users per category is shown in Table \ref{table:twitter_suggestion}.

%==============================================================
\subsection{Evaluation Metrics \& Parameters Setting}
%==============================================================
For topic modeling, the following procedure is used to evaluate the performance of different models. First, we divide the dataset into the training set and the test set. Experiments are conducted in a five-fold cross-validation fashion. The first metric is perplexity, a common metric used in the topic modeling community. The metric measures how well the trained model can explain the testing data. For more on perplexity, please refer to references such as \cite{Blei2003,Hoffman2010}. We also compare topics discovered by different methods in a number of ways, which will be discussed in detail.

For two application tasks, both tasks are classification problems where the first one is to classify messages into retweets and non-retweets and the second is to classify messages and users into topical categories. For the first task, our basic evaluation scheme is to train the classifier on the first week and test it on the second week while for the second one, a simple cross-validation scheme is used. The vast majority of the instances in our dataset are negative ones (e.g., the messages will not be retweeted in the future). Therefore, a naive classifier may easily achieve more than 90\% accuracy by choosing every instance as negative, which does not make much sense in our case. Here, we use Precision, Recall and F-Measure (F$_1$ score) as the evaluation metric. Hence, we do not report any results based on accuracy for the first task. For the second task, we use classification accuracy as the evaluation metric. We not only look at the classification accuracy for each category but also care about the overall classification accuracy. The baseline method for both tasks is a classifier using {\tt TF-IDF} weighting values as the features.

Throughout the experiments, we use $L_{2}$-regularized Logistic Regression as our classifier\footnote{http://www.csie.ntu.edu.tw/\~{}cjlin/liblinear/}. All topic models used in the experiments have symmetric Dirichlet priors. We notice that asymmetric priors may lead to better results, as suggested by \cite{Wallach2009}. However, in order to reduce the effect of optimizing hyper-parameters, we fix all of them to symmetric Dirichlet priors. More specifically, for $\beta$, we set it to $0.01$ in all experiments and for $\alpha$, we adopt the commonly used $50/T$ heuristic where $T$ is the number of topics. In terms of online learning, we use $\kappa=0.5$ and $\tau_{0}=64$ throughout our experiments. In order to reduce the noise, we also use a batch size of $1024$ messages to update our parameters. In later experiments, we use {\tt MSG}, {\tt USER} and {\tt TERM} training schemes, as well as {\tt AT} (Author Topic model), {\tt CL} (Document Clustering model), {\tt AT-CL} (Author Topic Document Clustering model) and {\tt BAT} (Background Author Topic model).

%==============================================================
\subsection{Topic Modeling}
%==============================================================
\begin{figure}
\centering
\begin{tabular}{c}
\includegraphics[width=9cm]{comp} \\
\includegraphics[width=9cm]{tau}
\end{tabular}
\caption{The Average Minimal JS Divergence (Top) and The Average Kendall's $\tau$ (Bottom)}
\label{fig:js}
\end{figure}
In this section, we mainly study two questions: 1) whether different methods cause the model to learn different topics from the dataset; and, 2) what is the quality of topics learned from the dataset by different approaches. The dataset we used in this sub-section is the topical classification dataset described in Section 4.1.

For the first question, we conduct a pair-wise similarity measurement between topics obtained by different methods and compare them. Here, we mainly focus on the effect of different training schemes, namely the difference between the {\tt MSG}, {\tt USER}, {\tt TERM} schemes and the {\tt AT} model, which are basic models. Similar analysis is also applied to the extensions we discussed. In order to answer the first question, we need to map topics learned by different schemes. Due to the ``exchangeable'' property of topic models \cite{Blei2003}, the topics learned from different runs of the models do not directly correspond, even for exactly the same settings. Therefore, a mapping process is required to find same or similar topics. In this work, we use Jensen-Shannon (JS) divergence to measure the similarity between topics. The JS divergence is a symmetric measure of the similarity of two distributions. The measure is $0$ only for identical distributions and increases as the two distributions differ more and more, bounded above by $\log 2$. Formally, it is defined as the average of the KL divergence of each distribution to the average of the two distributions $D_{JS} = \frac{1}{2} D_{KL}(P||R) + \frac{1}{2} D_{KL}(Q||R)$ where $R = \frac{1}{2} (P+Q)$ and $D_{KL}(A||B)$ represents the KL divergence between variable $A$ and $B$. In our case, the KL divergence is calculated as $D_{KL}(A||B) = \sum_{n=1}^{M} \phi_{na} \log \frac{\phi_{na}}{\phi_{nb}}$ where $M$ is the number of distinct term types and $\phi_{na}$ is the probability of term $n$ in topic $a$. For each topic $i$, we obtain a corresponding topic $j$ with the minimal JS divergence score where topic $i$ and $j$ are trained through different schemes. Let us first look at the results qualitatively. In Table \ref{table:js_divergence}, we list two topics identified by minimal JS divergence as ``similar topics'' where two models are trained on the dataset for the second task and the number of topics $T=10$.
The upper part of the table shows the topic found by the {\tt MSG} scheme and the bottom part shows the topic obtained by the {\tt USER} scheme. All the terms shown in the table are the topic terms sorted by $\phi$ scores. In other words, these terms are generated by the topics with high probabilities. Not very surprisingly, the top terms found by different schemes do not match with each other exactly. However, by carefully reviewing the terms, we find that most of them are related to some news events (e.g., Haiti earthquake) and politics.

\begin{table}
\centering
\tbl{``Similar'' Topics Found by JS Divergence\label{table:js_divergence}}{
\begin{tabular}{|l|} \hline
\textbf{The Topic Obtained by {\tt MSG} scheme} \\ \hline
[link] our from help world their people news more haiti red photo every two \\
school end american change water million learn women through visit america fight \\
money far girls national wine save young office children giving earth month community \\
needs local trip relief future project malaria uk ones \#haiti number program \\
college south power donate launch between worth education full others students \\
history safe room group lives summer during california earthquake past charity \\ \hline \hline
\textbf{The Topic Obtained by {\tt USER} scheme} \\ \hline
[link] rt and we day on your is us help haiti are by from you new world with about \\
this have red people support at thanks join out will more great twitter can their \\
up water read video w check today were make work here get photo what please \\
last be women live kids an school children who save event vote now project relief \\
pls malaria life \#haiti friends every them has watch donate team thank follow sign \\
global text keep working thx do need free learn earthquake many community million \\ \hline
\end{tabular}}
\end{table}
In order to better quantify the difference between topics, we use two metrics based on JS divergence. One is to calculate the average divergence between ``similar'' topics, which we denote ``the average minimal JS divergence''. More specifically, for each topic $i$, we first find a ``similar'' topic $j$ with minimal JS divergence. Then, we calculate the average of JS divergence over all discovered ``similar'' topics. The average minimal JS divergence between different models is shown in the top of Figure \ref{fig:js}. In this figure, we see that there are obvious differences between topics learned by different schemes or models. Topics learned by the {\tt USER} scheme are substantially different from the topics learned by the {\tt MSG} scheme and the JS divergence slightly increases with an increasing number of topics. Compared to the {\tt USER} scheme, topics learned by the {\tt TERM} scheme and the {\tt AT} model are closer to the topics of the {\tt MSG} scheme. Note that almost all the JS divergence values are far from $0$, which indicates that the probabilities of terms in each topic indeed differ.

\begin{figure}
\centering
\begin{tabular}{c}
\includegraphics[width=9cm]{per}\\
\includegraphics[width=9cm]{nmi1}
\end{tabular}
\caption{Perplexity (Top) and Normalized Mutual Information (Bottom)}
\label{fig:per}
\end{figure}
From JS divergence, we conclude that the probabilities learned are different but we do not know how these differences may influence the relative positions of terms ranked in the topics. Therefore, the second metric we use is to measure the difference between rankings of terms obtained by topics. As shown in Table \ref{table:js_divergence}, while some of the terms found by different schemes are all ranked highly (e.g., haiti, relief), the exact ranking position is not the same. By looking at the discrepancy between rankings, we can understand how topics deviate from each other and how different models agree with each other. Here, we use Kendall's $\tau$ to measure the agreement between rankings. Given two different rankings of the same $m$ items, Kendall's $\tau$ is defined as $\tau = \frac{P-Q}{P+Q}$ where $P$ is the number of pairs of items in the two rankings that are concordant and $Q$ is the number of pairs of items that are discordant. $\tau$ ranges from $-1$ to $1$, with $1$ meaning the two rankings are identical and $-1$ meaning one is in the reverse order of the other. If $\tau=0$, it means that 50\% of the pairs are concordant while 50\% of the pairs are discordant. We take the top $500$ terms ranked by ``similar'' models identified by minimal JS divergence and calculate the $\tau$ values. In the bottom of Figure \ref{fig:js}, we show the results of $\tau$ values between ``similar'' topics. Two immediate observations can be made. First, the disagreement between the {\tt MSG} scheme and the {\tt USER} scheme is substantially larger than between other schemes. Second, as the number of topics increases, the disagreement increases.

Next, we would like to know the quality of topics found by the models. For this question, we will test all the models discussed in previous sections. The dataset we used is still the topical classification dataset containing sixteen categories. First, we look at the perplexity of different models, shown in the top of Figure \ref{fig:per}. The X-axis is the number of topics while the Y-axis is perplexity values. It is clear that perplexity decreases for all models, as the number of topics increases. For the particular dataset, the perplexity value achieves lowest point when the number of topics is around 40 to 100. In general, {\tt CL}, {\tt AT-CL} and {\tt BAT} achieve better performance on perplexity while {\tt TERM} scheme is significantly worse. In addition to perplexity, we can measure the quality by how likely the topics agree with the true category labels since we know the ground truth label of all the messages in the dataset (their categories). Here, we use Normalized Mutual Information (NMI), which can be defined as follows:$\mbox{NMI}(\Omega,\mathbb{C}) = \frac{I(\Omega,\mathbb{C})}{[H(\Omega) + H(\mathbb{C})]/2}$ where $I(\Omega,\mathbb{C})$ is mutual information between set $\Omega$ and $\mathbb{C}$ and $H(A)$ is the entropy. NMI is always a number between $0$ and $1$. NMI may achieve $1$ if the clustering results can exactly match category labels while $0$ if two sets are independent. Details of the calculation of NMI can be found in \cite{Manning2008}. For each message, we use the maximum value in topic mixture $\theta$ to determine its cluster, which leads to a ``hard'' clustering result. After this mapping process, we compute NMI with the labels and the results are shown in the bottom of Figure \ref{fig:per}. From the figure, we see that NMI values are low in general. In general, {\tt CL}, {\tt AT-CL} and {\tt BAT} achieve similar results on NMI, which is significantly better than simple schemes. 
Clusters assigned by the {\tt USER} scheme matches labels better than other schemes. In conclusion, topics obtained by different schemes usually vary substantially. As shown in the experiments, {\tt CL}, {\tt AT-CL} and {\tt BAT} models might achieve better agreement with predefined labels, if available.
%==============================================================
\subsection{Predicting Popular Messages}
%==============================================================
\begin{table}
%\centering
\tbl{The performance on retweet prediction\label{table:retweets}}{
\begin{tabular}{|l||c|c|c|} \hline
Method & Precision & Recall & F$_{1}$ \\ \hline \hline
{\tt TF-IDF}	& 0.4216 & 0.3999 	& 0.4105 \\ \hline
{\tt MSG} (100) & 0.5088 & 0.2837 	& 0.3643 \\ \hline
{\tt USER} (40) & 0.6075 	& 0.3677 & 0.4581 \\ \hline
{\tt TERM} (70) & 0.5292 	& 0.3061 & 0.3879 \\ \hline
{\tt CL} (100) & 0.5214 	& 0.3734 	& 0.4352 \\ \hline
{\tt AT-CL} (80) & 0.5306 	& 0.3517 	& 0.4230 \\ \hline
{\tt BAT} (80) & 0.6111	& 0.3648 & 0.4569 \\ \hline \hline
{\tt TF-IDF} + {\tt MSG}	& 0.5150 & 0.3546 	& 0.4200 \\ \hline
{\tt TF-IDF} + {\tt USER}	& 0.6142 & 0.3897 	& 0.4768 \\ \hline
{\tt TF-IDF} + {\tt TERM}	& 0.5303 & 0.3582 	& 0.4276 \\ \hline
{\tt TF-IDF} + {\tt CL}	& 0.4736 & 0.4322 	& 0.4520 \\ \hline
{\tt TF-IDF} + {\tt AT-CL}	& 0.5561 & 0.4202 	& 0.4788 \\ \hline
{\tt TF-IDF} + {\tt BAT}	& 0.5714 &0.4168 & 0.4820 \\ \hline
\end{tabular}}
\end{table}
In this section, we would like to see how the schemes and models discussed can influence  classification performance. Here, we consider the problem of predicting potential ``retweets''. Remember, we treat the problem as a classification problem where the input is a set of features and the output tells us whether the target message will be retweeted in the future or not.

We first use {\tt TF-IDF} weighting scores as features and train a Logistic Regression classifier. The result is shown in the first row of Table \ref{table:retweets}. Then, we train topic models according to the different schemes and obtain topic mixture $\theta$ for both messages and authors as introduced in the Section \ref{sec:methodology}. For different models, we only report the best performance and its corresponding number of topics. We only test the number of topics in the range of $20$ to $150$. The results are shown from the second row to the $7$-th row in Table \ref{table:retweets}. The first conclusion we can draw is that the results obtained by simple schemes ({\tt MSG} and {\tt TERM} schemes) are worse than the baseline, {\tt TF-IDF}, while only the topics trained by {\tt USER} scheme significantly outperform the baseline. All other extensions perform comparable to the baseline. In the last sub-section, we see that the topics trained by these extensions achieve higher NMI value, which implies that these models might more likely match the underlying category information. Although other schemes do not perform well, we notice that the Precision is improved by all models. If we argue that Precision is more valuable in this task (because once we make a ``positive'' decision, we have less chance to be wrong), we can conclude that topic models indeed help us.

Some literature \cite{Blei2003} suggested that if we solely use topic mixture as features, we may not achieve better performance than {\tt TF-IDF}. Thus, we combine topic model features and {\tt TF-IDF} features and obtain the results in the second half (from $8$-th row to the bottom) of the Table \ref{table:retweets}. The results are trained on a classifier using the best performing topic model features with {\tt TF-IDF} features. We can see that most of them improve performance and {\tt TF-IDF} with {\tt BAT} model outperforms the previous best one that only uses the topic features.

In this task, we see that although sometimes topic features may not outperform simple {\tt TF-IDF} features, it is good practice to combine them. Extension models consistently provide good results, compared to simple schemes.

%==============================================================
\subsection{User \& Message Classification}
%==============================================================
\begin{table}
\centering
\tbl{The performance on message classification\label{table:message_classification}}{
\begin{tabular}{|c||c|c|c|c|c|c|c|} \hline
Category & {\tt TF-IDF} & {\tt USER} ({\tt AT}) & {\tt MSG} & {\tt TERM} & {\tt CL} & {\tt AT-CL} & {\tt BAT} \\ \hline \hline
0	& 0.3000	& 0.5000	& 0.5000	& 0.3500	&0.5000	&0.3000	&0.5321 \\ \hline
1	& 0.2143	& 0.0000	& 0.3036	& 0.0000	&0.4123	&0.3036	&0.4224\\ \hline
2	& 0.2756	& 0.5128	& 0.1218	&0.1218	    &0.6102	&0.5755	&0.5534\\ \hline
3	& 0.5909	& 0.9583	& 0.9583	&0.5909	    &0.8843	&0.8342	&0.8266\\ \hline
4	& 0.4722	& 0.8223	& 0.6934	&0.5332	    &0.8329	&0.8082	&0.8301\\ \hline
5	& 0.1250	& 0.0000	& 0.0000	&0.1013	    &0.1347	&0.1552	&0.1517\\ \hline
6	& 0.2577	& 0.3814	& 0.1753	&0.1502	    &0.4551	&0.4337	&0.4451\\ \hline
7	& 0.3553	& 0.8899	& 0.8899	&0.3748	    &0.9017	&0.9101	&0.9115\\ \hline
8	& 0.3459	& 0.9082	& 0.8894	&0.5023	    &0.9045	&0.9138	&0.9093\\ \hline
9	& 0.6471	& 0.7386	& 0.8693	&0.7019	    &0.8714	&0.8863	&0.9008\\ \hline
10	& 0.5544	& 0.8718	& 0.8277	&0.5264	    &0.8763	&0.8845	&0.8554\\ \hline
11	& 0.4026	& 0.8636	& 0.7403	&0.4933	    &0.9091	&0.8771	&0.8039\\ \hline
12	& 0.5350	& 0.8132	& 0.7749	&0.6881	    &0.8122	&0.8519	&0.8112\\ \hline
13	& 0.3553	& 0.5263	& 0.5263	&0.4761	    &0.5547	&0.5733	&0.4863\\ \hline
14	& 0.6220	& 0.9330	& 0.9732	&0.6876	    &0.9702	&0.9614	&0.9771\\ \hline
15	& 0.4185	& 0.9022	& 0.8451	&0.5913	    &0.9500	&0.9662	&0.9121\\ \hline \hline
Average & 0.4045 & 0.6639 & 0.6305 & 0.4306 & 0.7237 & 0.7022 & 0.7081 \\ \hline
\end{tabular}}
\end{table}
In this section, we demonstrate results of the second task, classifying messages and authors into topical categories. For message classification, recall that we have 274 users from 16 categories in the dataset. For each user, we assume that all the messages generated by this user fall into the same category as the user. Therefore, for message classification, we use 90\% of messages for training and 10\% for testing and report the results of 5-fold cross validation. The baseline method is to use the {\tt TF-IDF} weighting scores as features to train the classifier, which is shown in Table~\ref{table:message_classification}. Note that the category ids correspond to the categories introduced in Table~\ref{table:twitter_suggestion}. The overall accuracy is around 47\% where the highest performance is achieved in the ``Health'' and ``Sports'' categories.

Again, similar to the first task, we use the topic mixture $\theta$ for both messages and users learned by topic models as features. We only report the best performance with its number of topics while we test topic numbers from $10$ to $150$. Results obtained by different models are shown in Table~\ref{table:message_classification}. Note that the overall accuracy for all models is significantly improved and some results are even almost twice as accurate as raw {\tt TF-IDF} features. In addition, the result obtained by the {\tt TERM} scheme is significantly worse than the others. However, we also note that the classifier using features from simple schemes results in zero accuracy in some categories. For instance, category $1$ (``Books'') and category $5$ (``Family'') are two cases where the classifier does not correctly classify a single instance. One potential reason for this phenomenon is that the number of instances in these categories is significantly smaller than in other categories, which prevents the classifier and these simple schemes from learning enough information about them. Results obtained by extension models are steadily better than simple schemes and the highest performance is achieved by the {\tt CL} model.

Now, let us turn to the problem of classifying users into topical categories. Similar to message classification, we split off 90\% of messages and aggregate the messages in the training set for each user to build the user profiles. So, the training user profiles and testing profiles are always different and never mixed. Again, {\tt TF-IDF} is calculated as features for user profiles, which are aggregations of all messages generated by the same user. The baseline is shown in Table~\ref{table:user_classification}. Surprisingly, the performance is very high, almost twice as high as the baseline in message classification. For the categories ``Business'' and ``Charity'', the classifier distinguished all instances successfully. In fact, in our experiments, the classifier trained on topic features obtained by almost all models performs much worse than the baseline where only {\tt BAT} is slightly better. We only report the best performing results in Table~\ref{table:user_classification}. We notice that not only is the overall accuracy not as good as with {\tt TF-IDF} features but using topic features also results in several zero accuracies in different categories. One reason is again that the content in those categories is limited. An interesting point is that if we combine {\tt TF-IDF} features with topic features, the overall performance is still around 90\% (in fact, with only marginal improvement). Remember, for user profiles, we crawled the latest 150 updates for each user, if available. Therefore, for most users, the profile already contains enough information to learn from. This situation is significantly different from message classification where we have the problem of sparsity.

Compared to the results on message classification where topic features play an important role in improving the performance and user classification where topic features fail to outperform the baseline, we believe that topic models can help us model short text while for longer content, more sophisticated models might be required to improve performance (e.g., Supervised LDA~\cite{Blei2007}, Labeled LDA~\cite{Ramage2009}).
\begin{table}
\centering
\tbl{The performance on user classification\label{table:user_classification}}{
\begin{tabular}{|c||c|c|c|c|c|c|c|} \hline
Category & {\tt TF-IDF} & {\tt USER} ({\tt AT}) & {\tt MSG} & {\tt TERM} & {\tt CL} & {\tt AT-CL} & {\tt BAT} \\ \hline \hline
0	&0.5000	&0.0000	&0.6667	&0.0000	&0.6667	&0.6667	&0.6667 \\ \hline
1	&0.6667	&0.0000	&0.6667	&0.0000	&0.3333	&0.3333	&0.6667 \\ \hline
2	&1.0000	&0.0000	&0.5000	&0.0000	&0.6250	&0.6250	&0.6250\\ \hline
3	&1.0000	&0.5333	&0.5333	&0.3333	&0.6667	&0.6667	&0.8667\\ \hline
4	&0.9756	&0.5610	&0.9524	&0.2381	&0.9524	&0.9524	&0.9762\\ \hline
5	&0.5000	&0.0000	&1.0000	&0.0000	&0.2500	&0.5000	&0.7500\\ \hline
6	&0.4000	&0.0000	&0.6000	&0.0000	&0.6000	&0.8000	&0.8000\\ \hline
7	&0.7895	&0.1053	&0.7895	&0.3158	&0.5263	&0.5263	&0.8421\\ \hline
8	&0.8261	&0.0000	&0.8696	&0.4348	&0.8261	&0.8696	&0.9130\\ \hline
9	&0.8750	&0.5000	&0.5556	&0.5556	&0.7778	&0.6667	&0.7778\\ \hline
10	&0.9767	&0.6279	&0.6977	&0.5349	&0.7674	&0.9070	&0.8140\\ \hline
11	&0.8750	&0.0000	&0.6250	&0.0000	&0.9375	&0.8750	&0.9375\\ \hline
12	&1.0000	&0.7600	&0.7407	&0.7037	&0.8148	&0.9259	&0.9259\\ \hline
13	&0.5000	&0.0000	&0.5000	&0.2500	&0.7500	&0.7500	&0.7500\\ \hline
14	&0.9474	&0.7895	&0.8974	&0.8462	&0.9231	&0.8974	&0.8718\\ \hline
15	&0.8636	&0.3182	&0.8636	&0.9091	&0.9091	&0.9545	&0.9545\\ \hline
Average & 0.7935 & 0.2622 & 0.7161 & 0.3201 & 0.7079 & 0.7448 & 0.8211 \\ \hline
\end{tabular}}
\end{table}

%==============================================================
\section{Discussion \& Summary}\label{sec:conclusion}
%==============================================================
We discussed a number of training schemes and extensions to standard topic models to address the issues of short text modeling especially in microblogging environments in this paper, shedding some light on how research on topic models can be conducted for short text scenarios. More specifically, through our experiments, we demonstrate that the effectiveness of trained topic models can be highly influenced by the length of the ``documents'', namely a better model can be trained by aggregating short messages and relaxing the basic assumptions of topic models. In addition, it is beneficial to incorporate a background model as {\tt BAT} does. Our empirical study showed that the topic modeling approach can be very useful for short text, either as standalone features or as complementary features, for multiple real-world tasks. We also showed that when content information is already large enough (e.g., in user classification), topic models become less effective compared to simple {\tt TF-IDF} scores. Moreover, through the experiments, we showed that the simple extensions do yield better modeling for messages and users and indeed are better than {\tt LDA} and {\tt AT} with simple training schemes.

In this paper, we conducted extensive qualitative and quantitative experiments on three proposed schemes based on standard {\tt LDA} and three extended models. We compared a number of aspects about these schemes and models including how the topics learned by these models differ from each other and their quality. In addition, we showed how topic models can help other applications, such as classification problems. In the experiments, we demonstrated that topic models learned from aggregated messages by the same user may lead to better performance in classification problems and some simple extensions to standard models can improve performance in general.

\bibliographystyle{acmsmall}
\bibliography{source}
\begin{comment}
%==============================================================
\section{Appendix A: Variational Inference for {\tt AT}}\label{sec:vf_at}
%==============================================================
!!! The derivation here is for the original AT model

Here, we derive detailed equations of variational inference for {\tt AT} model. First, the joint distribution is:
\begin{equation}
P(\mathbf{w} | \BoldGreek{\Theta}, \BoldGreek{\Phi}, \mathcal{A}) = \prod_{d=1}^{D} \prod_{i=1}^{N_{d}} \frac{1}{\mathbf{A}_{d}} \sum_{a \in \mathbf{a}_{d}} \sum_{k=1}^{K} \phi_{k,v_{w_{i}}} \theta_{a,k}
\end{equation}We introduce the following fully factorized variational distribution over all parameters:
\begin{equation}
q(\mathbf{x},\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi}) = \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \prod_{a=1}^{A} q(\BoldGreek{\theta}_{a}\,|\,\BoldGreek{\eta}_{a})\prod_{d=1}^{D}\prod_{i=1}^{N_{d}} q(x_{d,i} \, | \,  \xi_{d,i}) q(z_{d,i} \, | \, \rho_{d,i})
\end{equation}where $\BoldGreek{\phi}_{k}$ is a Dirichlet distribution, $\BoldGreek{\theta}_{a}$ is a Dirichlet distribution, $\xi_{d,i}$ is a uniform distribution and $\rho_{d,i}$ is a multinomial distribution. The lower bound we optimize is:
\begin{equation}
\log P(\mathbf{w} \, | \, \BoldGreek{\alpha}, \BoldGreek{\beta}, \mathcal{A}) \geq \mathbb{E}_{q}[\log P(\mathbf{w},\mathbf{z},\mathbf{x},\BoldGreek{\Theta},\BoldGreek{\Phi} \, | \, \BoldGreek{\alpha} , \BoldGreek{\beta}, \mathcal{A})] - \mathbb{E}_{q} [\log q(\mathbf{x},\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi} \, | \, \BoldGreek{\xi}, \BoldGreek{\rho} , \BoldGreek{\eta}, \BoldGreek{\lambda})]
\end{equation}We expand the lower bound as follows:
{\small{\begin{eqnarray}
&& l =\sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \Biggr\{ \mathbb{E}_{q}[\log P(x_{i} | \mathbf{A}_{d})] +  \mathbb{E}_{q}[\log P(z_{i} | x_{i}, \BoldGreek{\Theta})] + \mathbb{E}_{q}[\log P(w_{i} | z_{i}, \BoldGreek{\Phi})] \Biggl\} + \sum_{k=1}^{K} \mathbb{E}_{q}[\log P(\phi_{k} | \BoldGreek{\beta})] \nonumber \\
&& + \sum_{a=1}^{A} \mathbb{E}_{q}[\log P(\theta_{a} | \BoldGreek{\alpha})] - \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{E}_{q}[\log q(z_{i},x_{i})] - \sum_{k=1}^{K} \mathbb{E}_{q}[\log q(\BoldGreek{\phi}_{k})] - \sum_{a=1}^{A} \mathbb{E}_{q}[\log q(\BoldGreek{\theta}_{a})] \nonumber \\
&& = \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{a=1}^{A_{d}} \xi_{i,a} \log \frac{1}{A_{d}} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{a=1}^{A_{d}} \sum_{k=1}^{K} \xi_{i,a} \rho_{i,k} \Bigr[ \Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) \Bigl] \nonumber \\
&& + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \sum_{v=1}^{V} w_{i,v} \rho_{i,k} \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] +
\frac{1}{C(\BoldGreek{\beta})}\sum_{k=1}^{K} \sum_{v=1}^{V} (\beta_{v}-1) \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] \nonumber \\
&& + \frac{1}{C(\BoldGreek{\alpha})}\sum_{a=1}^{A}\sum_{k=1}^{K}(\alpha_{k}-1) \Bigr[ \Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) \Bigl]  - \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{a=1}^{A_{d}} \xi_{i,a} \log \xi_{i,a} - \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \rho_{i,k} \log \rho_{i,k} \nonumber \\
&& - \sum_{k=1}^{K} \Bigr[ \sum_{v=1}^{V} (\lambda_{k,v}-1) (\Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) ) - \sum_{v=1}^{V} \log \Gamma(\lambda_{k,v}) + \log \Gamma(\sum_{v=1}^{V} \lambda_{k,v}) \Bigl] \nonumber \\
&& - \sum_{a=1}^{A} \Bigr[ \sum_{k=1}^{K} (\eta_{a,k}-1) (\Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) ) - \sum_{k=1}^{K} \log \Gamma(\eta_{a,k}) + \log \Gamma(\sum_{k=1}^{K} \eta_{a,k})\Bigl]
\end{eqnarray}}}Taking the derivatives with respect to variational parameters and setting them to zero, we can solve the updating equations as follows:
\begin{eqnarray}
\xi_{i,a} & \propto & \exp \Bigr[ \sum_{k=1}^{K} \rho_{i,k}( \Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j})) \Bigl] \nonumber \\
\rho_{i,k} & \propto & \exp \Bigr[ \sum_{a=1}^{A_{d}} \xi_{i,a} (\Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j})) \Bigl] \exp \Bigr[ \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \xi_{i,a} \rho_{i,k} \nonumber \\
\lambda_{k,v} & = & \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v) \rho_{i,k} \nonumber
\end{eqnarray}

Since the author of all word positions is observed in Twitter, we only have three sets of parameters in the model: $\mathbf{z}$, $\BoldGreek{\Theta}$ and $\BoldGreek{\Phi}$. We introduce the following fully factorized variational distribution over all parameters:
\begin{equation}
q(\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi}) = \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \prod_{a=1}^{A} q(\BoldGreek{\theta}_{a}\,|\,\BoldGreek{\eta}_{a})\prod_{d=1}^{D}\prod_{i=1}^{N_{d}} q(z_{d,i} \, | \, \rho_{d,i})
\end{equation}where $\BoldGreek{\phi}_{k}$ is a Dirichlet distribution, $\BoldGreek{\theta}_{a}$ is a Dirichlet distribution, and $\rho_{d,i}$ is a multinomial distribution. The lower bound we optimize is:
\begin{equation}
\log P(\mathbf{w} \, | \, \BoldGreek{\alpha}, \BoldGreek{\beta}, \mathcal{A}) \geq \mathbb{E}_{q}[\log P(\mathbf{w},\mathbf{z}, \BoldGreek{\Theta},\BoldGreek{\Phi} \, | \, \BoldGreek{\alpha} , \BoldGreek{\beta}, \mathcal{A})] - \mathbb{E}_{q} [\log q(\mathbf{z},\BoldGreek{\Theta}, \BoldGreek{\Phi} \, | \, \BoldGreek{\rho} , \BoldGreek{\eta}, \BoldGreek{\lambda})]
\end{equation}We expand the lower bound (the right hand side) as follows:
{\small{\begin{eqnarray}
&& l =\sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{E}_{q}[\log P(w_{i}, z_{i} | a_{d}, \BoldGreek{\Theta}, \BoldGreek{\Phi})] + \sum_{k=1}^{K} \mathbb{E}_{q}[\log P(\phi_{k} | \BoldGreek{\beta})] + \sum_{a=1}^{A} \mathbb{E}_{q}[\log P(\theta_{a} | \BoldGreek{\alpha})] \nonumber \\
&& - \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{E}_{q}[\log q(z_{i})] - \sum_{k=1}^{K} \mathbb{E}_{q}[\log q(\BoldGreek{\phi}_{k})] - \sum_{a=1}^{A} \mathbb{E}_{q}[\log q(\BoldGreek{\theta}_{a})] \nonumber \\
&& = \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \rho_{i,k} \Bigr[ \Psi(\eta_{a_{d},k}) - \Psi(\sum_{j=1}^{K} \eta_{a_{d},j}) \Bigl]  + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \sum_{v=1}^{V} w_{i,v} \rho_{i,k} \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] \nonumber \\
&& + \frac{1}{C(\BoldGreek{\beta})}\sum_{k=1}^{K} \sum_{v=1}^{V} (\beta_{v}-1) \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] + \frac{1}{C(\BoldGreek{\alpha})}\sum_{a=1}^{A}\sum_{k=1}^{K}(\alpha_{k}-1) \Bigr[ \Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) \Bigl]  \nonumber \\
&& - \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \rho_{i,k} \log \rho_{i,k} - \sum_{k=1}^{K} \Bigr[ \sum_{v=1}^{V} (\lambda_{k,v}-1) (\Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) ) - \sum_{v=1}^{V} \log \Gamma(\lambda_{k,v}) \nonumber \\
&& + \log \Gamma(\sum_{v=1}^{V} \lambda_{k,v}) \Bigl] - \sum_{a=1}^{A} \Bigr[ \sum_{k=1}^{K} (\eta_{a,k}-1) (\Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) ) - \sum_{k=1}^{K} \log \Gamma(\eta_{a,k}) + \log \Gamma(\sum_{k=1}^{K} \eta_{a,k})\Bigl] \nonumber
\end{eqnarray}}}Taking the derivatives with respect to variational parameters and setting them to zero, we can solve the updating equations as follows:
\begin{eqnarray}
\rho_{i,k} & \propto & \exp \Bigr[ (\Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j})) \Bigl] \exp \Bigr[ \Psi(\lambda_{k,w_{i}})-\Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \mathbb{I}(a_{d} = a) \sum_{i=1}^{N_{d}} \rho_{i,k} \nonumber \\
\lambda_{k,v} & = & \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{I}(w_{i} = v) \rho_{i,k} \nonumber
\end{eqnarray}
%==============================================================
\section{Appendix B: Variational Inference for {\tt bAT}}\label{sec:vf_bat}
%==============================================================
We introduce the following fully factorized variational distribution over all parameters:
\begin{eqnarray}
q(\mathbf{z},\mathbf{x}, \BoldGreek{\Theta}, \BoldGreek{\Phi}, \BoldGreek{\phi}^{B}, \BoldGreek{\pi}) = q(\BoldGreek{\pi} \, | \, \BoldGreek{\gamma}) q(\BoldGreek{\phi}^{B} \, | \, \BoldGreek{\lambda}^{B}) \prod_{k=1}^{K} q(\BoldGreek{\phi}_{k} \,|\, \BoldGreek{\lambda}_{k}) \prod_{a=1}^{A} q(\BoldGreek{\theta}_{a}\,|\,\BoldGreek{\eta}_{a})\prod_{d=1}^{D} q(x_{d} \, | \, \xi_{d}) q(z_{d} \, | \, \rho_{d}) \nonumber
\end{eqnarray}where $\BoldGreek{\phi}_{k}$ is a Dirichlet distribution, $\BoldGreek{\theta}_{a}$ is a Dirichlet distribution, and $\rho_{d,i}$ is a multinomial distribution. The lower bound we optimize is:
\begin{eqnarray}
\log P(\mathbf{w} \, | \, \BoldGreek{\alpha}, \BoldGreek{\beta}, \mathcal{A}) \geq \mathbb{E}_{q}[\log P(\mathbf{w},\mathbf{z}, \mathbf{x}, \BoldGreek{\Theta},\BoldGreek{\Phi}, \BoldGreek{\phi}^{B}, \BoldGreek{\pi} \, | \, \BoldGreek{\alpha} , \BoldGreek{\beta}, \BoldGreek{\zeta}, \mathcal{A})] - \mathbb{E}_{q} [\log q(\mathbf{z},\mathbf{x}, \BoldGreek{\Theta}, \BoldGreek{\Phi}, \BoldGreek{\phi}^{B}, \BoldGreek{\pi})] \nonumber
\end{eqnarray}We expand the lower bound as follows:
{\small{\begin{eqnarray}
l &=& \sum_{d=1}^{D} \mathbb{E}_{q} [\log P(x_{d} \, | \,  \BoldGreek{\pi})] + \sum_{d=1}^{D} \mathbb{E}_{q}[\log P(z_{d} \, | \, a_{d},\BoldGreek{\Theta})] + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \mathbb{E}_{q}[\log P(w_{i} \, | \, x_{d}, z_{d}, \BoldGreek{\Phi},\BoldGreek{\phi}^{B})] \nonumber \\
&+& \sum_{k=1}^{K} \mathbb{E}_{q}[\log P(\BoldGreek{\phi}_{k} \, | \, \BoldGreek{\beta})] + \sum_{a=1}^{A} \mathbb{E}_{q}[\log P(\BoldGreek{\theta}_{a} \, | \, \BoldGreek{\alpha})] + \mathbb{E}_{q} [\log P(\BoldGreek{\phi}^{B} \, | \, \BoldGreek{\beta})] + \mathbb{E}_{q} [ \log P(\BoldGreek{\pi} \, | \, \BoldGreek{\zeta})] \nonumber \\
&-&\sum_{d=1}^{D} \mathbb{E}_{q} [\log q(x_{d})] - \sum_{d=1}^{D} \mathbb{E}_{q} [\log q(z_{d})]  - \sum_{k=1}^{K} \mathbb{E}_{q}[\log q(\BoldGreek{\phi}_{k})] - \sum_{a=1}^{A} \mathbb{E}_{q}[\log q(\BoldGreek{\theta}_{a})] \nonumber \\
&-& \mathbb{E}_{q} [\log q(\BoldGreek{\phi}^{B})] - \mathbb{E}_{q} [ \log q(\BoldGreek{\pi})] \nonumber \\
&=& \sum_{d=1}^{D} \sum_{s=0}^{1} \xi_{d,s} \Bigr[ \Psi(\gamma_{s}) - \Psi(\gamma_{0} + \gamma_{1}) \Bigl] + \sum_{d=1}^{D} \sum_{k=1}^{K} \rho_{d,k} \Bigr[ \Psi(\eta_{a_{d},k}) - \Psi(\sum_{j=1}^{K} \eta_{a_{d},j})\Bigl] \nonumber \\
&+& \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} \xi_{d,0} w_{i,v} \Bigr[ \Psi(\lambda_{v}^{B}) - \Psi(\sum_{j=1}^{V} \lambda_{j}^{B})\Bigl] + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \sum_{v=1}^{V} \xi_{d,1} \rho_{d,k} w_{i,v} \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j})\Bigl] \nonumber \\
&+& \frac{1}{C(\BoldGreek{\beta})}\sum_{k=1}^{K} \sum_{v=1}^{V} (\beta_{v}-1) \Bigr[ \Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) \Bigl] + \frac{1}{C(\BoldGreek{\alpha})}\sum_{a=1}^{A}\sum_{k=1}^{K}(\alpha_{k}-1) \Bigr[ \Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) \Bigl] \nonumber \\
&+& \frac{1}{C(\BoldGreek{\beta})} \sum_{v=1}^{V} (\beta_{v}-1) \Bigr[ \Psi(\lambda_{v}^{B}) - \Psi(\sum_{j=1}^{V} \lambda_{j}^{B}) \Bigl] + \frac{1}{C(\BoldGreek{\zeta})} \sum_{s=0}^{1} (\zeta_{s}-1) \Bigr[ \Psi(\gamma_{s}) - \Psi(\gamma_{0}+\gamma_{1}) \Bigl] \nonumber \\
&-& \sum_{d=1}^{D} \sum_{s=0}^{1} \xi_{d,s} \log \xi_{d,s} - \sum_{d=1}^{D} \sum_{k=1}^{K} \rho_{d,k} \log \rho_{d,k} - \sum_{k=1}^{K} \Bigr[ \sum_{v=1}^{V} (\lambda_{k,v}-1) (\Psi(\lambda_{k,v}) - \Psi(\sum_{j=1}^{V} \lambda_{k,j}) ) \nonumber \\
&-&  \sum_{v=1}^{V} \log \Gamma(\lambda_{k,v}) + \log \Gamma(\sum_{v=1}^{V} \lambda_{k,v}) \Bigl] - \sum_{a=1}^{A} \Bigr[ \sum_{k=1}^{K} (\eta_{a,k}-1) (\Psi(\eta_{a,k}) - \Psi(\sum_{j=1}^{K} \eta_{a,j}) ) - \sum_{k=1}^{K} \log \Gamma(\eta_{a,k}) \nonumber \\
&+& \log \Gamma(\sum_{k=1}^{K} \eta_{a,k})\Bigl] - \Bigr[ \sum_{v=1}^{V} (\lambda_{v}^{B}-1) (\Psi(\lambda_{v}^{B}) - \Psi(\sum_{j=1}^{V} \lambda_{j}^{B}) )-\sum_{v=1}^{V} \log \Gamma(\lambda_{v}^{B}) + \log \Gamma(\sum_{v=1}^{V} \lambda_{v}^{B}) \Bigl] \nonumber \\
&-& \Bigr[ \sum_{s=0}^{1} (\gamma_{s}-1) (\Psi(\gamma_{s}) - \Psi(\gamma_{0}+\gamma_{1}))-\sum_{s=0}^{1} \log \Gamma(\gamma_{s}) + \log \Gamma(\gamma_{0}+\gamma_{1}) \Bigl]
\end{eqnarray}}}Taking the derivatives with respect to variational parameters and setting them to zero, we can solve the updating equations as follows:
\begin{eqnarray}
\xi_{d,0} & \propto & \exp \Bigr[\Psi(\gamma_{0}) - \Psi(\gamma_{0}+\gamma_{1})\Bigl] \exp \Bigr[ \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} w_{i,v} (\Psi(\lambda_{v}^{B})-\Psi(\sum_{j=1}^{V} \lambda_{j}^{B})) \Bigl] \nonumber \\
\xi_{d,1} & \propto & \exp \Bigr[\Psi(\gamma_{1}) - \Psi(\gamma_{0}+\gamma_{1})\Bigl] \exp \Bigr[ \sum_{i=1}^{N_{d}} \sum_{k=1}^{K} \sum_{v=1}^{V} \rho_{d,k} w_{i,v} (\Psi(\lambda_{k,v})-\Psi(\sum_{j=1}^{V} \lambda_{k,j})) \Bigl] \nonumber \\
\rho_{d,k} & \propto & \Bigr[ \Psi(\eta_{a_{d},k})-\Psi(\sum_{j=1}^{K} \eta_{a_{d},j}) \Bigl] \exp \Bigr[ \sum_{i=1}^{N_{d}} \sum_{v=1}^{V} \xi_{d,1} w_{i,v} (\Psi(\lambda_{k,v})-\Psi(\sum_{j=1}^{V} \lambda_{k,j})) \Bigl] \nonumber \\
\eta_{a,k} & = & \alpha_{k} + \sum_{d=1}^{D} \mathbb{I}(a_{d} = a) \sum_{i=1}^{N_{d}} \rho_{d,k} \nonumber \\
\lambda_{k,v} & = & \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \xi_{d,1} \rho_{d,k}\mathbb{I}(w_{i} = v)  \nonumber \\
\lambda_{v}^{B} &=& \beta_{v} + \sum_{d=1}^{D} \sum_{i=1}^{N_{d}} \xi_{d,0} \mathbb{I}(w_{i} = v)  \nonumber \\
\gamma_{s} &=& \zeta_{s} + \sum_{d=1}^{D} \xi_{d,s} \nonumber
\end{eqnarray}
\end{comment}
\end{document}

