%!TEX root=main.tex


\section{Problem Definition}\label{sec:ProblemDefinition}

\noindent{\bf Data Model.} Let $G=(V,E)$ be an undirected social graph, where $V$ is the set of vertices representing users in the network and $E$ is the set of edges in $G$ denoting social links. For each vertex $v$, $R_v$ is a set of records attached to $v$ (e.g., microblogs published by the user). A record $r \in R_v$ is formulated as $r=\langle r.v,r.W,r.t \rangle$ where:
\begin{itemize}
\item $r.v$ shows which vertex this record belongs to.
\item $r.W$ is a set of tokens contained in $r$.
\item $r.t$ denotes the time when the record is published by $r.v$.
\end{itemize}

%\footnote{We use node and user interchangeably throughout the paper.}

%Many ways have been examined to quantify the social distance and one widely adopted approach is the \textit{Jaccard Distance} \cite{SocialAnnotations:Li:WWW:2007,SocialDiscovery:Li:WWW:2008,InformationForaging:Pirolli:COGS:2005,Folksonomies:Specia:ESWC:2007,SocialAnalytics:Aggarwal:2011}.

Each edge is associated with a weight, which quantifies
the social distance between the two vertices.
In this paper, we adopt the \textit{Jaccard Distance}
which has been widely used \cite{SocialAnnotations:Li:WWW:2007,SocialDiscovery:Li:WWW:2008,InformationForaging:Pirolli:COGS:2005,Folksonomies:Specia:ESWC:2007,SocialAnalytics:Aggarwal:2011}; however,
our method can be easily extended to other functions. The weight between user $v$ and its neighbor $v'$, denoted as $S(v,v')$, is the \textit{Jaccard Distance} between their neighbor sets $N(v)$ and $N(v')$, where $N(x)=\{n \mid (x,n)\in E\}$.
\vspace{-2mm}
\begin{equation}\label{eq:neighbor_weight}
S(v,v') = 1 - \frac{|N(v) \cap N(v')|}{|N(v)\cup N(v')|} \quad \text{s.t.}\ (v,v') \in E
\end{equation}
\vspace{-2mm}



\noindent\begin{figure}[t]
\vspace{-6mm}
\begin{minipage}[c]{0.23\textwidth}
\includegraphics[width=\textwidth]{pics/social_graph}
\end{minipage}
\begin{minipage}[c]{0.27\textwidth}
    \scalebox{0.75}{
    \begin{tabular}{|l|c|l|}
    \hline
    rid:user & \bf{TS} & \multicolumn{1}{|c|}{Keywords} \\ \hline
    $r0$:$u_{10}$ & 0.1 & (\quotes{icde},0.8),(\quotes{nus},0.1) \\ \hline
    $r1$:$u_1$ & 0.1 & (\quotes{icde},0.9)\\ \hline
    $r2$:$u_7$ & 0.1 & (\quotes{icde},0.1),(\quotes{nus},0.5) \\ \hline
    $r3$:$u_2$ & 0.2 & (\quotes{icde},0.6),(\quotes{nus},0.2) \\ \hline
    $r4$:$u_3$ & 0.3 & (\quotes{icde},0.7),(\quotes{nus},0.2) \\ \hline
    $r5$:$u_5$ & 0.4 & (\quotes{icde},0.4)\\ \hline
    $r6$:$u_4$ & 0.6 & (\quotes{icde},0.8)\\ \hline
    $r7$:$u_7$ & 0.7 & (\quotes{icde},0.8), (\quotes{nus},0.1) \\ \hline
    $r8$:$u_2$ & 0.7 & (\quotes{icde},0.7)\\ \hline
    $r9$:$u_5$ & 0.8 & (\quotes{icde},0.2),(\quotes{nus},0.2) \\ \hline
    $r10$:$u_9$ & 0.9 & (\quotes{icde},0.1),(\quotes{nus},0.4) \\ \hline
    $r11$:$u_{11}$ & 1.0 & (\quotes{icde},0.7)\\ \hline
    \end{tabular}
    }
\end{minipage}
\caption{Social network example with the posted records, ordered by time from oldest to most recent.}
\label{fig:social_graph}
\end{figure}
\vspace{-2mm}


%The only assumption we made is the similarity distance function is a proper metric %which satisfies the triangle inequality, because such constraint is reasonable in %nature and adopted widely in this area. \rtext{Add existing work which incorporated %triangle inequality.} The triangle inequality in the social network can be %represented as:
%\begin{equation}
%S(v_a,v_c) \leq \Phi (S(v_a,v_b) , S(v_b,v_c)) \;\; \forall v_a,v_b,v_c \in V
%\end{equation}
%$\Phi$ is the aggregation function and in our case is the sum operator.

\noindent{\bf Query Model.}  A top-$k$ query $q$ on the social graph $G$ is represented as a vector $q=\langle q.v,q.W,q.t,q.k \rangle$ where:
 \begin{itemize}
 \item $q.v$ is the query user.
 \item $q.W$ is the set of query keywords.
 \item $q.t$ is the time when the query is submitted.
 \item $q.k$ is the number of desired output records.
 \end{itemize}

\noindent{\bf Ranking Model.} Given a query $q$, our objective is to find a set of $q.k$ records with the highest relevance. To quantify the relevance between each record and the query, we should consider the following aspects.


\noindent (1) {\it Social Relevance}: The social distance for two vertices $v\leftrightarrow\hspace{-1mm}v'$ is adopted as the shortest distance \cite{Qiao:2013:TNK:2536206.2536217,Bahmani:2012:PMB:2187836.2187891,Singla:2008:YCS:1367497.1367586,PersonSearch:Sihem:2008:VLDB,PersonSearch:Schenkel:2008:SIGIR}.

\vspace{-2mm}
\begin{equation}
S(v,v') = \min_{\text{path}~ v=v_0\dots v_k=v'} \sum_{i=0}^{k-1}S(v_i,v_{i+1})
\end{equation}
\vspace{-2mm}

\noindent The social relevance is computed as $\mathbf{SR}(v,v')=\max(0, 1-\frac{S(v,v')}{\mathit{maxD}})$ where $\mathit{maxD}$ is the
user-tolerable maximal distance.

\Comment{
By default, we  \reminder{LGL: I prefer $\mathbf{SD}=\max(0, 1-\frac{S(v,v')}{maxDist})$ where $maxDist$ is the user-tolerable maximal distance. By selecting a set of pivot vertices: $V_p$, we use $2\min_{v \in V_p}{\max_{v' \in V}S(v,v')}$ as an approximated normalizer as computing all-pairs distances is infeasible for large networks.}
}

\noindent (2) {\it Textual Relevance}: We adopt the well-known tf-idf based approach~\cite{TextSimilarity:Zobel}. Let $tf_{w,r}$ denote the frequency of keyword $w$ in $r$ whereas $idf_w$ is the inverse frequency of $w$ in the entire document collection. We represent textual relevance as a cosine similarity between $q.W$ and $r.W$:

\vspace{-2mm}
\begin{equation}
\mathbf{TS}(q.W,r.W) = \sum_{w \in q.W} tf_{w,r} \cdot idf_w
\label{eq:TermFrequency}
\end{equation}
%\begin{align}
%\mathbf{TS}(q.W,r.W)
%&= \sum_{w \in q.W} tf_{w,r} \cdot idf_w \label{eq:TermFrequency} \\
%&=\sum_{w \in q.W}
%(\frac{z_{w,r}}{\sqrt{\sum_{w \in r.W} z_{w,r}^2}})
%\cdot
%(\frac{z_w}{\sqrt{\sum_{w \in q.W} z_w^2}}) \nonumber
%\end{align}
\vspace{-2mm}

\noindent Specifically, $tf_{w,r}=z_{w,r}/ \sqrt{\sum_{w \in r.W} z_{w,r}^2}$ where $z_{w,r}$ is the number of occurrences of $w$ in $r.W$; and $idf_w=z_w/ \sqrt{\sum_{w \in q.W} z_w^2}$, where $z_w = \ln(1+|R|/df_w)$, $|R|$ is the total number of records posted and $df_w$ gives the number of records that contain $w$. 

\Comment{\reminder{you should first introduce tf and idf and then introduce other two notations.!!}}


%\begin{figure}[t]
%\includegraphics[width=0.25\textwidth]{pics/social_graph}
%\caption{Social Network Example}
%\label{fig:social_graph}
%\end{figure}
%\begin{table}[t]
%    \centering
%    \caption{Example of documents in the social network, the table is ordered by time from old to recent.}
%    \label{tab:RecordExample}
%    \begin{tabular}{|c|c|c|l|}
%    \hline
%    RID & UID & \bf{TS} & \multicolumn{1}{|c|}{Keywords} \\ \hline
%    $r0$ & $u_{10}$ & 0.1 & (\quotes{vldb},0.8),(\quotes{nus},0.1) \\ \hline
%    $r1$ & $u_1$ & 0.1 & (\quotes{vldb},0.9)\\ \hline
%    $r2$ & $u_7$ & 0.1 & (\quotes{vldb},0.1),(\quotes{nus},0.5) \\ \hline
%    $r3$ & $u_2$ & 0.2 & (\quotes{vldb},0.6),(\quotes{nus},0.2) \\ \hline
%    $r4$ & $u_3$ & 0.3 & (\quotes{vldb},0.7),(\quotes{nus},0.2) \\ \hline
%    $r5$ & $u_5$ & 0.4 & (\quotes{vldb},0.4)\\ \hline
%    $r6$ & $u_4$ & 0.6 & (\quotes{vldb},0.8)\\ \hline
%    $r7$ & $u_7$ & 0.7 & (\quotes{vldb},0.8), (\quotes{nus},0.1) \\ \hline
%    $r8$ & $u_2$ & 0.7 & (\quotes{vldb},0.7)\\ \hline
%    $r9$ & $u_5$ & 0.8 & (\quotes{vldb},0.2),(\quotes{nus},0.2) \\ \hline
%    $r10$ & $u_9$ & 0.9 & (\quotes{vldb},0.1),(\quotes{nus},0.4) \\ \hline
%    $r11$ & $u_{11}$ & 1.0 & (\quotes{vldb},0.7)\\ \hline
%    \end{tabular}
%\end{table}

\noindent (3) {\it Time Relevance}: The time freshness score $\mathbf{TF}$ is the normalized time difference between $q.t$ and $r.t$. In particular, let $t_{min}$ be the pre-defined oldest system time, then

\vspace{-2mm}
\begin{equation}
\mathbf{TF}(q.t,r.t) = \frac{r.t-t_{min}}{q.t-t_{min}}.
\end{equation}
\vspace{-2mm}

\noindent {\bf Overall Ranking Function}. Now, with the
social, textual and time relevances normalized to [0,1],
our overall ranking function is a linear combination of these three
components.
%which is a standard in keyword search related domain \cite{IRTree,NUS:TI}.


\vspace{-4mm}
\begin{equation}\label{eq:RankingFunction}
\Re(q,r)
\hspace{-1mm}=\hspace{-1mm}
\alpha \mathbf{TS}(q.W,r.W)
\hspace{-1mm}+\hspace{-1mm}
\beta \mathbf{SR}(q.v,r.v)
\hspace{-1mm}+\hspace{-1mm} \gamma  \mathbf{TF} (q.t,r.t)
\end{equation}
\vspace{-4mm}

\noindent where $\alpha,\beta,\gamma$ are user preference parameters for generic weighting functions, and $\alpha,\beta,\gamma \in [0,1]$. \reminder{it is better to give a hint about how to set the parameters!}

\begin{example}\label{exmp:rankmodel}
Fig.~\ref{fig:social_graph} is an example social network where all documents posted are listed. For each record $r_i$, its user id (UID), time score (\textbf{TS}), keywords and their frequencies are included. Suppose $\alpha=\beta=\gamma=1$ and $u_1$ expects to get the top-1 record that contains \quotes{icde}.
By Equation~\ref{eq:RankingFunction},
$r11$ is the desired result as $\Re(q_{u_1},r11) = 1.0+(1.0-0.4)+0.7=2.3$ has the maximum value among all records.
\end{example}

