% THIS IS SIGPROC-SP.TEX - VERSION 3.1
% WORKS WITH V3.2SP OF ACM_PROC_ARTICLE-SP.CLS
% APRIL 2009
%
% It is an example file showing how to use the 'acm_proc_article-sp.cls' V3.2SP
% LaTeX2e document class file for Conference Proceedings submissions.
% ----------------------------------------------------------------------------------------------------------------
% This .tex file (and associated .cls V3.2SP) *DOES NOT* produce:
%       1) The Permission Statement
%       2) The Conference (location) Info information
%       3) The Copyright Line with ACM data
%       4) Page numbering
% ---------------------------------------------------------------------------------------------------------------
% It is an example which *does* use the .bib file (from which the .bbl file
% is produced).
% REMEMBER HOWEVER: After having produced the .bbl file,
% and prior to final submission,
% you need to 'insert'  your .bbl file into your source .tex file so as to provide
% ONE 'self-contained' source file.
%
% Questions regarding SIGS should be sent to
% Adrienne Griscti ---> griscti@acm.org
%
% Questions/suggestions regarding the guidelines, .tex and .cls files, etc. to
% Gerald Murray ---> murray@hq.acm.org
%
% For tracking purposes - this is V3.1SP - APRIL 2009

\documentclass{acm_proc_article-sp}

\usepackage{times}
\usepackage{helvet}
\usepackage{courier}
\usepackage{amssymb,amsmath}
\usepackage{algorithmic}
\usepackage{algorithm}
\usepackage{subfigure}
\usepackage{graphicx}
%\usepackage[lofdepth,lotdepth]{subfig}


\DeclareGraphicsExtensions{.pdf,.png,.jpg,.mps}
\usepackage{mystyle}
\begin{document}

\title{Location-sensitive Resources Recommendation in Social Tagging Systems} 
%\subtitle{[Blind Review]
%\titlenote{A full version of this paper is available as
%\textit{Author's Guide to Preparing ACM SIG Proceedings Using
%\LaTeX$2_\epsilon$\ and BibTeX} at
%\texttt{www.acm.org/eaddress.htm}}}
%
% You need the command \numberofauthors to handle the 'placement
% and alignment' of the authors beneath the title.
%
% For aesthetic reasons, we recommend 'three authors at a time'
% i.e. three 'name/affiliation blocks' be placed beneath the title.
%
% NOTE: You are NOT restricted in how many 'rows' of
% "name/affiliations" may appear. We just ask that you restrict
% the number of 'columns' to three.
%
% Because of the available 'opening page real-estate'
% we ask you to refrain from putting more than six authors
% (two rows with three columns) beneath the article title.
% More than six makes the first-page appear very cluttered indeed.
%
% Use the \alignauthor commands to handle the names
% and affiliations for an 'aesthetic maximum' of six authors.
% Add names, affiliations, addresses for
% the seventh etc. author(s) as the argument for the
% \additionalauthors command.
% These 'additional authors' will be output/set for you
% without further effort on your part as the last section in
% the body of your article BEFORE References or any Appendices.

%\numberofauthors{0} %  in this sample file, there are a *total*
% of EIGHT authors. SIX appear on the 'first-page' (for formatting
% reasons) and the remaining two appear in the \additionalauthors section.
%
%\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author


%}
% There's nothing stopping you putting the seventh, eighth, etc.
% author on the opening page (as the 'third row') but we ask,
% for aesthetic reasons that you place these 'additional authors'
% in the \additional authors block, viz.

\date{30 July 1999}
% Just remember to make sure that the TOTAL number of authors
% is the number that will appear on the first page PLUS the
% number that will appear in the \additionalauthors section.

\maketitle
\begin{abstract}
Social tagging systems have attracted increasing popularity on the web in recent years. Users can search resources effectively through tag-based keyword matching techniques. As more and more people upload resources using mobile devices, location information associated with resources is available in many real systems such as Flickr and Picasa, but it is not used by most existing algorithms. However, this information can be helpful for improving search results. In this paper, we consider how the meaning of words changes among different areas; we intend to generate different resource lists for different regions and return the appropriate one according to where the user is searching. We propose to divide the world into several regions for a given query, which may refer to different concepts in different regions. After the location partition is done, we show how to integrate the location dimension into some existing algorithms to predict the quality of resources for a given query. Using real-world datasets, we empirically show that our location-sensitive resource recommendation algorithms significantly outperform some state-of-the-art algorithms. 
\end{abstract}

% A category with the (minimum) three required fields
\category{H.3.3}{Information Storage and Retrieval}{Information Search and Retrieval}
%A category including the fourth, optional field follows...
%\category{D.2.8}{Software Engineering}{Metrics}[complexity measures, performance measures]

\terms{Algorithms, Experimentation, Performance}

\keywords{ranking, resources recommendation, location-sensitive} % NOT required for Proceedings
\begin{figure*}
\centering
\includegraphics[width=0.75\textwidth]{af1_crop} 
\caption{An Example of Tag Polysemy Induced by Location Factor}\label{afig:1}
\end{figure*}
\section{Introduction}
Social tagging systems have attracted increasing popularity on the web in recent years, such as YouTube, Flickr, Last.fm and so on. Unlike traditional information systems, a set of users can assign various tags to resources based on their interests. Resources are described, distinguished by different tags and searched through tags conveniently. The past several years have witnessed the rapid growth of social tagging systems in terms of both the numbers of users and resources. For example, YouTube  had more than 48.2 million users\footnote{http://www.numberof.net/number-of-youtube-users/} and about 797 million\footnote{http://www.numberof.net/number-of-videos-on-youtube/} videos had been posted in YouTube as of February 2010.
 
Collaborative filtering is the basic and popular technique for recommendation systems, which uses matrix factorization such as SVD to approximate and predict \cite{DBLP:journals/computer/KorenBV09}. Recent research interests cover social recommendation \cite{DBLP:conf/sigir/MaKL09,DBLP:conf/recsys/JamaliE10}, which tends to employ probabilistic matrix factorization with gradient descent optimization to improve the prediction quality. Studies focusing on social tagging systems have been increasing rapidly since these systems became popular all over the world. Much effort has been devoted to two key aspects of improving the users' experience: tag recommendation and appropriate resource searching techniques. The objective of tag recommendation is to suggest tags for users to annotate a given resource, e.g., videos and photos. Several studies \cite{DBLP:conf/www/SigurbjornssonZ08,DBLP:conf/mm/ChenCCTHW08} have presented various approaches to recommend tags for some popular tagging systems, i.e., Flickr, Delicious, Last.fm and so on. Hotho et al. \cite{DBLP:conf/esws/HothoJSS06} also propose personalized tag recommendation, which takes the user's tagging history into account to make the recommendation person-sensitive.

On the other side, several studies are interested in searching for relevant resources in social tagging systems. LSI is the basic and fundamental algorithm for recommendation and searching, which uses a mathematical technique called singular value decomposition (SVD) to recognize patterns and relationships between terms. \comment{One of the state-of-the-art techniques is FolkRank, which seems to be a modified version of PageRank. FolkRank tries to evaluate the importance weight of every resource and selects resources with higher weights based on the assumption that key taggers with important tags would make the annotated resources important.} One of the state-of-the-art techniques is CubeLSI \cite{DBLP:conf/icde/BiLKC11}, which is a third-order extension of LSI trying to incorporate the dimension of users by employing an extended version of SVD called tensor decomposition. CubeLSI tends to address the problems of polysemy and synonymy induced by users.

Recently, some studies \cite{DBLP:conf/sigmod/LuLC11,DBLP:conf/sigmod/CaoCJO11} consider that location is also a crucial factor affecting the search results. The authors of \cite{DBLP:conf/sigmod/LuLC11,DBLP:conf/sigmod/CaoCJO11} tend to evaluate a resource based on the combination of its location proximity with textual similarity, which means they try to make the locations of the returned resource and the searcher as close as possible.
However, our work is significantly different from the previous ones as we consider that the impact of location is word-sensitive. Figure~\ref{afig:1} shows an example of searching ``sand'' in different places in the world. \comment{The query ``mouse'' may refer to different meanings according to where the search is conducted. If the person searches around the zoo, it is more likely that the searcher expects to get resources about the real animal. However, the information about Mickey mouse may be a better choice for the searcher in Disneyland. The searcher may want to get resources about the computer device when searching in a computer shop. In this case, the word ``mouse'' is associated with different objects and concepts in different areas.} The query ``sand'' may refer to different meanings according to where the search is conducted. If the user searches in Africa, it is more likely that the searcher expects to get resources about the desert and dunes. However, information about the beach, ocean and sand may be a better choice for the searcher in Hawaii. The searcher may want to get resources about Marina Bay Sands, which is one of the famous Singapore casino hotels, when searching in Singapore. In this case, the word ``sand'' is associated with different objects and concepts in different areas. Another word, ``NBA'', appears to be global, which means that people all over the world consider it as the preeminent basketball league in North America. As a result, we can evaluate the text similarity of resources around the world regardless of location information in this case. From the above example, we find that it is crucial to generate a location partition such that the query refers to the same object within a region and implies different concepts among different regions. The studies in \cite{DBLP:conf/sigmod/LuLC11,DBLP:conf/sigmod/CaoCJO11} focus on efficiency (query time), and there is little analysis and few experimental results about the quality of the returned resources. 
The approaches in \cite{DBLP:conf/sigmod/LuLC11,DBLP:conf/sigmod/CaoCJO11} may not be able to address the problems of synonymy and polysemy in tags since they consider the text documents only. To the best of our knowledge, no previous studies take the polysemy induced by the location factor into account when searching resources in social tagging systems. Therefore, in this paper, we try to incorporate location information into resource searching and address the problems introduced by involving location information. We aim to automatically divide the whole world into several regions for a given query such that the query within a region means more or less the same thing while it refers to different concepts among different regions. After generating the area division, we show how to extend some existing 3D (item, tag and user) models to a 4D version including the location dimension to realize location-sensitive recommendation.

The rest of the paper is organized as follows. Section \ref{relatedwork} reviews related previous work on recommendation and tagging systems. Section \ref{prob} presents the formal definition of the problem we focus on. Section \ref{moti} gives a motivating example about why and how to utilize location information. Section \ref{sec:LDR} describes our approach of location partition and of incorporating the location dimension into existing models. Section \ref{exp} shows how our algorithm improves the search results at different locations. Section \ref{conclusion} concludes our paper and suggests promising future work.


\section{RELATED WORK}\label{relatedwork}
 
The recommendation systems have attracted quite a lot of attention, which mainly rely on collaborative filtering using the mathematical technique called matrix factorization (MF) \cite{DBLP:journals/computer/KorenBV09,DBLP:conf/nips/SalakhutdinovM07}. Given a user-item matrix, MF aims to find a low-ranked approximation to fit it by minimizing the loss function. With the approximation, the recommendation system can predict the ratings for missing entries in the original user-item matrix.

With their increasing popularity, there has been much work on social tagging systems. One way to improve these systems is to employ tag recommendation when users want to annotate resources. Sigurbj{\"o}rnsson and Zwol \cite{DBLP:conf/www/SigurbjornssonZ08} collect tag co-occurrence information to measure the similarities between tags and then use tag aggregation to place similar tags in one set. Based on the tag aggregation, the recommendation system is able to recommend tags related to the given tags. 

Heymann, Ramage, and Garcia-Molina \cite{DBLP:conf/sigir/HeymannRG08} try to predict tags based on the page text, anchor text, surrounding hosts and other applied tags. They present an entropy-based metric to capture the generality of the tags and analyze the performance of the tag prediction. They also find that tag-based association rules can produce high-precision predictions with reasonable insights into the relationships between tags.

Personalized tag recommendation has been proposed to involve user information to make the recommendation more diverse according to different searchers. FolkRank \cite{DBLP:conf/esws/HothoJSS06}, as an adaptation of PageRank \cite{DBLP:journals/cn/BrinP98}, uses an undirected, weighted, tripartite graph to represent resources, tags and users. FolkRank iteratively updates the weights of vertices and finally ranks tags based on the weights. This weight-propagation strategy assumes that votes cast by important users with important resources would make the tags important. 
Rendle et al. \cite{DBLP:conf/kdd/RendleMNS09} propose to rank tags based on Tucker decomposition (TD) and optimize the model parameters using the ranking criterion AUC (area under the ROC-curve). In \cite{DBLP:conf/uai/RendleFGS09}, the Bayesian personalized ranking (BPR) framework is introduced for the model optimization. \comment{As a special case of RTF, the canonical decomposition (CD) model is introduced to get a better runtime complexity while the prediction quality is sacrificed.} Another model, pairwise interaction tensor factorization (PITF) \cite{DBLP:conf/wsdm/RendleS10}, is proposed to guarantee both the prediction performance and runtime complexity. From this observation, we find that ranking resources for a given query is somewhat similar to ranking tags for a specific resource. Techniques for tag recommendation can therefore also be employed for resource searching problems. In a later section, we will show how to make use of the BPR framework and extend TD and PITF with a location dimension to rank resources for a given query.

As an important research field of social tagging systems, however, searching relevant resources has attracted less attention. Besides the basic Latent Semantic Indexing (LSI) method, CubeLSI has been proposed as an extension of LSI \cite{DBLP:conf/icde/BiLKC11} which integrates users as another dimension to detect users' effect on tag assignments. Unlike traditional LSI, CubeLSI tends to measure the query and resource similarity at the concept level. CubeLSI employs Tucker decomposition to extract tag relationships and generates concepts consisting of sets of tags by clustering. Then the matching between queries and resources is carried out at the semantic concept level instead of the tag level.
Some studies \cite{DBLP:conf/sigmod/LuLC11,DBLP:conf/sigmod/CaoCJO11} try to detect resources that are relevant to the query and as close as possible to a given location. Lu, Lu, and Cong \cite{DBLP:conf/sigmod/LuLC11} present a hybrid index tree called the IUR-tree, which combines location proximity with textual similarity. Based on the IUR-tree, they further design a branch-and-bound search algorithm to get the closest and most relevant resources. Our study is significantly different from these works in two ways: (1) We aim at studying location impact in social tagging systems rather than in general database systems relying on resources' content. (2) We propose a query-driven location division algorithm to generate location dimension information for every query.

\section{PROBLEM STATEMENT}\label{prob}
In this section, we describe the problem we focus on in this study and introduce some notations and definitions used in this paper.

A social tagging system consists of four types of components: a set of users (taggers) $U$, a set of tags $T$ containing $N_T$ tags, a set of resources $R$ and a set of locations $L$, where $l=(l_o,l_a)\in L$. $l_o$ and $l_a$ are longitude and latitude respectively. A set of tagging information is given as $S\subseteq U\times T\times R\times L$. A quadruple $(u, t, r, l)\in S$ means that a user $u\in U$ assigns a tag $t\in T$ to a resource $r\in R$ at location $l\in L$. In this paper, we are interested in recommending a list of resources for a given query $q$, where $q=(u_q, t_q, l_q)$ indicates that $u_q$ searches $t_q$ at $l_q$. This problem can be formulated as a ranking problem and aims to get a score function $\hat{Y}: U\times T\times L\times R\rightarrow \mathbb{R}$ which could give a score $\hat{y}_{u,t,l,r}$ for every $(u,t,l,r)$ tuple. Finally, we will return a set of top $N$ resources, which is defined as:
\begin{equation}\label{eq1}
\mathcal{TN}(u_q,t_q,l_q,N):=\arg\max^{N}_{r\in R}\hat{y}_{u_q,t_q,l_q,r},
\end{equation}
where $N$ is the number of resources to be returned.
\section{A MOTIVATING EXAMPLE}\label{moti}
In this section, we use an example to show how location information affects and improves the search results and the motivation of our techniques to solve location-sensitive resources recommendation.
\subsection{Tensor Representation}
\comment{\begin{figure}[h]
\centering
 \subfloat[aa][Records data from Flickr]{
   \begin{tabular}{|c|c|c|c|}\hline
Tagging Records&Resource&Tag&Location\\ \hline
1&$r_1$&$t_1$&$l_1$\\ \hline
2&$r_1$&$t_2$&$l_1$\\ \hline
3&$r_2$&$t_1$&$l_1$\\ \hline
4&$r_2$&$t_2$&$l_1$\\ \hline
5&$r_3$&$t_1$&$l_1$\\ \hline
6&$r_3$&$t_2$&$l_1$\\ \hline
7&$r_4$&$t_1$&$l_2$\\ \hline
8&$r_4$&$t_3$&$l_2$\\ \hline
9&$r_5$&$t_1$&$l_3$\\ \hline
10&$r_5$&$t_4$&$l_3$\\ \hline
\end{tabular}
 }
 \subfloat[bb][Corresponding Tensor $S\in\{0,1\}^\(3\times3\times3\)$]{
  \includegraphics[width=0.4\textwidth]{eg1}\label{fig:subfig1}
 }
\end{figure}}
\begin{figure*}[t]
\centering
\comment{\subfigure[Records data from Flickr]{\label{aeg1}\includegraphics[width=0.5\textwidth]{eg2-crop}}}
\subtable[Records Data from Flickr]{\label{aeg1}
\begin{tabular}{|c|c|c|c|}\hline
Tagging Records&Resource&Tag&Location\\ \hline
1&$r_1$&$t_1$&$l_1$\\ \hline
2&$r_1$&$t_2$&$l_1$\\ \hline
3&$r_2$&$t_1$&$l_1$\\ \hline
4&$r_2$&$t_2$&$l_1$\\ \hline
5&$r_3$&$t_1$&$l_1$\\ \hline
6&$r_3$&$t_2$&$l_1$\\ \hline
7&$r_4$&$t_1$&$l_2$\\ \hline
8&$r_4$&$t_3$&$l_2$\\ \hline
9&$r_5$&$t_1$&$l_3$\\ \hline
10&$r_5$&$t_4$&$l_3$\\ \hline
\end{tabular}}

\subfigure[Two-dimensional Data Aggregating over Location Dimension]{\label{aeg3}\includegraphics[width=0.35\textwidth,height=0.285\textwidth]{EG2N-crop}}
\subfigure[Three-dimensional Data Generated from Original Dataset]{\label{aeg2}\includegraphics[width=0.45\textwidth]{eg1-crop}}
%\caption{Two-dimensional Data Aggregating over Location Dimension}\label{aeg3}
\caption{Different Tensor Representation of Dataset}\label{aeg}
\end{figure*}
\comment{
\begin{figure}
\centering
\includegraphics[width=0.285\textwidth,height=0.285\textwidth]{eg3-crop} 
\caption{Two-dimensional Data Aggregating over Location Dimension}\label{aeg3}
\end{figure}}

To make the example easy to understand and the data visualized, we ignore the user factor at this point and focus on the resource, tag and location information. Figure~\ref{aeg1} shows some tagging records collected from Flickr, a social photo sharing system. The data is about 4 tags: $t_1$ (sand), $t_2$ (beach), $t_3$ (desert) and $t_4$ (marina) and 3 locations: $l_1$ (Hawaii), $l_2$ (Africa) and $l_3$ (Singapore). Each row of the data represents a tagging record that a tag $t$ is assigned to a resource $r$ at location $l$. Thus, each row can be represented as: $\exists u$ such that $(u,t,r,l)\in S$. In this example, the number of resources $|R|$ is 5, the number of tags $|T|$ is 4, the number of locations $|L|$ is 3 and the number of tag assignment $|S|$ is 10.

Unlike traditional Information Retrieval systems, which represent the collected data using a resource-term matrix, our method prefers to use a tensor $\mathcal{F}\in\{0,1\}^{|R|\times|T|\times|L|}$ to represent the gathered data. This tensor can be seen as a higher-order or extended matrix. The value of each entry in this tensor $\mathcal{F}$ is determined by
\begin{equation}\label{aeq1}
\mathcal{F}_{r,t,l}=
\begin{cases}
1\text{, if $\exists u$ such that $(u,t,r,l)\in S$}\\
0 \text{, otherwise}
\end{cases}.
\end{equation}
Figure~\ref{aeg2} visualizes the tensor $\mathcal{F}\in\{0,1\}^{5\times4\times3}$ obtained from the data in Figure~\ref{aeg1}. For example, $\mathcal{F}_{4,3,2}=1$ means resource $r_4$ at location $l_2$ has been annotated with tag $t_3$. Based on $\mathcal{F}$, we can obtain the tagging assignment information at a particular location. For instance,
\begin{equation}\label{aeq2}
\mathcal{F}_{:,:,1}=\begin{pmatrix} 
  1     & 1&0&0\\ 
  1     & 1&0&0\\
1     & 1&0&0\\
0 &0&0&0\\ 
0 &0&0&0
\end{pmatrix},
\end{equation}
which stores all the tagging records for resources in location $l_1$. Therefore, it offers us exhaustive information to analyze the co-occurrence of and relationships between tags to extract possible concepts at this specific location. Similarly, we have
\begin{equation}\label{aeq3}
\mathcal{F}_{:,:,2}=\begin{pmatrix} 
  0     & 0&0&0\\ 
    0     & 0&0&0\\ 
  0     & 0&0&0\\ 
1 &0&1&0\\ 
0     & 0&0&0
\end{pmatrix} \text{ and }
\mathcal{F}_{:,:,3}=\begin{pmatrix} 
  0     & 0&0&0\\ 
    0     & 0&0&0\\ 
  0     & 0&0&0\\ 
0 &0&0&0\\ 
1    & 0&0&1
\end{pmatrix}
\end{equation}
On the other hand, Figure~\ref{aeg3} shows the two-dimensional matrix used by traditional information retrieval. We use $F\in\{0,1\}^{|R|\times|T|}$ to denote this matrix. In particular, $F_{:,1}=(1,1,1,1,1)$, $F_{:,2}=(1,1,1,0,0)$, $F_{:,3}=(0,0,0,1,0)$ and $F_{:,4}=(0,0,0,0,1)$ carry the assignment information of tags $t_1$, $t_2$, $t_3$ and $t_4$ respectively.
\subsection{Why Integrating Location Dimension}
In this section, we illustrate how location information helps us solve the tag polysemy problem. Normally, information retrieval methods group highly related tags into a concept. In order to extract the concept a specific tag belongs to, we try to measure the similarity of tag pairs. Higher similarity implies that the tags are more likely to belong to the same concept.

In this example, we aim at extracting the concept to which tag $t_1$ (sand) belongs. First, we consider using the traditional information retrieval representation, the two-dimensional matrix $F$. We can get the distances among $t_1$, $t_2$, $t_3$ and $t_4$ as follows:
\begin{equation}\label{aeq4}
d_{1,2}=||F_{:,1}-F_{:,2}||_2=\sqrt{2}.
\end{equation}
\begin{equation}\label{aeq5}
d_{1,3}=||F_{:,1}-F_{:,3}||_2=\sqrt{4}.
\end{equation}
\begin{equation}\label{aeq6}
d_{1,4}=||F_{:,1}-F_{:,4}||_2=\sqrt{4}.
\end{equation}
Based on Eqs.~(\ref{aeq4}-\ref{aeq6}), we can see that tag pair $t_1$ (sand) and $t_2$  (beach) has higher similarity (lower distance). Therefore, tags  sand and beach are grouped together and represent a concept. Note that this concept is generated regardless of location information. This means tag sand is more related to beach than to desert even in Africa, which is counter-intuitive.

As a comparison, the three-dimensional tensor $\mathcal{F}$ enables us to calculate $D_{i,j,k}$, the distance between tags $t_i$ and $t_j$ at a particular location $l_k$. For instance, we have
\begin{equation}\label{aeq7}
D_{1,2,2}=||\mathcal{F}_{:,1,2}-\mathcal{F}_{:,2,2}||_2=1;
\end{equation}
\begin{equation}\label{aeq8}
D_{1,3,2}=||\mathcal{F}_{:,1,2}-\mathcal{F}_{:,3,2}||_2=0;
\end{equation}
\begin{equation}\label{aeq9}
D_{1,2,3}=||\mathcal{F}_{:,1,3}-\mathcal{F}_{:,2,3}||_2=1;
\end{equation}
\begin{equation}\label{aeq10}
D_{1,4,3}=||\mathcal{F}_{:,1,3}-\mathcal{F}_{:,4,3}||_2=0;
\end{equation}
From Eqs.~(\ref{aeq7}-\ref{aeq10}), we find that tags $t_3$ (desert) and $t_4$ (marina) have higher similarity (lower distance) with $t_1$ (sand), compared to $t_2$ (beach), at locations $l_2$ (Africa) and $l_3$ (Singapore) respectively. Thus, the system groups sand and desert together as a concept in Africa and searches for resources that are more related to this concept rather than to the tag sand when the query happens in Africa, which solves the polysemy problem. As a result, given two resources, both of which have the tag sand, the system prefers the one annotated with desert rather than beach. Similarly, in Singapore the system chooses to return resources that match the concept consisting of sand and marina instead of the one containing sand and beach.

We have seen that directly including the location dimension is meaningful and can help improve the search results; however, we still face some problems when making use of location information. First, the locations in the dataset are represented by longitude and latitude pairs, which are continuous real numbers. Thus, if we use them directly, the location dimension will become infinite, and we will not be able to get enough information about tag co-occurrence and similarity at a particular location. How to handle this problem is essential for utilizing location information. Furthermore, after solving the former problem and getting the tensor including the location dimension, we have to figure out how to make use of it to do searching. The given example just gives a rough idea, and we still have to go deeper and offer a formal approach. In the following section, we present our proposed method to address these two problems.

\section{LOCATION-SENSITIVE RESOURCES RECOMMENDATION}\label{sec:LDR}
In this section, we will present how to rank existing resources with location information. Our approach has two stages. In the first stage, we try to realize location division for a given query. The goal of this stage is to divide the whole world into several regions such that the given query refers to the same concept within a region while the meaning of the query differs among regions. Consequently, the location information is discrete since we look at the regions instead of longitude and latitude pairs. After the location division is done, the location dimension is generated and every region corresponds to a distinct value in the location dimension. Afterwards, we will present an algorithm utilizing the tensor which includes the location dimension to calculate the score function $\hat{Y}$. We will talk about it in detail later.

\subsection{Location Division}
We try to give a division of the world $L=L_1\oplus L_2\oplus\dots\oplus L_k$, where $k$ is the number of possible regions for a specific tag $t_q$\footnote{In this study, we consider all the search words in a query as a tag.} associated with the given query $q=(u_q,t_q,l_q)$. At the end of this section, a function $f_L(\cdot)$: $(t,l)\rightarrow L_i (1\leq i \leq k)$ for every $t\in T$ and $l\in L$ is generated, which means that for a given $t$, $f_L$ maps $l$ into one region $L_i$. Intuitively, we consider that if two locations are close, people in these two locations are more likely to have the same understanding of a tag. On the other hand, if a tag always co-occurs with the same other tags in two locations, it tends to be reasonable to assign these two locations to the same region. Thus, the following work tries to generate a dataset consisting of instances including location information and the relationships of $t_q$ with other tags. For every $r,l$ pair such that $(u,t_q,r,l)\in S$ for some $u$, we have an instance $\mathbf{x}=(f_T(t_q, t_1,r,l),f_T(t_q,t_2,r,l),\dots,f_T(t_q,t_{N_T},r,l),l)$, where $f_T(t_j,t_k,r,l)$ is defined as follows:
\begin{equation}\label{eq2}
f_T(t_j,t_k,r,l)=\frac{c(t_k,r,l)-c(t_j,r,l)}{\max|c(t_k,r,l)-c(t_j,r,l)|},
\end{equation}
where $c(t,r,l)$ is the number of occurrences of $t$ in $r$ at $l$. Eq.(\ref{eq2}) measures the differences of $t_q$ with all the other tags and scales them to the interval [-1,1]. Every instance $\mathbf{x}$ consists of the relationships between $t_q$ and other tags and the location information. Thus, our dataset for location division is denoted as:
\begin{equation}\label{eq3}
\begin{split}
 \mathbf{X}=&\{\mathbf{x}=(f_T(t_q, t_1,r,l),f_T(t_q,t_2,r,l),\dots,f_T(t_q,t_{N_T},r,l),l)\\
&|\exists (u,t_q,r,l)\in S\}.
\end{split}
\end{equation}
\comment{
The advantage of this dataset generating approach is that $\mathbf{X}$ contains both positive and negative information for the following estimation, which means it includes $f_T(t_q,t_j,r,l)$ of situation where $(u,t_q,r,l)\in S\wedge(u,t_j,r,l)\notin S$ and $f_T(t_q,t_k,r,l)$ of situation where $(u,t_q,r,l)\notin S\wedge(u,t_k,r,l)\in S$.}
Note that we only consider the resources which have been annotated with $t_q$. This is because  if a tag does not appear at a location, it gives us little to no information about what it really refers to there. Thus we just ignore those resources.
Based on these observations, we want to create partitions of $\mathbf{X}$ to maximize a posteriori (MAP) estimates of parameters (we will introduce it soon). Accordingly, we have the divisions of the locations associated with $\mathbf{x}\in \mathbf{X}$. Later, we will show how to map other locations into the regions generated above.

Suppose the dataset $ \mathbf{X}$ is a mixture of $k$ multivariate normal distributions of dimension $d$ (we will give details of how to determine $k$ later), and let $ \mathbf{Z}=(z_1,z_2,\dots,z_{N_\mathbf{X}})$ be the latent variables that determine which distributions the observations come from. $N_\mathbf{X}$ is the number of instances in $\mathbf{X}$. Thus we have:
\begin{equation}\label{eq4}
\mathbf{X}|(\mathbf{Z}=i)\sim \mathcal{N}_d(\boldsymbol{\mu}_i,\Sigma_{i}), 1\leq i \leq k.
\end{equation}
Let $\mathcal{U}=[\boldsymbol{\mu}_i]$ and $\boldsymbol{\Sigma}=[\Sigma_i]$. Also we denote that:
\begin{equation}\label{eq5}
P(\mathbf{Z}=i)=\varphi_{i}, 1\leq i \leq k,
\end{equation}
such that:
%\begin{equation}\label{eq6}
$\sum_{i=1}^k\varphi_{i}=1.$
%\end{equation}
Let $\boldsymbol{\varphi}=[\varphi_{i}]\in\mathbb{R}_{+}^{k\times 1}$. The model parameters to be estimated are denoted as:
\begin{equation}\label{eq7}
\Theta=(\boldsymbol{\varphi},\mathcal{U},\boldsymbol{\Sigma}).
\end{equation}
The likelihood function is:
\begin{equation}\label{eq8}
\begin{split}
L(\Theta;\mathbf{X},\mathbf{Z})&=P(\mathbf{X},\mathbf{Z}|\Theta)\\
&=\prod_{j=1}^{N_\mathbf{X}}\sum_{i=1}^{k}\mathbb{I}(z_j=i)\varphi_{i} f_P(\mathbf{x}_j;\boldsymbol{\mu}_i,\Sigma_i),
\end{split}
\end{equation}
where $\mathbb{I}$ is the indicator function and $f_P$ is the probability density function of a multivariate normal.
Now we choose the expectation-maximization (EM) algorithm to find MAP estimates of $\Theta$. Thus, we obtain the following equations to update $\boldsymbol{\varphi},\mathcal{U},\boldsymbol{\Sigma}$:
\comment{Thus, for E step, we have:
\begin{equation}\label{eq10}
\begin{split}
Q(\Theta|\Theta^{(t)})&=E(\log L(\Theta;\mathbf{X},\mathbf{Z}))\\
&=\sum_{j=1}^{N_\mathbf{X}}\sum_{i=1}^{k}P_{ji}[\log\varphi_{i}-\frac{d}{2}\log(2\pi)
 -\frac{1}{2}\log|\Sigma_i|\\
&-\frac{1}{2}(\mathbf{x}_j-\boldsymbol{\mu}_i)^\top\Sigma_i^{-1}(\mathbf{x}_j-\boldsymbol{\mu}_i)],
\end{split}
\end{equation}
where $P_{ji}$ is the conditional probability of $\mathbf{x}_j$ belonging to the $i$-th distribution, which is determined by Bayes theorem and defined as:
\begin{equation}\label{eq11}
P_{ji}=P(z_j=i|\mathbf{x}_j;\Theta^{(t)})=\frac{\varphi_{i}^{(t)}f_P(\mathbf{x}_j;\boldsymbol{\mu}_i^{(t)},\Sigma_i^{(t)})}{\sum_{i=1}^{k}\varphi_{i}^{(t)}f_P(\mathbf{x}_j;\boldsymbol{\mu}_i^{(t)},\Sigma_i^{(t)})}.
\end{equation}

In the M step, it aims to maximize $Q(\Theta|\Theta^{(t)})$ as a function of $\Theta^{(t)}$ in order to get the estimate $\Theta^{(t+1)}$. As for $\boldsymbol{\varphi}$, we have:}
\begin{equation}\label{eq12}
\begin{split}
\boldsymbol{\varphi}^{(t+1)}&=\arg\max_{\boldsymbol{\varphi}}Q(\Theta|\Theta^{(t)})\\
&=\arg\max_{\boldsymbol{\varphi}}\Bigl(\sum_{j=1}^{N_\mathbf{X}}\sum_{i=1}^{k}P_{ji}^{(t)}\log \varphi_{i}\Bigr),
\end{split}
\end{equation}
\comment{
So, we have:
\begin{equation}\label{eq13}
\varphi_{i}^{(t+1)}=\frac{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}}{\sum_{j=1}^{N_\mathbf{X}}\sum_{i=1}^{k}
P_{ji}^{(t)}}=\frac{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}}{N_{\mathbf{X}}}.
\end{equation}

Then we consider the estimates of $\mathcal{U}$ and $\boldsymbol{\Sigma}$.
\begin{equation}\label{eq14}
\begin{split}
(\boldsymbol{\mu}_i^{(t+1)},\Sigma_i^{(t+1)})&=\arg\max_{\boldsymbol{\mu}_i,\Sigma_i}Q(\Theta|\Theta^{(t)})\\
&=\arg\max_{\boldsymbol{\mu}_i,\Sigma_i}\sum_{j=1}^{N_{\mathbf{X}}}P_{ji}\\
&[ -\frac{1}{2}\log|\Sigma_i|
-\frac{1}{2}(\mathbf{x}_j-\boldsymbol{\mu}_i)^\top\Sigma_i^{-1}(\mathbf{x}_j-\boldsymbol{\mu}_i)].
\end{split}
\end{equation}

Further, we have equations for updating $\mathcal{U}$ and $\boldsymbol{\Sigma}$ as follows:
}
\begin{equation}\label{eq15}
\boldsymbol{\mu}_i^{(t+1)}=\frac{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}\mathbf{x}_j}{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}}
\end{equation}
and
\begin{equation}\label{eq16}
\Sigma_i^{(t+1)}=\frac{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}(\mathbf{x}_j-\boldsymbol{\mu}_i^{(t+1)})(\mathbf{x}_j-\boldsymbol{\mu}_i^{(t+1)})^\top}{\sum_{j=1}^{N_\mathbf{X}}P_{ji}^{(t)}},
\end{equation}
where $P_{ji}$ is the conditional probability of $\mathbf{x}_j$ belonging to the $i$-th distribution, which is determined by Bayes theorem and defined as:
\begin{equation}\label{eq11}
P_{ji}^{(t)}=P(z_j=i|\mathbf{x}_j;\Theta^{(t)})=\frac{\varphi_{i}^{(t)}f_P(\mathbf{x}_j;\boldsymbol{\mu}_i^{(t)},\Sigma_i^{(t)})}{\sum_{i=1}^{k}\varphi_{i}^{(t)}f_P(\mathbf{x}_j;\boldsymbol{\mu}_i^{(t)},\Sigma_i^{(t)})}.
\end{equation}
After the EM algorithm terminates, we obtain the partitions $L_1,L_2,$ $\dots,L_k$ of the locations $l$ that are associated with some $\mathbf{x}\in \mathbf{X}$ by using the following equation:
\begin{equation}\label{eq17}
f_L(t_q,l)=\arg\max_{L_i}\varphi_if_P(\mathbf{x};\boldsymbol{\mu}_i,\Sigma_i).
\end{equation}
For the other locations $l$, which do not have $(u,t,r,l)\in S$ for any $u$, $t$ and $r$, we determine the region for them by:
\begin{equation}\label{eq18}
f_L(t_q,l)=\arg\min_{L_i}\min_{l_j\in L_i}D(l_j,l),
\end{equation}
where $D(l_j,l)$ is the Euclidean distance.
\textbf{Algorithm~\ref{alg:1}} summarizes the above process and gives procedure of how to generate location division.

\begin{algorithm}[H]
\caption{Location Division Algorithm by Maximizing a Posteriori (\textbf{LD$_\textrm{{MAP}}$})}
 \label{alg:1}
\begin{algorithmic}[1]
\REQUIRE{$t_q$: tag used for location division; $U$: a set of users; $T$: a set of tags; $R$: a set of resources; $L$: a set of locations; $S$: a set of tagging information.
}
\ENSURE{the predictive function $f_L$.}



\STATE Generate the dataset $\mathbf{X}$ using Eqs.~(\ref{eq2}-\ref{eq3}) with $t_q$, $U$, $T$, $R$, $L$, $S$;



\STATE k=0;

\REPEAT
\STATE k=k+1;
\STATE Initialize $\Theta=(\boldsymbol{\varphi},\mathcal{U},\boldsymbol{\Sigma})$;
	\REPEAT
\STATE Update $P_{ji}$ in light of Eq.~(\ref{eq11});
\STATE Update $\boldsymbol{\varphi}$ in light of Eq.~(\ref{eq12});
\STATE Update $\mathcal{U}$ in light of Eq.~(\ref{eq15});
\STATE Update $\boldsymbol{\Sigma}$ in light of Eq.~(\ref{eq16});
           \UNTIL convergence
\UNTIL $L(\Theta;\mathbf{X},\mathbf{Z})$ does not increase

\STATE For $\forall l\in L$ and $l$ is associated with some $\mathbf{x}\in \mathbf{X}$, get $f_L(t_q,l)$ in light of Eq.~(\ref{eq17});

\STATE For $\forall l\in L$ but $l$ is not associated with any $\mathbf{x}\in \mathbf{X}$, get $f_L(t_q,l)$ in light of Eq.~(\ref{eq18});

\RETURN $f_L$ 
\end{algorithmic}
\end{algorithm}
%\vspace{-5.5mm}
\subsection{Location-sensitive Resources Ranking}
In this subsection, we aim at getting the $\hat{Y}$ at a region level. $\hat{y}_{u,t,L_i,r}$ is calculated for every $u$, $t$, $L_i$ and $r$ rather than $\hat{y}_{u,t,l,r}$. Letting $f_L(t_q,l_q)=L_q$, we can rewrite our objective function in Eq.~(\ref{eq1}) as:
\begin{equation}\label{neq1}
\mathcal{TN}(u_q,t_q,L_q,N):=\arg\max^{N}_{r\in R}\hat{y}_{u_q,t_q,L_q,r}.
\end{equation}
In the remainder of this paper, we present some models to approximate and predict $\hat{Y}$. In this case, $\hat{Y}$ relies on the model parameters and we extend the Bayesian ranking introduced in \cite{DBLP:conf/uai/RendleFGS09} to optimize the model parameters based on some observations. After the model parameters are updated, $\hat{Y}$ can be worked out. We first describe how we extend Bayesian ranking and apply it to determine parameters based on our generated datasets. Note that the extended Bayesian ranking is not constrained to the models which we choose to predict $\hat{Y}$. Afterwards, two models are shown to approximate $\hat{Y}$ and the procedure of using extended Bayesian ranking to update the corresponding models' parameters will be given.
\subsubsection{Model Parameters Estimation}
As we assume that for a generated region, $t_q$ means somehow the same, we map $(u,t,l,r)$ to $(u,t,\l,r)$ where $f_L(t_q,l)=\l$ for every $(u,t,l,r)\in S$. Denote $S_L=\lbrace(u,t,\l,r)|\exists(u,t,l,r)\in S\wedge f_L(t_q,l)=\l\rbrace$. Intuitively, if $(u,t,L_i,r_1)\in S_L$ but $(u,t,L_i,r_2)$ $\notin S_L$, $\hat{y}_{u,t,\l,r_1}$ should be larger than $\hat{y}_{u,t,\l,r_2}$. Thus, with the observations as follows:
\begin{equation}\label{eq19}
\mathbf{Y}=\lbrace(u,t,L_i,r_1,r_2)|(u,t,L_i,r_1)\in S_L \wedge (u,t,L_i,r_2)\notin S_L \rbrace,
\end{equation}
we try to optimize the model parameters that score function $\hat{Y}$ relies on based on Bayes' theorem:
\begin{equation}\label{eq20}
p(\hat{Y}|\mathbf{Y}) \propto p(\mathbf{Y}|\hat{Y})p(\hat{Y}).
\end{equation}
Assuming the independence of the tag assignments, this results in the MAP estimator of $\hat{Y}$:
\begin{equation}\label{eq21}
\overline{\hat{Y}}=\arg\max_{\hat{Y}}\ln \prod_{(u,t,L_i,r_1,r_2)\in \mathbf{Y}}p((u,t,L_i,r_1,r_2)|\hat{Y})p(\hat{Y}).
\end{equation}
Model with model parameters $\boldsymbol{\Theta}$ which $\hat{Y}$ relies on is plugged in. We derive an estimator for $p((u,t,L_i,r_1,r_2)|\hat{Y})$ by using the score function $\hat{Y}$:
\begin{equation}\label{eq22}
p((u,t,L_i,r_1,r_2)|\hat{Y})=\sigma(\hat{y}_{u,t,\l,r_1,r_2}(\boldsymbol{\Theta})),
\end{equation}
where $\sigma$ is the logistic function $\sigma(x) =\frac{1}{1+e^{(-x)}}$ and $\hat{y}_{u,t,\l,r_1,r_2}(\boldsymbol{\Theta})$ $=\hat{y}_{u,t,\l,r_1}(\boldsymbol{\Theta})-\hat{y}_{u,t,\l,r_2}(\boldsymbol{\Theta})$. For convenience, we will write $\hat{y}_{u,t,\l,r_1,r_2}$ short for $\hat{y}_{u,t,\l,r_1,r_2}(\boldsymbol{\Theta})$. As for the prior $p(\boldsymbol{\Theta})$, we assume that the parameters are drawn from the normal distribution $\boldsymbol{\Theta} \sim \mathcal{N}(0,\sigma_{\boldsymbol{\Theta}}^{2}\mathbf{I})$. Thus, Eq.~(\ref{eq21}) can be reformulated as:
\begin{equation}\label{eq23}
\begin{split}
\overline{\boldsymbol{\Theta}}&=\arg\max_{\boldsymbol{\Theta}}\ln \prod_{(u,t,L_i,r_1,r_2)\in \mathbf{Y}}\sigma(\hat{y}_{u,t,\l,r_1,r_2})p(\boldsymbol{\Theta})\\
&=\arg\max_{\boldsymbol{\Theta}}\sum_{(u,t,\l,r_1,r_2)\in \mathbf{Y}}\ln\sigma(\hat{y}_{u,t,\l,r_1,r_2})-\lambda_{\boldsymbol{\Theta}}\lVert\boldsymbol{\Theta}\rVert^{2}_{F}.
\end{split}
\end{equation}
Since computing the full gradients is very time consuming as $\mathbf{Y}$ is very large, we choose to perform stochastic gradient descent on the randomly drawn cases. The gradient of Eq.~(\ref{eq23}) for a given case $(u,t,\l,r_1,r_2)$ with respect to a model parameter $\theta\in\boldsymbol{\Theta}$ is:
\begin{equation}\label{eq24}
\begin{split}
&\frac{\partial}{\partial\theta}(\ln\sigma(\hat{y}_{u,t,\l,r_1,r_2})-\lambda_{\boldsymbol{\Theta}}\lVert\boldsymbol{\Theta}\rVert^{2}_{F})\\ \propto& (1-\sigma(\hat{y}_{u,t,\l,r_1,r_2}))\frac{\partial}{\partial\theta}\hat{y}_{u,t,\l,r_1,r_2}-\lambda_{\theta}\theta.
\end{split}
\end{equation}
From Eq. (\ref{eq24}), we can see that only gradient of $\hat{y}_{u,t,\l,r_1,r_2}$ needs to be computed. \textbf{Algorithm~\ref{alg:2}} concludes the process of how to use Bayesian theorem and MAP to optimize model parameters.
\begin{algorithm}[H]
\caption{ Model Optimization by Using Bayesian Theorem and MAP (\textbf{BPR-OPT})}
 \label{alg:2}
\begin{algorithmic}[1]
\REQUIRE{$\mathbf{Y}$: observation dataset ;  $\boldsymbol{\Theta}$: model parameters.
}
\ENSURE{the predictive model parameters $\overline{\boldsymbol{\Theta}}$.}



\STATE Initialize $\boldsymbol{\Theta}$;


\REPEAT
\STATE Draw $(u,t,\l,r_1,r_2)$ from $\mathbf{Y}$;

\STATE $\boldsymbol{\Theta}\leftarrow \boldsymbol{\Theta}+$

$\alpha( (1-\sigma(\hat{y}_{u,t,\l,r_1,r_2}))\frac{\partial}{\partial\boldsymbol{\Theta}}\hat{y}_{u,t,\l,r_1,r_2}-\lambda_{\boldsymbol{\Theta}}\boldsymbol{\Theta})$;
\UNTIL convergence



\RETURN $\overline{\boldsymbol{\Theta}}$ 
\end{algorithmic}
\end{algorithm}
\subsubsection{Models for Predicting $\hat{Y}$}
Factorization models have proven to be a very popular and successful model class for recommendation and resources searching systems \cite{DBLP:conf/nips/SalakhutdinovM07,DBLP:conf/kdd/RendleMNS09}. One of the prominent models is Tucker decomposition, which has been widely used in recommendation systems. Thus, instead of simply using Euclidean distance to compute the similarity of tag pairs and matching resources with generated concepts in Section~\ref{moti}, we choose to employ factorization models. In the following, we will describe and extend the existing 3D Tucker decomposition and one of its variants to 4D versions to approximate $\hat{Y}$ and give details of how these models' parameters can be learned with \textbf{BPR-OPT}. All these models try to represent $\hat{Y}$, which can be seen as a 4-dimensional tensor.
\subsubsection{Four-dimensional Tucker Decomposition}
Tucker Decomposition (TD) factorizes a high-order cube into a core tensor and one factor matrix for each dimension:
\begin{equation}\label{neq2}
\hat{Y}^{TD}=\hat{C}\times\hat{U}\times\hat{T}\times\hat{L}\times\hat{R},
\end{equation}
which has the following model parameters:
\begin{equation}\label{neq3}
\begin{split}
\hat{C}\in \mathbb{R}^{k_u\times k_t\times k_l \times k_r}, \hat{U}\in \mathbb{R}^{|U|\times k_u},\\
\hat{T}\in \mathbb{R}^{|T|\times k_t},\hat{L}\in \mathbb{R}^{k\times k_l},\hat{R}\in \mathbb{R}^{|R|\times k_r}.
\end{split}
\end{equation}
In order to derive the gradient of model parameters in a more clear way, we give an alternative equivalent representation of Eq. (\ref{neq2}):
\begin{equation}\label{eq25}
\hat{y}_{u,t,\l,r}^{\text{TD}}=\sum_{\tilde{u}}\sum_{\tilde{t}}\sum_{\tilde{\l}}\sum_{\tilde{r}}\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r,\tilde{r}}.
\end{equation}
For learning the model parameters using \textbf{BPR-OPT}, the gradients $\frac{\partial}{\partial\theta}\hat{y}_{u,t,\l,r_1,r_2}$ used in Eq.~(\ref{eq24}) are:
\begin{equation}\label{eq26}
\begin{split}
\frac{\partial\hat{y}_{u,t,\l,r}^{\text{TD}}}{\partial\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}}&=\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r,\tilde{r}}\\
\frac{\partial\hat{y}_{u,t,\l,r}^{\text{TD}}}{\partial\hat{u}_{u,\tilde{u}}}&=\sum_{\tilde{t}}\sum_{\tilde{\l}}\sum_{\tilde{r}}\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r,\tilde{r}}\\
\frac{\partial\hat{y}_{u,t,\l,r}^{\text{TD}}}{\partial\hat{t}_{t,\tilde{t}}}&=\sum_{\tilde{u}}\sum_{\tilde{\l}}\sum_{\tilde{r}}\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}\hat{u}_{u,\tilde{u}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r,\tilde{r}}\\
\frac{\partial\hat{y}_{u,t,\l,r}^{\text{TD}}}{\partial\hat{\l}_{\l,\tilde{\l}}}&=\sum_{\tilde{u}}\sum_{\tilde{t}}\sum_{\tilde{r}}\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{r}_{r,\tilde{r}}\\
\frac{\partial\hat{y}_{u,t,\l,r}^{\text{TD}}}{\partial\hat{r}_{r,\tilde{r}}}&=\sum_{\tilde{u}}\sum_{\tilde{t}}\sum_{\tilde{\l}}\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}.
\end{split}
\end{equation}
Let $k_{\max}=\min(k_u,k_t,k,k_r)$. Thus, the runtime complexity of TD in this case is $O(k_{\max}^4)$. 
\comment{
\begin{algorithm}[H]
\caption{ \textbf{BPR-OPT} for TD}
 \label{alg:3}
\begin{algorithmic}[1]
\REQUIRE{$\mathbf{Y}$: observation dataset ;  $\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}$, $\hat{u}_{u,\tilde{u}}$, $\hat{t}_{t,\tilde{t}}$, $\hat{\l}_{\l,\tilde{\l}}$, $\hat{r}_{r,\tilde{r}}$: model parameters.
}
\ENSURE{the predictive model parameters $\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}$, $\hat{u}_{u,\tilde{u}}$, $\hat{t}_{t,\tilde{t}}$, $\hat{\l}_{\l,\tilde{\l}}$, $\hat{r}_{r,\tilde{r}}$.}



\STATE Initialize  $\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}$, $\hat{u}_{u,\tilde{u}}$, $\hat{t}_{t,\tilde{t}}$, $\hat{\l}_{\l,\tilde{\l}}$, $\hat{r}_{r,\tilde{r}}$;


\REPEAT
\STATE Draw $(u,t,\l,r_1,r_2)$ from $\mathbf{Y}$;

\STATE $\hat{y}_{u,t,\l,r_1,r_2}=\hat{y}_{u,t,\l,r_1}-\hat{y}_{u,t,\l,r_2}$;

\STATE $\gamma=1-\sigma(\hat{y}_{u,t,\l,r_1,r_2})$;

\FOR{$i_1=1 \to k_u$}
	\FOR{$i_2=1 \to k_t$}
		\FOR{$i_3=1 \to k$}
			\FOR{$i_4=1 \to k_r$}
				\STATE $\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}=\hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}}+\alpha(\gamma(\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r_1,\tilde{r}}-\hat{u}_{u,\tilde{u}}\hat{t}_{t,\tilde{t}}\hat{\l}_{\l,\tilde{\l}}\hat{r}_{r_2,\tilde{r}})-\lambda\cdot \hat{c}_{\tilde{u},\tilde{t},\tilde{\l},\tilde{r}} )$;
			\ENDFOR
		\ENDFOR
	\ENDFOR
\ENDFOR
\UNTIL convergence



\RETURN $\overline{\boldsymbol{\Theta}}$ 
\end{algorithmic}
\end{algorithm}
}

\subsubsection{Four-dimensional Pairwise Interaction Tensor Factorization}

A serious drawback of TD is its high time complexity. This motivated a variant, Pairwise Interaction Tensor Factorization (PITF), proposed in \cite{DBLP:conf/wsdm/RendleS10}. PITF considers only the two-way interactions between $r$ and the other dimensions. As we extend it to a 4D version, we obtain the following equation:
\begin{equation}\label{eq27}
\hat{y}_{u,t,\l,r}^{\text{PITF}}=\sum_{p}\hat{u}_{u,p}\cdot\hat{r}_{r,p}^{U}+\sum_{p}\hat{t}_{t,p}\cdot\hat{r}_{r,p}^{T}
+\sum_{p}\hat{\l}_{\l,p}\cdot\hat{r}_{r,p}^{L}
\end{equation}
with model parameters:
\begin{equation*}
\begin{split}
\hat{U}\in \mathbb{R}^{|U|\times p}, \hat{T}\in \mathbb{R}^{|T|\times p}, \hat{L}\in \mathbb{R}^{k\times p},\\
\hat{R}^{U}\in\mathbb{R}^{|R|\times p}, \hat{R}^{T}\in\mathbb{R}^{|R|\times p}, \hat{R}^{L}\in\mathbb{R}^{|R|\times p}.
\end{split}
\end{equation*}
From Eq.~(\ref{eq27}), we can derive the gradients of PITF as follows:
\begin{equation}\label{eq28}
\begin{split}
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{u}_{u,p}}=\hat{r}_{r,p}^{U}, 
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{t}_{t,p}}=\hat{r}_{r,p}^{T}, 
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{\l}_{\l,p}}=\hat{r}_{r,p}^{L}, \\
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{r}_{r,p}^{U}}=\hat{u}_{u,p}, 
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{r}_{r,p}^{T}}=\hat{t}_{t,p}, 
\frac{\partial \hat{y}_{u,t,\l,r}^{\text{PITF}}}{\partial \hat{r}_{r,p}^{L}}=\hat{\l}_{\l,p}.
\end{split}
\end{equation}
Apparently, the runtime of \textbf{BPR-OPT} using PITF model is linear in $O(p)$. After we get the gradients of parameters, we can utilize them for updating these parameters according to Eq.~(\ref{eq24}) when running \textbf{BPR-OPT}. After \textbf{BPR-OPT} is done, $\hat{Y}$ could be computed based on the parameters in light of Eq.~(\ref{eq25}) or Eq.~(\ref{eq27}). Finally, $\mathcal{TN}(u_q,t_q,L_q,N)$ can be worked out using $\hat{Y}$.



\section{EXPERIMENTS}\label{exp}
Now we empirically evaluate the effectiveness and efficiency of the algorithms proposed in Section ~\ref{sec:LDR}.

\subsection{DATASETS}
We conduct experiments on data collected from two social tagging systems: Flickr\footnote{http://www.flickr.com/} and Picasa\footnote{https://picasaweb.google.com}. These two systems are photo sharing systems which allow users to annotate photos with tags. Since the raw data is very noisy and sparse, we do some cleaning and preprocessing. First we remove the system-generated tags such as ``uploaded:by=instagram''. To eliminate some outliers, we then use the p-core\footnote{The p-core of $S$ is the largest subset of $S$ with the property that every user, every tag and every resource appears at least p times and every $L_i$ has size not smaller than p.} approach. For both datasets we use the 10-core. The data details after cleaning are shown in Table~\ref{tbl1}.
\begin{table}
\centering
\caption{Data Statistics}\label{tbl1}

\begin{tabular}{|c|c|c|c|c|}\hline
Dataset&$|U|$&$|T|$&$|R|$&$|S|$\\ \hline
Flickr&8,199&28,049&26,522&323,527\\ \hline
Picasa&509&1,545&1,041&18,135\\ \hline
\end{tabular}
\end{table}






\subsection{EVALUATION METHODOLOGY}
%We use the similar way described in []. Per user one tagging information $s\in S$ is randomly removed from the train set %$S_{train}$ and put into the test set $S_{test}$. 
For comparison, we use three metrics to evaluate the methods' quality. The first one is Mean Reciprocal Rank (MRR). The reciprocal rank of a query is the multiplicative inverse of the rank of the first relevant resource. Mean Reciprocal Rank is the average of the reciprocal ranks of queries in a query set $Q$. MRR is computed as:
\begin{equation}\label{aeq11}
\text{MRR}=\frac{1}{|Q|}\sum^{|Q|}_{i=1}\frac{1}{\mathrm{rank}_i}.
\end{equation}
The second evaluation method is Precision at $N$ ($P@N$), which is the precision at cut-off $N$ and denoted as:
\begin{equation}\label{aeq12}
P@N=\frac{\sum^N_{i=1}rel(i)}{N},
\end{equation}
where $rel(i)$ is an indicator function equaling 1 if the $i$-th retrieved resource is relevant to the query, zero otherwise. Overall $P@N$ is given by the average of $P@N$ over all queries in a query set $Q$.
The last metric is normalized discounted cumulative gain (NDCG) \cite{DBLP:journals/tois/JarvelinK02}, which is computed as follows:
\begin{equation*}
\text{NDCG@}N=Z_N\sum_{i=1}^{N}\frac{(2^{r(i)}-1)}{\log(i+1)},
\end{equation*}
\comment{ F-measure in TopN-lists scheme is used as the evaluation metrics.
\begin{equation*}
\begin{split}
P(S_{test},N)&=\avg_{(u,t,\l)  
}\frac{|Top(u,t,\l)\cap\lbrace r|(u,t,\l,r)\in S_{test}\rbrace|}{N}\\
R(S_{test},N)&=\avg_{(u,t,\l)  
}\frac{|Top(u,t,\l)\cap\lbrace r|(u,t,\l,r)\in S_{test}\rbrace|}{|\lbrace r|(u,t,\l,r)\in S_{test}\rbrace|}\\
F1(S_{test},N)&=\frac{2P(S_{test},N)R(S_{test},N)}{P(S_{test},N)+R(S_{test},N)}.
\end{split}
\end{equation*}}
where $Z_N$ is the normalization factor to make the optimal NDCG@$N$ score to be 1. $r(i)$ is 1 if the $i$-th returned resource is relevant and 0 otherwise. $@N$ means we evaluate based on the resources which are ranked top $N$. Similarly, overall NDCG@$N$ is given by averaging the NDCG@$N$ scores over all queries in a query set $Q$.

We invite 10 users to participate in the experiments. Each user submits 10 queries. Thus we have 100 queries in total. Each query consists of keywords and the location information, which indicates where the user proposes the query. As we aim to verify whether the location factor has an impact on retrieval results, we explicitly encourage users to enter queries containing the same keyword but different location information. Then the users determine a score for each returned resource based on relevance. A score of one is given if the returned resource is relevant and zero otherwise.
\comment{
We randomly divide $S$ into 10 folds and run 10-fold cross-validation on it which means every time, one fold is used as test set $S_{test}$ and the rest 9 folds are train set $S_{train}$. After split is done, \textbf{LD$_\textrm{{MAP}}$} and \textbf{BPR-OPT} are performed based on $S_{train}$ to get the prediction. Then the recommendation quality is measured on $S_{test}$. Normalized discounted cumulative gain (NDCG) \cite{DBLP:journals/tois/JarvelinK02} is used as the performance metric. The NDCG@$N$ score is 
For every $(u,t,\l), \exists (u,t,\l,r)\in S_{test}$, we compute NDCG@$N$ over $\mathcal{TN}(u,t,\l,N)$ and report the average over all splits.}

\subsection{OTHER METHODS}
LSI, CubeLSI \cite{DBLP:conf/icde/BiLKC11}, TD \cite{DBLP:conf/uai/RendleFGS09} and PITF \cite{DBLP:conf/wsdm/RendleS10} methods are selected for comparison. 

LSI is a traditional IR method which uses SVD to identify patterns in the relationships between tags and concepts based on a two-dimensional resource-tag matrix. Apparently, user and location information are ignored by LSI. Thus, we can use LSI as a baseline to see whether including the user and location dimensions is helpful to improve the retrieval results. 

CubeLSI is essentially the same as LSI except that CubeLSI incorporates the user dimension to generate a third-order tensor and performs tensor decomposition, which can be seen as a higher-order SVD. More details on CubeLSI can be found in \cite{DBLP:conf/icde/BiLKC11}.

Tucker Decomposition (TD) is a factorization model which has been proposed to solve retrieval problems. \cite{DBLP:conf/uai/RendleFGS09} also incorporates user information and makes use of the third-order tensor. \cite{DBLP:conf/uai/RendleFGS09} is a learning-to-rank method which tries to use MAP based on some observations to optimize model parameters. Readers are referred to \cite{DBLP:conf/uai/RendleFGS09} for more details on it.

PITF is a modification of TD to improve the efficiency since TD has high time complexity, which makes it not applicable for large-scale data. In contrast to TD, PITF just models the two-way interactions between users, tags and resources. More details of PITF can be found in \cite{DBLP:conf/wsdm/RendleS10}.



All the above methods don't utilize location information. However, location dimension has been claimed to be useful to improve the retrieval results in this paper. Thus, it is interesting to compare our methods, which employ location dimension, against the above ones, which don't.
Our algorithms are \ALGA\ and \ALGB, which utilize the four-dimensional tensor including the location dimension. Both of them perform \textbf{LD$_\textrm{{MAP}}$} initially, then run \textbf{BPR-OPT} using 4-dimensional TD and PITF models respectively. In the following experiments, the hyper-parameters are $\alpha=0.05$ and $\lambda_{\theta}=5\cdot20^{-5}$. We initialize $\mathcal{U}$ and $\boldsymbol{\Sigma}$ with K-means \cite{journals/tit/Lloyd82} results and $\varphi_k=\frac{|cluster(k)|}{|\mathbf{X}|}$. Parameters of TD and PITF are initialized with $\mathcal{N}(0,0.01)$.

\begin{figure*}
\subfigure[Flickr]{\label{fig1a}\includegraphics[width=0.45\textwidth]{FNN-crop}}
\subfigure[Picasa]{\label{fig1b}\includegraphics[width=0.45\textwidth]{PNN-crop}}
\caption{NDCG$@N$ Comparison over 2 Datasets}\label{fig:1}
%\end{figure*}
%\begin{figure*}
\subfigure[Flickr]{\label{fig2a}\includegraphics[width=0.45\textwidth]{FPN-crop}}
\subfigure[Picasa]{\label{fig2b}\includegraphics[width=0.45\textwidth]{PPN-crop}}
\caption{$P@N$ Comparison over 2 Datasets}\label{fig:2}
%\begin{figure*}
\subfigure[Flickr]{\label{fig3a}\includegraphics[width=0.45\textwidth]{FMRR-crop}}
\subfigure[Picasa]{\label{fig3b}\includegraphics[width=0.45\textwidth]{PMRR-crop}}
\caption{$MRR$ Comparison over 2 Datasets}\label{fig:3}
%\end{figure*}
\end{figure*}
\subsection{Comparison Results}
Figures~\ref{fig:1}-\ref{fig:3} illustrate the NDCG$@N$, $P@N$ and MRR scores for the 6 methods over the two datasets. From Figures~\ref{fig:1}-\ref{fig:3}, it is clear that our algorithms consistently outperform all the other compared methods on both datasets in terms of all three metrics. Furthermore, \ALGA\ and \ALGB\ surpass TD, PITF and CubeLSI by more than 5\%. Moreover, from the comparisons of \ALGA\ with TD and \ALGB\ with PITF, we can see that incorporating location information brings great improvement in retrieved resources quality. As a baseline, LSI has a comparatively low prediction quality without using additional information. TD, PITF and CubeLSI make use of user information and get improvements. Our algorithms \ALGA\ and \ALGB\ further integrate location information and improve performance notably.
As for \ALGA\ and \ALGB, it may be surprising that these two algorithms have comparable performance on both datasets, since \ALGB\ seems to lose some information or relationships among $U$, $T$, $L$ and $R$. This result implies that \ALGB\ does not sacrifice performance quality for speeding up.

Figure~\ref{fig:5} gives a concrete example of comparison among returned resources. One user submits the same keyword to search but the location is different. We perform \ALGA\ on the Flickr dataset and get the returned resources for each query (keyword-location pair). Figure~\ref{fig:5} shows the returned results which are ranked 1 for each query. For example, for the query (sand, Hawaii), \ALGA\ returns resources about beach and sand. On the other hand, \ALGA\ recommends resources with tags sand and dune when the query is conducted in Morocco. A photo about Marina Bay Sands is returned when the query happens in Singapore. From the user study results, we find that the user gives high scores for these three results. This example coincides with what we have claimed, namely that location information does have an impact on the retrieval results and it can improve the retrieval performance.

\subsection{Parameter Sensitivity}
Finally, as the number of factorization dimensions is an important factor for performance, Figure~\ref{fig:4} shows some test results of the factorization dimension impact on performance over the Flickr dataset. We run LPITF with an increasing number of factorization dimensions $k$ from 16 to 128. Figure~\ref{fig:4} indicates that the ranking quality converges at 64 dimensions, as the line for $k=128$ overlaps the line for $k=64$. Thus, for the comparison experiments, we set $k$ to 64 for all 6 methods.
\begin{figure}
\includegraphics[width=0.425\textwidth]{KNN-crop} 
\caption{Impact of Factorization Dimensions Over Flickr}\label{fig:4}
\end{figure}
\begin{figure*}
\centering
\includegraphics[width=0.825\textwidth]{final_example-crop} 
\caption{Retrieval Results Comparison Using \ALGA\ as Searching Method Over Flickr}\label{fig:5}
\end{figure*}
\comment{
\begin{figure}
\includegraphics[width=0.425\textwidth]{KN} 
\caption{Impact of Factorization Dimensions Over Flickr}\label{fig:2}
\end{figure}
}
\section{CONCLUSION AND FUTURE WORK}\label{conclusion}
In this paper, we consider the impact of location information on resources recommendation. We propose a tag driven location partition algorithm to divide the world into several regions for a given query. After the location partition is done, we incorporate the location dimension into some existing algorithms to predict the quality of resources for the given query. The experimental results illustrate that our approaches outperform all the other compared methods on both real-world datasets.
In the future, we plan to study how to better deal with queries having more than one word since in this paper we simply consider a query as a tag no matter how many words it contains. We will apply our approaches to more social tagging systems to test their quality.

%\newpage 
\comment{
\subsubsection*{Acknowledgements} 
 
Use unnumbered third level headings for the acknowledgements title.
All acknowledgements go at the end of the paper.}
 \bibliographystyle{abbrv}
\bibliography{LRS}
\balancecolumns
% That's all folks!
\end{document}
