\documentclass[fleqn]{beamer}
%\beamertemplateshadingbackground{red!10}{blue!10}
\usepackage{beamerthemebars}
\usepackage{pgf,pgfarrows,pgfnodes,pgfautomata,pgfheaps,pgfshade}
\usepackage[fleqn]{amsmath}
%\usepackage{amssymb}

\usepackage{graphicx}

\title{Crawling the Web}
\author{Jun Zhang}
\date{\today}

\pgfdeclareimage[height=.6cm]{logo}{webcrawler}
\pgfdeclareimage[width=8.5cm]{net-graph}{net-graph}
\pgfdeclareimage[width=5.5cm]{basic-crawler}{basic-crawler-en}
\pgfdeclareimage[width=6.5cm]{parallel-crawler}{parallel-crawler}
\pgfdeclareimage[width=8.5cm]{diff-focused-general}{diff-focused-general}
\pgfdeclareimage[width=7.5cm]{taxonomies}{taxonomies}
\pgfdeclareimage[width=8.5cm]{context-graph}{context-graph}

\logo{\pgfuseimage{logo}}

\begin{document}
\frame{\titlepage}
\section*{Outline}

\frame{\tableofcontents}

\section[Motivation]{Motivation: Gathering Data}
\frame
{
  \frametitle{Motivation: Gathering Data}
  \begin{itemize}
    \item Build the database for search engine or IR system
    \item Get information from other parts of an organization
    \item Get information from web sites
    \item Build a corpus of documents for a particular task
    \item Build a large corpus for text mining
  \end{itemize}
}

\section{Introduction}
\frame
{
  \frametitle{Web is a Big Graph}
  \begin{figure}
    \centering
    \pgfuseimage{net-graph}
  \end{figure}
}

\frame
{
  \frametitle{Web is a Big Graph}
  \begin{itemize}
  \item  Use graph to model the web
    \begin{itemize}
    \item Web pages $p$ can be viewed as nodes in the graph
    \item Hyperlinks $(p,q,a)$ can be viewed as
      edges in the graph
    \end{itemize}
  \item Issues in gathering data from the web
    \begin{itemize}
    \item The web is large and dynamic
    \item How to select interesting subgraphs
    \end{itemize}
  \end{itemize}
}

\section{General Crawler}
\frame
{
  \frametitle{Crawl ``All'' Web Pages}
  \begin{itemize}
    \item Problem: How to visit each node in the graph once and only once
    \item Solution: employ a breadth first search algorithm
      \begin{enumerate}
        \item Start from some seed URLs
        \item Fetch them and scan for new out-linking URLs
        \item Fetch these newly found pages in turn 
        \item and so on \ldots
      \end{enumerate}
  \end{itemize}
}

\subsection[A Simple One]{Implement a Simple Web Crawler}

\frame
{
  \frametitle{Simple General Crawler}
  \begin{figure}
    \centering
    \pgfuseimage{basic-crawler}
  \end{figure}  
}

\frame
{
  \frametitle{Components That Need to Be Implemented}
  \begin{itemize}
    \item {\large Frontier:} the to-do list of crawler
    \item {\large History:} shows the path of crawler
    \item {\large Page Repository:} store and organize the pages fetched
    \item {\large HTTP client:} fetching a page (attention to the {\em
      Robot Exclusion Protocol} and spider traps)
    \item {\large HTML parser:} URLs extraction and canonicalization
  \end{itemize}  
}

\subsection[Known Crawlers]{The Known Crawler Implementations}

\frame
{
  \frametitle{The Google Crawler}
  \begin{itemize}
    \item Mentioned a little in an early paper
    \item A single URL server
    \item A set of crawlers
    \item Each crawler managed a set of open connections in parallel
  \end{itemize}
}

\frame
{
  \frametitle{Compaq's Mercator}
  \begin{itemize}
    \item {\large Distributed:} running in multiple machines
    \item {\large Scalable:} be able to cope with a rapidly growing web
    \item {\large Polite:} do not overload web servers
    \item {\large Continuous:} a priority-based mechanism for scheduling URL downloads
    \item {\large Extensible:} a component-based architecture
    \item {\large Portable:} written in Java, running on every platform in theory
  \end{itemize}
}


\frame
{
  \frametitle{Parallel Crawler}
  \begin{figure}
    \centering
    \pgfuseimage{parallel-crawler}
  \end{figure}
}

\section{Focused Crawler}


\frame
{
  \frametitle{What is Focused Crawling?}
  \begin{itemize}
    \item The web is too large to crawl in time
    \item Try to crawl within a specific topic
      \begin{itemize}
        \item Store only pages ``relevant'' to the topic
        \item Avoid crawling irrelevant pages
      \end{itemize}
  \end{itemize}

  \begin{figure}
    \centering
    \pgfuseimage{diff-focused-general}
    %    \caption{ \tiny{The difference between focused crawler and general crawler}}
  \end{figure}
  
%  The difference between focused crawler and general crawler 
}

\subsection{Specify Topics}

\frame
{
  \frametitle{Specify Topics}
  Employ a taxonomy tree to partition the web
  \begin{figure}
    \centering
    \pgfuseimage{taxonomies}
    %    \caption{ \tiny{The difference between focused crawler and general crawler}}
  \end{figure}
  \begin{itemize}
    \item Each node represents a topic, example pages are also provided
    \item The edge in the tree represents subtopic relation
    \item Users mark ``good'' topics in the tree
  \end{itemize}
}

\subsection[Known Work]{Known Work in Academia}

\frame
{
  \frametitle{Score URLs in Frontier}
  \begin{itemize}
    \item Estimate URLs using the known information
      \begin{itemize}
        \item A URL in a relevant page usually points to a relevant
          page
        \item Surrounding text of an URL is very useful
        \item Citation of a page can be used in finding authority pages and hubs
      \end{itemize}
    \item Sort URLs in frontier by the scores of each URL
    \item Frontier is currently a priority queue
    \item No support for ``{\em tunneling}''
      \begin{itemize}
      \item {\em Tunneling:} a crawler reaches some relevant page on a
        path which does not consist only of relevant pages.
      \item Such paths will be pruned
      \end{itemize}
  \end{itemize}
}

\frame
{
  \frametitle{Naive Best First Crawling}
  Use the similarity between a page $p$ and topic $t$ to score URLs in $p$

  \begin{equation}
    sim(p,t)=\frac{\sum_{k \in p\cap t}f_{k,p}f_{k,t}}{\sqrt{\sum_{k \in p}{f_{k,p}^2}\sum_{k \in t}{f_{k,t}^2}}}
  \end{equation}
  \begin{itemize}
    \item $f_{k,d}$ is the term frequency of $k$ in $d$
    \item for each $(p,q_i,a_i)$ in $p$ : $score(p,q_i,a_i)=sim(p,t)$
  \end{itemize}
}

\frame
{
  \frametitle{SharkSearch}
  Considering the surrounding text of an URL: $l=(p,q,a)$ and
  inherited scores from ancestors: $inherited(l)$

\begin{displaymath}
  score(l) = \gamma\cdot inherited(l)+(1-\gamma)\cdot neighborhood(l)
\end{displaymath}
\begin{displaymath}
  inherited(l) =
  \begin{cases}
    \delta\cdot sim(p,t),      &\text{if $sim(p,t)>0$;}\\
    \delta\cdot inherited(p),  &\text{otherwise.}
  \end{cases}
\end{displaymath}
\begin{displaymath}
  context(l) =
  \begin{cases}
     1,                   &\text{if $anchor(l)>0$;}\\
     sim(t,aug\_context), &\text{otherwise.}
  \end{cases}
\end{displaymath}
\begin{displaymath}
  neighborhood(l)=\beta\cdot anchor(l)+(1-\beta)\cdot context(l)
\end{displaymath}


%%   \begin{align}
%%     &score(l) = \gamma\cdot inherited(l)+(1-\gamma)\cdot neighborhood(l)\\
%%     &inherited(l) =
%%   \begin{cases}
%%     \delta\cdot sim(p,t),      &\text{if $sim(p,t)>0$;}\\
%%     \delta\cdot inherited(p),  &\text{otherwise.}
%%   \end{cases}\\    
%%     &context(l) =
%%   \begin{cases}
%%      1,                   &\text{if $anchor(l)>0$;}\\
%%      sim(t,aug\_context), &\text{otherwise.}
%%   \end{cases}\\
%%   neighborhood(l)=\beta\cdot anchor(l)+(1-\beta)\cdot context(l)
%%   \end{align}
}

\frame
{
  \frametitle{PageRank}
  \begin{itemize}
    \item Used by Google to rank the pages retrieved
    \item Guide the crawlers and evaluate the pages
    \item Can find authority pages and hubs in the web
  \end{itemize}
  For page $p$, URLs $(q_1,p,a_1),(q_2,p,a_2),\ldots,(q_n,p,a_n)$ exist, then
\begin{displaymath}
    IR(p)=(1-d) + d(IR(q_1)/c_1+IR(q_2)/c_2+\cdots+IR(q_n)/c_n)
\end{displaymath}
}



\frame
{
  \frametitle{HITS}

\begin{displaymath}
Authority(p) = \sum_{(q,p)\in E}Hub(q)
\end{displaymath}
\begin{displaymath}
Hub(p) = \sum_{(p,q)\in E}Authority(q)
\end{displaymath}
}



\frame
{
  \frametitle{Reinforcement Learning and Focused Crawling}
  \begin{itemize}
    \item Reinforcement Learning:
      \begin{itemize}
        \item $s\in \mathcal{S}$: the set of states
        \item $a\in \mathcal{A}$: the set of actions
        \item $T:\mathcal{S}\times \mathcal{A}\rightarrow \mathcal{S}$: a state-action transform function
        \item $R:\mathcal{S}\times \mathcal{A}\rightarrow\mathfrak{R}$: a reward function
        \item Learning a policy $\pi:\mathcal{S}\rightarrow\mathcal{A}$ to maximize the over time reward
      \end{itemize}
    \item Focused Crawling
      \begin{itemize}
        \item The pages relevant to topic are rewards
        \item Pick which URL from frontier is the action
        \item The goal is to find as many relevant pages as possible
      \end{itemize}
  \end{itemize}
}

\frame
{
  \frametitle{Focused Crawling by a Classifier and a Distiller}
  \begin{itemize}
    \item A taxonomy tree is given at the beginning
    \item Example URLs are given by users to show their interests
    \item Classifier is used to assign the downloaded pages to nodes of the tree
    \item Distiller is used to find good hubs      
  \end{itemize}
}

\frame
{
  \frametitle{Context Focused Crawler}
  A Naive Bayes classifier is trained to estimate link distance
  \begin{enumerate}
  \item Build the {\em Context Graph } for each seed URL by the help of a search engine
  \item Combine the context graphs to get {\em Merged Context Graph}
  \item Learn a classifier for each layer in the graph
  \item When crawling, use the classifiers to decide whether it is
    worth expanding a path from a node
  \end{enumerate}
}
\frame
{
  \frametitle{A Context Graph}
  \begin{figure}
    \pgfuseimage{context-graph}
  \end{figure}
}


\section[Evaluation]{Evaluation of Focused Crawlers}
\frame
{
  \frametitle{Evaluation of Focused Crawlers}
  \begin{itemize}
    \item Page importance
      \begin{itemize}
        \item Keywords in document
        \item Similarity to the topic
        \item Classifier score
        \item Retrieval system rank
        \item Link-based popularity
      \end{itemize}
    \item Summary analysis
      \begin{itemize}
        \item Recall is hard to measure (the Web is the collection)
        \item Average relevance (something like precision)
        \item Target recall
          $$\text{target\_recall}=\frac{|\mathcal{P}_t\cap\mathcal{P}_c|}{|\mathcal{P}_t|}$$
      \end{itemize}
  \end{itemize}
}
\frame
{
  \begin{center}
    {\huge Thanks!}
  \end{center}
}
\end{document}
