%%%%%%%%
% Amitabh: fixed to 11pt, lettersize for PODC 2012. 1 title page + 10 pages + references + appendix
%%%%%%%%

% Amitabh, May 2: Typesetting to ACM al style for PODC Brief Announcement

% sig-alternate.tex

% Alternate ACM SIG Proceedings document using LaTeX2e

% Author: G.K.M. Tobin / Gerry Murray

% based upon LaTeX2.09 Guidelines, 9 June 1996

% Revisions:  1 September 1999

%             21 October 1999

%             1 July 2000


\documentclass{sig-alt-release2}

%\begin{document}
%\documentclass[11pt]{article}
%\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{subfigure}
\usepackage{epsfig,amsthm,amsmath,color, amsfonts}
%\usepackage{epsfig,color}

% Added: Amitabh

%\usepackage{graphicx,subfigure,geometry,boxedminipage}
% FIX: subfigure was listed here a second time (it is already loaded above);
% duplicate \usepackage of the same package is a no-op but clutters the log.
\usepackage{graphicx,boxedminipage}
%\usepackage{tikz}
\usepackage{paralist}
%\usepackage{boxedminipage}
\usepackage{setspace}

% \xxx{...}: red inline reviewer/TODO note (uses the color package loaded above).
\newcommand{\xxx}[1]{\textcolor{red}{#1}}
%\usepackage{fullpage}
\usepackage{framed}

%\usepackage{epsf}
%\usepackage{hyperref}



%%Amitabh: The setlength commands below seem to be conflicting with the geometry command below, selecting geometry for now

%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
%\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%\setlength{\topmargin}{0in}

%\voffset=-0.9in
%\hoffset=-0.8in
%

%sets the geometry to 1 in margins as asked by IPDPS/ PODC
%\geometry{left=1in,right=1in,top=1in,bottom=1in}



\DeclareGraphicsExtensions{.pdf}


%%% Copied from http://www.terminally-incoherent.com/blog/2007/02/04/latex-making-floatng-text-boxes/
%%%%

%    \renewcommand{\topfraction}{0.9}
%    \renewcommand{\bottomfraction}{0.8}
%    \setcounter{topnumber}{2}
%    \setcounter{bottomnumber}{2}
%    \setcounter{totalnumber}{4}
%    \setcounter{dbltopnumber}{2}
%    \renewcommand{\dbltopfraction}{0.9}
%    \renewcommand{\textfraction}{0.07}
%    \renewcommand{\floatpagefraction}{0.7}
%% floatpagefraction MUST be less than topfraction !!
%    \renewcommand{\dblfloatpagefraction}{0.7}

%%%
%%%

\renewcommand{\algorithmiccomment}[1]{(#1)}



% Theorem-like environments (amsthm): all numbered within sections,
% sharing the `theorem' counter.
\newtheorem{theorem}{Theorem}[section]
%\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conjecture}[theorem]{Conjecture}
%\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
% Upright (non-italic) body for examples and definitions.
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
% FIX: `observation' is not a valid amsthm theorem style (only plain,
% definition, and remark exist), so \theoremstyle{observation} raised an
% "Unknown theoremstyle" error. Observations conventionally use the
% `remark' style.
\theoremstyle{remark}\newtheorem{observation}[theorem]{Observation}

% \comment{...}: swallows its argument; used as a block-comment helper.
\newcommand{\comment}[1]{}
% \QED: end-of-proof tombstone used by the proof1 environment below.
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
% \theoremref: parenthesized reference, e.g. "(3.1)".
\newcommand{\theoremref}[1]{(\ref{#1})}
% proof1: manual proof environment -- bold "Proof:" header, \QED tombstone.
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}
%\newenvironment{observation}{\mbox{}\\[-10pt]{\sc Observation.} }%
%{\mbox{}\\[5pt]}

% Math shorthands for fractions and common symbols.
\def\m{{\rm min}}
%\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{{1\over 2}}
\def\third{{1\over 3}}
\def\quarter{{1\over 4}}
% \polylog: upright multi-letter operator name (via amsmath's \operatorname).
\def\polylog{\operatorname{polylog}}
% \ignore / \eat: additional argument-swallowing comment macros.
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}
% Floor/ceiling with auto-sized delimiters.
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

% \algorithmsize: currently a no-op hook for adjusting algorithm font size.
\newcommand{\algorithmsize}[0]{}

% Calligraphic letter shorthands.
\newcommand{\cF}{{\mathcal{F}}}
\newcommand{\cR}{{\mathcal{R}}}

%---------------------
%  SPACE SAVERS
%---------------------

%\usepackage{times}
%\usepackage[small,compact]{titlesec}
%\usepackage[small,it]{caption}

% \squishlist ... \squishend: compact bulleted list (tight vertical spacing,
% narrow left margin) used as a space saver for the page limit.
\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
% Closes a list opened by \squishlist.
\newcommand{\squishend}{
  \end{list}  }

%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

% Probability shorthands: expectation, variance, entropy.
\def\e{{\rm E}}
\def\var{{\rm Var}}
\def\ent{{\rm Ent}}
% NOTE(review): \eps is already defined identically earlier in the preamble;
% this duplicate redefinition is harmless but redundant.
\def\eps{{\epsilon}}
\def\lam{{\lambda}}
% \bone: bold indicator / all-ones symbol.
\def\bone{{\bf 1}}

% Graph-notation macros used in the model and problem-definition sections.
\newcommand{\degree}{\mathrm{degree}}
\newcommand{\algoOne}{\textbf{Algo 1}}
\newcommand{\algoTwo}{\textbf{Algo 2}}
\newcommand{\G}{\mathrm{G}}
\newcommand{\V}{\mathrm{V}}
\newcommand{\E}{\mathrm{E}}
\newcommand{\h}{\mathrm{h}}
%\newcommand{\den}{\mathrm{den}}
% \den: graph/subgraph density, typeset as \rho.
\newcommand{\den}{\rho}

%------------------------
% Comments
%------------------------


%\def\danupon#1{\marginpar{$\leftarrow$\fbox{D}}\footnote{$\Rightarrow$~{\sf #1 --Danupon}}}
%\def\amitabh#1{\marginpar{$\leftarrow$\fbox{AT}}\footnote{$\Rightarrow$~{\sf #1 --Amitabh}}}
%\def\atish#1{\marginpar{$\leftarrow$\fbox{ADS}}\footnote{$\Rightarrow$~{\sf #1 --Atish}}}


% Author-comment macros, disabled for the camera-ready version: each swallows
% its argument (the margin-note versions are commented out above).
\def\danupon#1{}
\def\amitabh#1{}
\def\atish#1{}



\begin{document}


\conferenceinfo{PODC'12,} {July 16--18, 2012, Madeira, Portugal.} 
\CopyrightYear{2012} 
\crdata{978-1-4503-1450-3/12/07} 
\clubpenalty=10000 
\widowpenalty = 10000

\title{Brief Announcement: Maintaining Large Dense Subgraphs on Dynamic Networks}


%\author{
%Atish {Das Sarma}\thanks{eBay Research Labs, eBay Inc., San Jose, CA, USA. E-mail: {\tt atish.dassarma@gmail.com}}
%%%
%\and Ashwin Lall\thanks{Department of Mathematics and Computer Science, Denison University, Granville, OH, USA.}
%%%
%\and Danupon Nanongkai\thanks{Theory and Applications of Algorithms Research Group, University of Vienna, Vienna, Austria. Email: {\tt danupon@gmail.com}}
%%%
%\and Amitabh Trehan\thanks{Information Systems group, Faculty of Industrial Engineering and Management, Technion - Israel Institute of Technology, Haifa, Israel - 32000. \hbox{E-mail}:~{\tt amitabh.trehaan@gmail.com}
%Supported by a Technion fellowship.
%}
%}


\numberofauthors{4} %  in this sample file, there are a *total*
% of EIGHT authors. SIX appear on the 'first-page' (for formatting
% reasons) and the remaining two appear in the \additionalauthors section.
%
\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three). 
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author
\alignauthor
Atish Das Sarma\\
%\titlenote{Dr.~Trovato insisted his name be first.}\\
       \affaddr{eBay Research Labs,}\\
       \affaddr{eBay Inc.}\\
       \affaddr{ San Jose, CA, USA.}\\
       \email{atish.dassarma@gmail.com}
% 2nd. author
\alignauthor
Ashwin Lall\\
%\titlenote{The secretary disavows
%any knowledge of this author's actions.}\\
       \affaddr{Department of Mathematics and Computer Science}\\
       \affaddr{Denison University,}\\
       \affaddr{Granville, OH, USA.}\\
       \email{lalla@denison.edu}
% 3rd. author
\alignauthor Danupon Nanongkai\\
%{\Large{\sf{\o}}}rv{$\ddot{\mbox{a}}$}ld\titlenote{This author is the
%one who did all the really hard work.}\\
       \affaddr{Theory and Applications of Algorithms Research Group}\\
       \affaddr{University of Vienna,}\\
       \affaddr{Vienna, Austria.}\\
       \email{danupon@gmail.com}
\and  % use '\and' if you need 'another row' of author names
% 4th. author
\alignauthor Amitabh Trehan\\
       \affaddr{Information Systems group,}\\
       \affaddr{Faculty of Industrial Engineering and Management,}\\
       \affaddr{Technion, Haifa, Israel}\\
       \email{amitabh.trehaan@gmail.com}
}
\date{}


%\begin{titlepage}


%\maketitle \thispagestyle{empty}

%Title candidates: ``Maintaining a large dense subgraph on dynamic networks''
%Title candidates: ``Randomized distributed algorithms for discovering dense subgraphs''

%Version for registration
%\input{abstractregistration}

%% Version for final submission

%\begin{abstract}
%We show that we can maintain an approximated densest subgraph of size at least $k$ in the dynamic networks (edge deletion/addition model).
%\end{abstract}

%\vspace*{.4in}

%\begin{center}
%{\bf Submitted for a Regular Presentation}
%\end{center}

%\end{titlepage}

\maketitle

%\input{abstract}


\begin{abstract}


In  distributed networks, some groups of nodes may have more inter-connections, perhaps due to their larger bandwidth availability or communication requirements. In many scenarios, it may be useful for the nodes to know if they form part of a dense subgraph, e.g., such a dense subgraph could form a high bandwidth backbone for the network.
In this work, we address the problem of self-awareness of nodes in a dynamic network with regard to graph density, i.e., we give distributed algorithms for maintaining dense subgraphs (subgraphs that the member nodes are aware of). The only knowledge that the nodes need is that of the \emph{dynamic diameter} $D$, i.e., the maximum number of rounds it takes for a message to traverse the dynamic network. For our work, we consider a model where the number of nodes is fixed, but a powerful adversary can add or remove a limited number of edges from the network at each time step. The communication is by broadcast only and follows the CONGEST model in the sense that only messages of $O(\log n)$ size are permitted, where $n$ is the number of nodes in the network.

Our algorithms are continuously executed on the network, and at any time (after some initialization) each node will be aware if it is part (or not) of a particular dense subgraph. We give algorithms that approximate both the \emph{densest subgraph}, i.e., the subgraph of the highest density in the network, and the \emph{at-least-$k$-densest subgraph} (for a given parameter $k$), i.e., the densest subgraph  of size at least $k$. We give a ($2 + \epsilon$)-approximation algorithm for the densest subgraph problem. The at-least-$k$-densest subgraph is known to be NP-hard for the general case in the centralized setting and the best known algorithm gives a 2-approximation. We present an algorithm that maintains a ($3 + \epsilon$)-approximation in our distributed, dynamic setting. Our algorithms run in $O(D\log_{1+\epsilon} n)$ time.

%  where $n$ is the number of nodes in the network.

%Our algorithms rely upon network estimation techniques to get quick and reliable estimation (with suitable probabilistic guarantees) of number of nodes and edges in the network; here, we introduce  a suitable estimation technique, and also note that our algorithms are flexible to be adapted to such models where suitable estimation techniques are available. We then show by clever probabilistic arguments how our algorithms achieve the stated approximation guarantees.


% Maintaining density is a notion that is clearly motiva	ted by the fact that i

%We show that we can maintain an approximated densest subgraph of size at least $k$ in the dynamic networks (edge deletion/addition model).
\end{abstract}

\section*{ACM Classification} C.2.1 [\textbf{Computer Systems Organization}]: Computer-Communication Networks: Network
Architecture and Design- \emph{Distributed networks, Network communications, Wireless
communication}; C.2.4 [\textbf{Computer Systems Organization}]: Distributed Systems; C.4 [\textbf{Computer Systems Organization}]: Performance of
Systems- \emph{Reliability, availability, and serviceability}; 
%E.1 [\textbf{Data}]: Data Structures - \emph{Distributed data structures, Graphs and networks, Trees};
 H.3.4  [\textbf{Information Systems}]: Information Storage and
Retrieval: Systems and Software - \emph{Distributed systems, Information networks}
%\section*{General Terms} Algorithms, Design, Reliability, Security, Theory

\section*{Keywords} Dynamic networks, Distributed, Graph density, Subgraph density, Approximate, Probabilistic, Estimation, Aggregation
% Self-healing, reconfigurable, responsive, distributed,  networks, peer-to-peer, data structure, half-full tree, stretch, degree

\section{Introduction}\label{sec:intro}

%Note: We shouldn't forget to look at this paper (SODA'12) Polynomial integrality gaps for strong SDP relaxations of Densest k-Subgraph
%Aditya Bhaskara, Moses Charikar, Venkatesan Guruswami, Aravindan Vijayaraghavan and
%Yuan Zhou
% Added this in related work - Atish

Density is a very well studied graph property with a wide range of applications stemming from the fact that it is an excellent measure of the strength of inter-connectivity between nodes. While several variants of graph density problems and algorithms have been explored in the classical setting, there is surprisingly little work that addresses this question in the distributed computing framework. This paper focuses on decentralized algorithms for identifying dense subgraphs in dynamic networks.

% networks, in particular, on decentralized algorithms for arbitrary (perhaps dynamic) networks.

Finding dense subgraphs has received a great deal of attention in graph algorithms literature because of the robustness of the property. The density of a subgraph only gradually changes when edges come and go in a network, unlike other graph properties such as connectivity that are far more sensitive to graph perturbation. Density measures the {\em strength} of a set of nodes by the graph induced on them from the overall structure. The power of density lies in locally observing the strength of {\em any} set of nodes, large or small, independent of the entire network.


%\atish{All the below seem to suggest we want to do something with the dense subgraph. I think it is more important for this paper to stress the fact that finding dense subgraphs is a very well studied fundamental graph property that has received little attention in terms of distributed computation. We should avoid suggesting one specific use of the dense subgraph (since specifically in the dynamic setting this subgraph can keep changing). We should just highlight that this is an interesting algorithmic question and intuitively stress on the difficulties of quickly computing dense subgraphs (both in static and dynamic graphs) in the congest model. }
%\paragraph{Motavations} Say something to motivate the study of densest subgraph problem. Possible motivations.
%\begin{itemize}
%\item Want to use densest-at-least-k subgraph to be a backbone? But may not be a good backbone
%\item Want to make sure that the network is still dense. Want to be able to alert the admin when it's not dense anymore. (In some tasks it is ok that the whole network is not dense but we want a large part of it to be dense to guarantee a good communication; e.g., we may not need all the machine to talk to each other fast but some of them should so that those machines can carry a communication-intensive task.)
%\item Detect an unusual communication. Dense = too much communication. This could be an indicator of virus, users service failure, etc.
%\item Break the network into dense components. (``Engineering the network''.) ... Find one dense component, remove and find it again. ... Maybe our algorithm will output all components with some specified density (since we are maintaining various subgraphs of various density)?
%\item Natural computing? Biology? (Link Amitabh: {\tt http://compbio.cs.uic.edu/}. She does work on Zebra network....) Also see Leslie Valiant's work.
%\item Break into many subgraphs each of which is internally dense
%\end{itemize}

%Expansion, on the other hand measures the connection between a set of nodes and the rest of the network. Therefore, density constraints would be able to assess and better maintain the strength between specified (or all) subsets of nodes (even though there are exponential such sets) in a more robust manner.
Consider, for example, a distributed network that requires a small set of nodes, say {\em hubs}, that are treated as central and can be used as a backbone for communication amongst them. It is conceivable and even likely that they would incur a larger communication interaction between them, and therefore demand larger connectivity structure, lower latency, and higher resilience to failures. Therefore, a peer-to-peer network would like to retain this structure, or at least identify such nodes, even as the graph evolves over time. 

%In such an application scenario, it would be crucial to know which nodes are most strongly connected amongst themselves.


% Not directly related to our paper---Ashwin
% In another scenario, it may be useful to just test whether the entire network that is dynamically changing is still dense. It may be required, for a variety of reasons, that the graph be dense. For instance, being able to compute the densest subgraphs of a given size constraint and identifying the associated nodes would facilitate raising an alarm if and when the required property changes with time. The nice aspect of our results is that they can capture such applications even if the requirement is not on the entire graph but only on specific size constraints on the graph. These techniques could perhaps even be useful in the future in cases where a partitioning of the graph into smaller dense components is required.

% While there are no algorithms formally presented even for the static CONGEST~\cite{peleg} model framework designed to reflect peer to peer networks, 

In this paper, we expand the static CONGEST~\cite{peleg} model and consider a dynamic setting where the graph edges may change continually. We present algorithms for approximating the (at least size $k$) densest subgraph in a dynamic graph model to within constant factors. Our algorithms are not only designed to compute size-constrained dense subgraphs, but also track or maintain them through time, thereby allowing the network to be aware of dense subgraphs even as the network changes. They are fully decentralized and adapt well to rapid network failures or modifications. This gives the densest subgraph problem a special status among global graph problems 
%(the problems that need $\Omega(D)$ time on static networks)
: while most graph problems are hard to approximate in $o(\sqrt{n})$ time even on static networks~\cite{DasSarmaHKKNPPW11}, the densest subgraph problem can be approximated quickly, even in the dynamic setting. Khuller and Saha~\cite{KS} considered the problem of finding densest subgraphs with size restrictions and showed that these are NP-hard. Khuller and Saha~\cite{KS} and also Andersen and Chellapilla~\cite{AC} gave constant factor approximation algorithms. Some of our algorithms are based on those presented in~\cite{KS}.

%We now explain our model for dynamic networks, define density objectives considered in this paper, and state our results.

%associated results obtained in this work.


%\paragraph{Densest subgraph on edge deletion/insertion dynamic network} Define the problem. Say roughly what edge deletion/insertion dynamic network is.
%
%\paragraph{Results.}

% Amitabh: created a new file model.tex as input to this.


%\input model


%\subsection{Model}

\subsection{Model: Edge-dynamic distributed}
Here, we briefly describe our model. Consider an undirected, unweighted, connected $n$-node graph $G =
(V, E)$, with the vertices representing processors with unbounded computational power but having only local knowledge.
The communication is synchronous, and occurs in discrete pulses, called \emph{rounds}. However, the nodes are only allowed to \emph{broadcast}, i.e., if they send a message in a round, they send the same message to every neighbor.  All the nodes wake up simultaneously at the beginning of round 1 and can only broadcast an arbitrary message of size $O(\log n)$ in each round, which is successfully received in the same round. Our algorithms work in an edge-dynamic network model, i.e., an edge deletion/addition model.  We consider a sequence of (undirected) graphs $G_0, G_1, \ldots$ on $n$ nodes, where, for any $t$, $G_t$ denotes the state of the dynamic network $G(V,E)$ at time $t$, where the adversary deletes and/or inserts up to $r$ edges at each step, i.e.,  $E(G_{t+1})= (E(G_{t})\setminus E_U) \cup E_V$, where $E_U \subseteq E(G_{t})$ and $E_V \subseteq E(\overline{G_{t}})$, $|E_U| + |E_V| = r$.
% The edge change rate parameter $r$ is assumed to be constant.
 Also, following the notion in \cite{KuhnOM11}, we define the {\em dynamic diameter} of the dynamic network $G(V,E)$, denoted by $D$, to be the maximum time a message needs to traverse the network at any time. To measure the efficiency of our algorithms, we will concentrate on their running times in terms of number of rounds.



%Suppose that every node (vertex) hosts a processor with
%unbounded computational power (though our algorithms only use time and space polynomial in $n$ at each vertex), but with only local knowledge initially.
%Specifically, assume that each node is associated with a distinct identity number from the set $\{1, 2, . . . , n\}$. At the beginning of the computation, each node $v$ accepts as input its own identity number and the identity numbers of its neighbors in $G$.
%\amitabh{recheck the anonymous assumption in the end, as Danupon mentioned}

%Specifically, assume that the network is \emph{anonymous}, i.e., the nodes do not have IDs but they do know their degrees, i.e., number of neighbors. The nodes may accept some additional inputs as specified by the problem at hand. 


%allowed to communicate through the edges of the graph $G$.
% In our model, all the nodes wake up simultaneously at the beginning of round 1.
%, and from this point on the nodes always know the number of the current round \amitabh{Wonder if this assumption is needed. Leaving it in for now}.
% In each round
%each node $v$ is allowed to broadcast an arbitrary message of size
%$O(\log n)$
% through each edge $e = (v, u)$ that is adjacent to $v$,
%and this message will arrive at each neighbor at the end of the current round.
%Our model is similar to (but in fact, weaker than) the standard model of distributed computation known as the
%{\em CONGEST model}~\cite{peleg}. Our model is weaker since the CONGEST model assumes unique IDs, point-to-point communication, and unlimited computation power at nodes (none of which we need).
% The message size constraint is very important for large-scale resource-constrained dynamic networks where running time is crucial.
% Now, we state our dynamicity conditions:
%This work addresses the
%problem of computing random walks in a time-efficient manner.
%\paragraph{Edge-Dynamic Network Model}
%We use the edge deletion/addition model; i.e., we consider a sequence of (undirected) graphs $G_0, G_1, \ldots$ on $n$ nodes, where, for any $t$, $G_t$ denotes the state of the dynamic network $G(V,E)$ at time $t$, where the adversary deletes and/or inserts upto $r$ edges at each step, i.e.,  $E(G_{t+1})= (E(G_{t})\setminus E_U) \cup E_V$, where $E_U \subseteq E(G_{t})$ and $E_V \subseteq E(K_n)$, $|E_U| + |E_V| = r$ (where  $K_n$ is the complete graph on $n$ vertices). The edge change rate parameter $r$ is assumed to be constant.

%either $E(G_t)=E(G_{t+1})\setminus e$ for some $e\in E(G_{t+1})$, or $E(G_{t+1})=E(G_{t})\setminus e$ for some $e\in E(G_{t})$.
%Following the notion in \cite{KuhnOM11}, we define the {\em dynamic diameter} of the dynamic network $G(V,E)$, denoted by $D$, to be the maximum time a message needs to traverse the network at any time.
%%Let $D_{\max}$ be the maximum diameter of the network at all time, i.e., $D_{\max}=\max_t D(G_t)$.
%%We assume that the network is connected at all time.
%More formally, dynamic diameter is defined as follows:
%\begin{definition}[Dynamic Diameter (Adapted from \cite{KuhnOM11}, Definition 3)]
%%{\bf Dynamic Diameter (~\cite{KuhnOM11}, Definition 3)}
%We say that the dynamic network $G=(V,E)$ has a dynamic diameter of $D$ upto time $t$ if for all $t' \le t$ and $u,v \in V$, we have $(u,max\{0,t' - D\}) \leadsto (v,t')$, where $(x,t') \leadsto (y,t)$ implies that at time $t$, node $y$ can receive direct information, through a chain of messages, originating from node $x$ at time $t'$.
%\end{definition}

% Note that this definition of dynamic diameter allows the graph to become disconnected, as long as messages never take more than $D$ rounds to get from one node to another. We assume that the dynamic diameter $D$ bounded and known to all the nodes in the network.

%\danupon{Need a more formal definition. See \cite{KuhnOM11}. We should also cite Gopal's SODA paper. They also talk about dynamic diameter.}
%There are several measures of efficiency of distributed algorithms, but we will concentrate on one of them, specifically, {\em the running time}, that is, the number of rounds of distributed communication. (Note that the computation that is performed by the nodes locally is ``free'', i.e., it does not affect the number of rounds.)

%Note: Maybe, maintaining the network diameter at all time is also interesting (although it's quite easy).

%An alternative way to define the network and parameters: Let $T_c$ and $T_f$ to be a time to ``count'' and ``flood'' at any time in the course of the network change. We note that $T_c$ and $T_f$ could be bounded by $D_{max}$ which is the maximum diameter of the network at all time. (Should we define the model like this?)

%\amitabh{Adding a figure for the model. Modify as desired. We should probably combine the subsections. Also, sync the notations and exact model definition.}

%
%
%\begin{figure}[h!]
%\caption{The  distributed Edge Insert and Delete  Model.}
%\label{algo: model-general}
%\begin{boxedminipage}{\textwidth}
%{\fontsize{10}{10}\selectfont
%\begin{algorithmic}
%\STATE Each node of $G_0$ is a processor.
%\STATE Each processor starts with a list of its neighbors in $G_0$.
%\STATE Pre-processing: Processors may exchange messages with their neighbors.
%%send messages to and from their neighbors.
%\FOR {$t := 1$ to $T$}
%\STATE Adversary deletes and/or inserts upto $r$ edges at each step i.e.  $E(G_{t+1})=(E(G_{t})\setminus E_U) \cup E_V$, where $E_U \subseteq E(G_{t})$ and $E_V \subseteq E(K_n)$ $|E_V| + |E_U| = r$, forming $X_t$ (where $r$ is a constant and $K_n$ is a complete graph on $n$ vertices).
%\IF{edge $(u,v)$ is inserted or edge $(u,v)$ is deleted}
%\STATE Nodes $u$ and $v$ may update their information and exchange messages with their neighbors.
%\STATE {\bf Computation phase:}
%\STATE Nodes of $X_t$ may communicate (synchronously, in parallel)
%with their immediate neighbors. These messages are never lost or
%corrupted, may contain the names of other vertices, and are received by the end of this phase.
%\ENDIF
%%\IF{edge $(u,v)$ is deleted}
%%\STATE All neighbors of $v_t$ are informed of the deletion.
%%\STATE During this phase, each node may add edges
%%joining it to any other nodes as desired.
%%Nodes may also drop edges from previous rounds if no longer required.
%%\ENDIF
%\STATE At the end of this phase, we call the graph $G_t$.
%\ENDFOR
%\vspace{5pt}
%\hrule
%\STATE {\bf Success metrics:}
%%Minimize the following ``complexity'' measures: \COMMENT{Formally define here, and state as a minimization}
%%Consider the graph  $G'$ which is the graph consisting solely of the original nodes and insertions without regard to
%%deletions and healings. Graph $G'_{t}$ is $G'$ at timestep $t$ (i.e. after the $t^{\mathrm{th}}$ insertion or deletion).
%%Graph $G'_{t}$ is $G'$ at timestep $t$ which is equivalent to $G'_{t'}$ where the $t' \le t$ is
%%the timestep at which the latest insertion on or before $t$ occured.
%%\begin{enumerate}
%\begin{compactenum}
%%\item{\bf Graph properties/invariants.}
%\item{\bf Approximate Dense Subgraphs:} \emph{Graph $S'_T$:}
%% {\bf FORMALLY DEFINE HERE}%\amitabh{Formally define here}
% The induced graph of a set $S'_T \subseteq V_T$, s.t., $\den(S'_T) \ge \frac{\den(S^{*}_T)}{\delta}$, where  $S^{*}_T \subseteq V$, s.t., $\den(S^{*}_T) = \max \den(S_T)$ over all $S_T \subseteq V_T$.
%\item{\bf Approximate at-least-k-Dense Subgraphs:} \emph{Graph $S^{k}_T$:} The induced graph of a set  $S^k \subseteq V, |S^k| > k$, s.t., $\den(S^k) \ge \frac{\den(S^{k*})}{\delta}$, where   $S^{k*} \subseteq V, |S^{k*}| > k$, s.t., $\den(S^{k*}) = \max \den(S)$ over all $S \subseteq V, |S| > k$.
%
%%{\bf FORMALLY DEFINE HERE}%\amitabh{Formally define here}
%%\item{\bf k-core:}
%  %$\max_{v \in G} degree(v,G_T) / degree(v,G'_T)$
%%\item {\bf Network stretch.} $\max \left( (x,y) \in G_{t}, G_{t'}; t, t' <T, \distance_{t'}(x,y) / \distance_{t}(x,y)
%%\right)$
%%\item {\bf Network stretch.} $\max_{x, y \in G_{T}} \frac{dist(x,y,G_{T})}{dist(x,y,G'_{T})}$, where, for a graph $G$ and nodes $x$ and $y$ in $G$, $dist(x,y,G)$ is the
%%length of the shortest path between $x$ and $y$ in $G$.
%%For any pair of nodes $x$ and $y$, $ \distance(x,y,G_{T}) / \distance(x,y,G'_t)$
%\item{\bf Communication per node.} The maximum number of bits sent by a single node in a single recovery round. $\log n$ in CONGEST model.
%% \tom{Want to modify this or omit?}
%\item{\bf Computation time.} The maximum total time (rounds) for all nodes to compute their density estimations starting from scratch assuming it takes a message no more than $1$ time unit to traverse any edge and we have unlimited local computational power at each node. %{\bf CAN WE SEPERATE INTO WORST CASE AND AMORTIZED HERE} %\amitabh{Can we seperate into worst case and amortized here}
%%\end{enumerate}
%\end{compactenum}
%\end{algorithmic}
%} %end of setspace
%\end{boxedminipage}
%\end{figure}
%
%


%\amitabh{The line below is not too clear to me. We are not maintaining any property (we don't have a repair model), we are just discovering the dense graphs. Correct?}\\
%\amitabh{Maybe we should use both 'compute' and 'maintain': compute for the first time and maintain for maintaining the computed approximation. Making some  changes below.}
%We are interested in algorithms that can compute and maintain an approximate (at-least-$k$) densest subgraph 
%(e.g., diameter, connectivity, densest subgraph) 
%of the network at all times. 
%We say that an algorithm can compute and maintain a solution $P$ in time $T$ if it can compute the solution in $T$ rounds and can maintain a solution at all times after time $T$, even as the network changes dynamically.


\subsection{Problem definition}

%Densest subgraph, approximating densest subgraph, etc.\atish{Amitabh, this part is still incomplete - please add the definitions in.}

%\subsection{Preliminaries}
\noindent Let $\G =(\V,\E)$ be an undirected graph and $S \subseteq \V$ be a set of nodes. Let us define the following:\\
\noindent \textbf{Graph Density:}
The density of a graph $G(V, E)$ is defined as $|E|/|V|$.\\
\noindent \textbf{Subgraph Density:}
The density of a subgraph defined by a subset of nodes $S$ of $V(G)$ is defined as its induced density. We will use $\den(S)$ to denote the density of the subgraph induced by $S$. 
%Therefore, $\den(S) = \frac{|E(S)|}{|S|}$. Here $E(S)$ is the subset of edges $(u, v)$ of $E$ where $u\in S$ and $v\in S$.  In particular, when talking about the density of a subgraph defined by a set of vertices $S$ induced on $G$, we use the notation $\den_G(S)$. We also use $\den_t(S)$ to denote $\den_{G_t}(S)$. When clear from context, we omit the subscript $G$. Note that our definition corresponds to that of the function $g_{S}$ in~\cite{AndersenSODA08}.\\
The problem we address in this paper is to construct distributed algorithms to discover the following:
\begin{compactitem}
\item{\bf (Approximate) Densest subgraphs:} The densest subgraph problem is to find a set $S^{*} \subseteq V$, s.t. $\den(S^{*}) = \max \den(S)$ over all $S \subseteq V$. An $\alpha$-approximate solution $S'$ will be a set $S' \subseteq V$, s.t. $\den(S') \ge \frac{\den(S^{*})}{\alpha}$.
\item {\bf (Approximate) at-least-$k$-densest subgraphs:} The densest at-least-$k$-subgraph problem is the previous problem restricted to sets of size at least $k$, i.e., to find a set $S^{k*} \subseteq V, |S^{k*}| \geq k$, s.t.\ $\den(S^{k*}) = \max \den(S)$ over all $S \subseteq V, |S| \geq k$. An $\alpha$-approximate solution $S^k$ will be a set $S^k \subseteq V, |S^k| \geq k$, s.t. $\den(S^k) \ge \frac{\den(S^{k*})}{\alpha}$.

\end{compactitem}

\section{Algorithms and  Results}

The nature of our algorithms is such that we {\em continuously} maintain an approximation to the densest subgraph in the dynamic network, \emph{at all times}.  This means that, at all times (except for a short initialization period), all nodes are aware of whether they are part of the approximated at-least-$k$ densest subgraphs, for all $k$.
%At any time, after a short initialization period, any node knows whether it is a member of the output subgraph of our algorithm. 
 In particular, we give approximation algorithms for the densest and at-least-$k$-densest subgraph problems which are efficient even on dynamic distributed networks. 
 We develop algorithms that, for any $\epsilon > 0$, $(2+\epsilon)$-approximate the densest subgraph, and  $(3+\epsilon)$-approximate the at-least-$k$-densest subgraph, in $O(D \log_{1+\epsilon} n)$ time provided the density is high enough. Formally:

% We develop an algorithm that, for any $\epsilon > 0$, $(2+\epsilon)$-approximates the densest subgraph in $O(D \log_{1+\epsilon} n)$ time provided that the densest subgraph has high enough density,
% i.e., it has a density at least $(cDr\log n)/\epsilon$, for some constant $c$. 
%We also develop a $(3+\epsilon)$-approximation algorithm for the at-least-$k$-densest subgraph problem with the same running time, provided that the at-least-$k$-densest subgraph has high enough density
% provided that $k$ times the density of the at-least-$k$-densest subgraph is at least $(cDr\log n)/\epsilon$, for some constant $c$. This algorithm builds upon Algorithm~\ref{algo:maintain} by picking densest subgraph, checking for the possibility that they may not be large enough and adding extra nodes appropriately.
 %These results are formalized in Section~\ref{sec:approx}. 
 %In addition to computing the $(2+\epsilon)$-approximated densest and $(3+\epsilon)$-approximated at-least-$k$-densest subgraphs, our algorithm can also {\em maintain} them {\em at all times}. This means that, at all times (except for a short initialization period), all nodes are aware of whether they are part of the approximated at-least-$k$ densest subgraphs, for all $k$.


%\begin{theorem}
%In any edge deletion and insertion dynamic network, for any $\alpha>1$ and $k\geq D_{\max}\log_\alpha n$, we can maintain an $O(\alpha)$-approximated densest subgraph of size at least $k$ using $O(1)$-time per step.
%\end{theorem}
%
%Can we still say something if we don't have the at least $k$ constraint (I forgot)? Note that we can deal with smaller $k$ at an expense of approximation ratio as well (if we delete more node then we will finish faster). So, maybe we should state this more general theorem.
%
%
%\begin{theorem}[Another possibility]
%In any edge deletion and insertion dynamic network, for any $\alpha>1$, $\delta>1$, and $k\geq (D_{\max}\log_\alpha n)/\delta$, we can maintain a subgraph of size at least $k$ and density at least $\Omega(\delta/\alpha)$ using $O(1)$-time per step, as long as there is a subgraph of size at least $k$ and density at least $\delta$.
%\end{theorem}
%
%The second theorem gives a tradeoff between the size and density of the subgraph we want to maintain. This is a simple way to view the above theorems.

%We now state the main results of this paper.
%
%\begin{theorem}[Densest at-least-$k$ problem on static networks]
%For any $k$, we can compute %maintain
%an $O(1)$-approximated densest subgraph of size at least $k$ in $O(D\log n)$ time.
%\end{theorem}
%
%%{\bf Danupon:} I combined the statement for the Densest and Densest at least $k$ problems as they state the same thing (if we don't care the approximation ratio).
%
%%\amitabh{Need to restate the theorems below to adjust for notions of time, step and compute, maintain}
%
%\begin{theorem}[Densest at least $k$ problem on dynamic networks]
%In any edge deletion and insertion dynamic network, for any $k\geq D\log n$, we can maintain
% an $O(1)$-approximated densest subgraph of size at least $k$ using $O(1)$ time.
%\end{theorem}
%
%\begin{theorem}[Densest subgraph on dynamic networks]
%In any edge deletion and insertion dynamic network, we can maintain an $O(1)$-approximated densest subgraph of size at least $k$ using $O(1)$ time per step as long as the densest subgraph has density at least $D\log n$.
%\end{theorem}

% Our algorithms are described in Section~\ref{sec:algo} and the approximation guarantees are proved in Section~\ref{sec:approx}. 



%\section{Algorithm}\label{sec:algo}
%
%\subsection{Main Algorithm}
%\label{sec:main}
%
%The nature of our algorithm is such that we {\em continuously} maintain an approximation to the densest subgraph in the dynamic network. At any time, after a short initialization period, any node knows whether it is a member of the output subgraph of our algorithm. In this section, we give the description of the algorithm and fully specify the behavior of each of the nodes in the network. The analysis of the running time and the approximation guarantees are deferred to the following sections.
%
%Our main protocol for maintaining a dense subgraph is given in Algorithm~\ref{algo:maintain}. It maintains a family of  $p=O(\log_{1+\epsilon} n)$ candidates for the densest subgraph, along with an approximation of the number of nodes and edges in each graph. The algorithm works in phases in which it estimates the size of the current subgraph $V_j$ and the number of edges in it using the algorithms discussed in the following subsection. At the end of the phase it computes the next subgraph $V_{j+1}$ using a criterion in Line 9 of Algorithm~\ref{algo:maintain} (explained further in Section~\ref{sec:approx}). After $p$ such rounds, the algorithm has all the information it needs to output an approximation to the densest subgraph. This process is repeated continuously, and the solution is computed from the last complete family of graphs (i.e., complete computation of $p$ subgraphs). 
%



\begin{theorem}
There exists an algorithm that for any dynamic graph with dynamic diameter $D$ and parameter $r$ returns a subgraph at time $t$ such that, w.h.p., the density of the returned subgraph is a $(2+\epsilon)$-approximation to the density of the densest subgraph at time $t$ if the densest subgraph has density at least $\Omega(Dr\log nr)$.
\end{theorem}

\begin{theorem}
There exists an algorithm that for any dynamic graph with dynamic diameter $D$ and parameter $r$ returns a subgraph of size at least $k$ at time $t$ such that, w.h.p., the density of the returned subgraph is a $(3+\epsilon)$-approximation to the density of the densest at least $k$ subgraph at time $t$ if the densest at least $k$ subgraph has density at least $\Omega(Dr\log n/k)$.
\end{theorem}



Further, our general theorems also imply the following for static graphs (by simply setting $r = 0$). No such results were known in the distributed setting even for static graphs. 

\begin{corollary}
In a static distributed graph, there is an algorithm that obtains, w.h.p., $(2+\epsilon)$-approximation to the densest subgraph problem in $O(D\log n)$ rounds of the CONGEST model.
\end{corollary}

\begin{corollary}
In a static distributed graph, there is an algorithm that obtains, w.h.p., a $(3+\epsilon)$-approximation to the at-least-$k$-densest subgraph problem in $O(D\log n)$ rounds of the CONGEST model.
\end{corollary}


%(We might want to state this as a lemma.)  This family has the following property. Let $t$ be any time after the first time all $V_1, \ldots, V_p$ are first computed. For any $V'\subseteq V$, let $\rho_t(V')$ be the density of the subgraph of $G_t$ induced by $V'$. Let
%%
%$$V_{\cF}^k=\arg\max_{V_i\in \cF,\ |V_i|\geq k} r_i\,.$$
%%
%Then, $V_{\cF}^k$ is a good approximation of the densest subgraph of $G_t$ of size at least $k$, i.e.,
%%Let $H_i$ be the subgraph of $G_t$ induced by $V_i$. For any $k$,
%%$$\max_{V_i\in \mathcal{F}, |V_i|\geq k} \rho(H_i)\in [(1-\epsilon)\rho(G_{t}), (1+\epsilon)\rho(G_{t})]\,.$$
%%
%$$\rho_t(V_{\cF}^k)\geq (1-\epsilon) \max_{V'\subseteq V(G_t), |V'|\geq k} \rho_t(V')\,.$$
%%
%It also maintains the approximated optimal density: Let $i^*$ be such that $V_{i^*}=V_{\cF}^k$. Then,
%$$m_{i^*}/n_{i^*} \in \rho_t(V_{\cF}^k)\,.$$
%
%%{\bf (Make sure the number is correct!!!)}.
%
%%{\bf Output:} Let $t^*$ be the time the algorithm ends. The algorithm outputs a family of subgraphs $\mathcal{F}=\{H_0, H_1, \ldots, H_p\}$ where $H_0=G$, $G_i\subseteq G_{i-1}$ for all $i$ and $p=O(\log_{1+\epsilon} n)$ such that, for any $k$, $\max_{G_i\in \mathcal{F}} \rho(H_i)\in [\frac{1}{\alpha\beta}\rho(G_{t^*}), \alpha\beta\cdot\rho(G_{t^*})]${\bf (Make sure the number is correct!!!)}. (Each node knows which subgraphs in $\cal F$ it is in and the density of {\em all} subgraphs in $\cal F$.)

%\begin{algorithmic}[1]
%
%\STATE Let $\delta=\epsilon/24$.
%
%\STATE Let $j = 0$. Let $V_0=V$ (i.e., we mark every node as in $V_0$).
%
%
%\REPEAT
%
%\STATE Compute $n_j$, the $(1+\delta)$-approximation of $|V_j|$ (i.e., $(1+\delta)|V_j|\geq n_j\geq (1-\delta)|V_j|$). In the end of the algorithm every node knows $n_j$. See Algorithms~\ref{algo:kuhn} and~\ref{algo:count nodes} for detailed implementation.
%
%
%\IF{$n_j=0$}
%
%\STATE Let $j = 0$. %Let $V_0=V$ (i.e., we mark every node as in $V_0$).
%(Note that we do not recompute $n_0$.)
%
%\ENDIF
%
%\STATE Let $G_t$ be the network in the beginning of this step. Let $H_t$ be the subgraph of $G_t$ induced by $V_{j}$. We compute $m_{j}$, the $(1+\delta)$-approximation of the number of edges in $H_t$ (i.e., $(1+\delta)|E(H_t)|\geq m_{j}\geq (1-\delta)|E(H_t)|$). In the end of the algorithm every node knows $m_{j}$. See Algorithm~\ref{algo:count edges} for detailed implementation.
%
%%\STATE Let $\Delta=m_{j}/n_{j}$.
%
%
%\STATE Let $G_{t'}$ be the network in the beginning of this step.  Let $H_{t'}$ be the subgraph of $G_{t'}$ induced by $V_{j}$. Let $V_{j+1}$ be the set of nodes in $V_{j}$ whose degree in $H_{t'}$ is at most $(1+\delta)m_j/n_j$. In the end of this step, every node knows whether it is in $V_{j+1}$ or not. This step can be done in one round since every node already knows $m_{j}/n_{j}$ and can easily check, in one round, the number of neighbors in $G_{t'}$ that are in $V_{j}$.
%
%%\STATE Compute $n_j$, the $(1+\epsilon)$-approximation of $|V_j|$ (i.e., $(1+\epsilon)|V_j|\geq n_0\geq (1-\epsilon)|V_j|$). In the end of the algorithm every node knows $n_0$. See Algorithm~\ref{algo:count nodes} for detailed implementation.
%
%\STATE Let $j=j+1$.
%
%
%\UNTIL{forever}
%
%
%\end{algorithmic}



%At any time, the densest subgraph can be computed using the steps outlined in Algorithm~\ref{algo:densest}. This procedure works simply by picking the subgraph with the highest density, taking into account the possibility that the size of this subgraph may be less than $k$. If the graph turns out to be less than size $k$, we pad it by having the rest of the nodes run a distributed procedure to elect appropriately many nodes to add to the subgraph and get its size up to at least $k$. 
%
%Any time a densest subgraph query is initiated in the network, the nodes simply run Algorithm~\ref{algo:densest} based on the subgraphs continuously being maintained by Algorithm~\ref{algo:maintain}, and compute which of them are in the approximation solution. At the end of this query, each nodes is aware of whether it is in the approximate densest subgraph or not. 
%
%
%\begin{algorithm}
%\caption{{\sc Densest Subgraph}($k$)}
%\label{algo:densest}
%
%{\bf Input:} $k$, the parameter for the densest at-least-$k$ subgraph problem
%
%%Let $t$ be the time the algorithm ends. Needed now?
%
%{\bf Output:} The algorithm outputs a set of nodes $V_i\cup\hat{V}$ (every node knows whether it is in $\hat{V}$ or not) such that $|V_i\cup\hat{V}|\geq k$ that is supposed to be the approximated at-least-$k$ densest subgraph along with its approximated density.
%
%such that
%$$\rho_t(V')\geq (1-\epsilon) \max_{V'\subseteq V(G_t), |V'|\geq k} \rho_t(V')\,$$
%as well as its approximated density
%$r'\in \rho_t(V_{\cF}^k)\,.$
%Moreover, it outputs the approximated density of $\rho_t(V')$, $r'=\geq [(1-\epsilon)\rho_k(G_{t^*}), (1+\epsilon)\rho_k(G_{t^*})]$ where $\rho_k(G_{t^*})$ is the density of the densest subgraph of $G_{t^*}$ of size at least $k$.
%
%\begin{algorithmic}[1]
%%\STATE Simply output $V_{\cF}^k=\arg\max_{V_i\in \cF,\ |V_i|\geq k} r_i\,.$ and its corresponding approximated density $m_{i^*}/n_{i^*}$.
%\STATE Let $i=\max_{i} m_i/\max(k, n_i)\,.$
%%\STATE Let $V_{out}$ be the set of all nodes in $V_i$ (each node knows whether it is in $V'$ or not)
%\IF{$n_i<(1+\delta)k$}
%\STATE Let $\Delta=(1+\delta)k-n_i$. (Every node can compute $\Delta$ locally.)
%%et $V'$ be the set of nodes obtained by adding arbitrary $\ell$ nodes in $V\setminus V_i$ to $V_i$, where $(1+\delta)k-n_i\leq \ell\leq (1+\delta)((1+\delta)k-n_i)$  as follows.
%\REPEAT
%\STATE Every node not in $V_i$ locally flips a coin which is head with probability $\Delta/n_0$.
%\STATE Let $\hat{V}$ be the set of nodes whose coins return heads.
%\STATE Approximately count the number of nodes in $\hat{V}$ using the algorithm discussed in Section~\ref{sec:counting} with error parameter $\epsilon=\delta$. Let $\Delta'$ be the result. (Note that $\Delta'/(1+\delta)\leq |\hat{V}|\leq (1+\delta)\Delta'$ with high probability.)
%\UNTIL{$(1+\delta)\Delta\leq \Delta'\leq (1+2\delta)\Delta$}
%\ENDIF
%\RETURN $V_i\cup \hat{V}$
%\end{algorithmic}
%\end{algorithm}

%\subsection{Approximating the number of nodes and edges}\label{sec:counting}
%
%Our algorithms make use of an operation in which the number of nodes and edges in a given subgraph need to be computed. We achieve this in $O(D)$ rounds using a modified version of an algorithm from~\cite{KuhnLO10}. Their algorithm allows for approximate counting of the size of a dynamic network. We modify it to work for any subgraph that we are interested in. We also show how it can be used to approximate the number of edges in this subgraph at a given time. In the interest of space, these results can be found in Appendix~\ref{sec:count}.
%
% \input{count}

%\subsection{Padding nodes}
%
%
%\begin{algorithm}
%\caption{{\sc Padding}($t$, $\epsilon$)}
%\label{algo:padding}
%
%{\bf Input:} $p$, the number of nodes to be selected, and $\epsilon$, the error parameter
%
%{\bf Output:} $p'$ nodes will be selected (each node knows whether it is selected or not) where $p\leq p'\leq (1+\epsilon)p$.
%
%%such that
%%$$\rho_t(V')\geq (1-\epsilon) \max_{V'\subseteq V(G_t), |V'|\geq k} \rho_t(V')\,$$
%%as well as its approximated density
%%$r'\in \rho_t(V_{\cF}^k)\,.$
%%Moreover, it outputs the approximated density of $\rho_t(V')$, $r'=\geq [(1-\epsilon)\rho_k(G_{t^*}), (1+\epsilon)\rho_k(G_{t^*})]$ where $\rho_k(G_{t^*})$ is the density of the densest subgraph of $G_{t^*}$ of size at least $k$.
%
%\begin{algorithmic}[1]
%%\STATE Simply output $V_{\cF}^k=\arg\max_{V_i\in \cF,\ |V_i|\geq k} r_i\,.$ and its corresponding approximated density $m_{i^*}/n_{i^*}$.
%\STATE Let $i=\max_{i} m_i/\max{k, n_i}\,.$
%\STATE If $n_i<(1+\delta)k$ then let $V'$ be the set of nodes obtained by adding arbitrary $(1+\delta)k-n_i$ nodes in $V\setminus V_i$ to $V_i$, as follows.
%\STATE EXPLAIN THE PADDING ALGORITHM HERE.
%\RETURN $V'$
%\end{algorithmic}
%\end{algorithm}



%
%\paragraph{Danupon:} I think that, from our discussion, the above is actually the main result. The results that Atish listed earlier are below.
%
%
%\begin{theorem}\label{thm:main1}
%Under a model where the adversary is allowed to delete/add edges such that the diameter is always at most $D$, there is a distributed algorithm that requires only $O(1 + D/\log n)$ rounds per edge-failure such that it maintains a $c$-approximate densest subgraph for constant $c=$, .
%\end{theorem}
%
%\begin{theorem}\label{thm:main1}
%Under a model where the adversary is allowed to delete/add edges such that the diameter is always at most $D$, there is a distributed algorithm that requires only $O(1 + kD/\log n)$ rounds per edge-failure such that it maintains a $c$-approximate subgraph for the densest at least $k$ problem, for a constant $c=$.
%\end{theorem}



%\input introduction

%\input relatedwork

%\input high_level %

%\input algorithm % Explain the algorithm and how to implement it in the dynamic distributed setting

%\input time % Analyze the running time


%\input approximation % Analyze the approximation ratio of the algorithm


%\input extensions % 1) Maintaining some subgraphs so that we can answer the query "what is the density of the densest subgraph of size at least k?" quickly. 2) the more general case of $\rho$-dynamic


%\input futureworkconclusions


%\begin{thebibliography}{1}
\begin{thebibliography}{50}
\vspace*{1mm}
\scriptsize
 
\bibitem{AC}
Reid Andersen and Kumar Chellapilla.
\newblock Finding dense subgraphs with size bounds.
\newblock In {\em WAW '09: Proceedings of the 6th International Workshop on
  Algorithms and Models for the Web-Graph}, pages 25--37, 2009.

\bibitem{KS}
Samir Khuller and Barna Saha.
\newblock On finding dense subgraphs.
\newblock In {\em ICALP (1)}, pages 597--608, 2009.

\bibitem{KuhnOM11}
Fabian Kuhn, Rotem Oshman, and Yoram Moses.
\newblock Coordinated consensus in dynamic networks.
\newblock In {\em PODC}, pages 1--10, 2011.

\bibitem{peleg}
David Peleg.
\newblock {\em Distributed computing: a locality-sensitive approach}.
\newblock Society for Industrial and Applied Mathematics, Philadelphia, PA,
  USA, 2000.

\bibitem{DasSarmaHKKNPPW11}
Atish~Das Sarma, Stephan Holzer, Liah Kor, Amos Korman, Danupon Nanongkai,
  Gopal Pandurangan, David Peleg, and Roger Wattenhofer.
\newblock Distributed verification and hardness of distributed approximation.
\newblock In {\em STOC}, pages 363--372, 2011.

\end{thebibliography}


%----------------------------
%\newpage
%{\small
%\bibliographystyle{plain}
%\bibliography{dense,selfheal}
%}

%\newpage
%\appendix
%\section*{Appendix}
%\input count

%\input appendix1

%\input appendix2

%\input appendix3

%\input maintheory
%
%\input kcore

\end{document}
