\onlyShort{Due to lack of space, the complete proofs can be found in the full paper.}
\onlyShort{\vspace{-0.25cm}}
\section{Distributed Algorithms for Hypergraph MIS Problem}
\onlyShort{\vspace{-0.2cm}}
\label{sec:hyper}
%\subsection{Computing a Maximal Independent Set}
We present randomized distributed algorithms and
prove the following for the hypergraph MIS problem:
% (Recall that $\Delta$ is the maximum hypernode degree, $n$ is the number of hypernodes, and dimension of a hypergraph is the maximum hyperedge size.)

\begin{theorem} \label{thm:mis}
The hypergraph MIS problem can be solved  in the following expected time\footnote{Our time bounds can also be easily shown to hold with high probability, i.e., with probability $1 -1/n$.} in both vertex-centric and server-client representations.  
\begin{compactenum}
	\item $O(\log^2 n)$ time in the LOCAL model.
	\item  $O(\log^{(d+4)!+4} n)$ time\footnote{As is common, we use the notation $\log^f n$ which is the same as $(\log n)^f$.} in the CONGEST model when the input hypergraph has constant dimension $d$. 
	%\item $\tilde O(\min\{\Delta^{o(1)}, \sqrt{n}\}))$ 
	%\item $O(\min\{\Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n, \Delta^{o(1)}n^{o(1)}, \sqrt{n}\})$ time\footnote{As usual, we use the notation $\log^f n$ which is the same as $(\log n)^f$.} in the CONGEST model for any dimension. ($\eps > 0$ can be any arbitrarily small constant.) 
	%\danupon{This is a bit sloppy: $O(\Delta^{o(1)})$ is w.h.p. but $O(\sqrt{n})$ is in expectation}
	\item $O(\min\{\Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n, \sqrt{n}\})$ time in the CONGEST model for any dimension, where $\epsilon$ is such that $1\geq \epsilon \geq \frac{1}{\frac{\log\log n}{c\log\log\log n}-1}$ for some (large) constant $c$. (In particular, $\Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n$ becomes $\Delta^{o(1)}n^{o(1)}$ when we use $\epsilon =  \frac{1}{\frac{\log\log n}{c\log\log\log n}-1}$.)
\end{compactenum}
\end{theorem}
In Section \ref{sec:decomposition}, we prove a {\em decomposition lemma} which plays an important role in achieving all the above results. 
%We then show the first result in \Cref{thm:local}, second result in \Cref{sec:constant dimension}, and the last result in \onlyLong{\Cref{sec:sqrt n algo,sec:MIS Delta epsilon}.}\onlyShort{in the remaining section.}

\onlyShort{\vspace{-0.2cm}}
\subsection{Low-Diameter Decomposition}\label{sec:decomposition}
\onlyShort{\vspace{-0.2cm}}

First, we note that, for solving MIS, it is sufficient to construct an algorithm that solves the following {\em subgraph-MIS} problem on low-diameter networks. 


\begin{definition}[Subgraph-MIS Problem]
In the Subgraph-MIS problem, we are given an $n$-node network $G$. This network is either in a vertex-centric or server-client representation of some hypergraph $\cH$. Additionally, we are given a subnetwork $G'$ of $G$ representing a sub-hypergraph\footnote{Given a subset $V' \subseteq V$,  a sub-hypergraph of $\cH$ is simply a hypergraph induced by $V'$ --- except for hyperedges that contain vertices that do not belong to $V'$, all other hyperedges of $\cH$ (which intersect with $V'$) are present in the sub-hypergraph.}   $\cH'$ of $\cH$. The goal is to find an MIS of $\cH'$. 
\end{definition}


\begin{lemma}[Decomposition Lemma] \label{thm:decomposition congest}
For any function $T$, if there is an algorithm $\cA$ that solves subgraph-MIS on CONGEST server-client (respectively vertex-centric) networks $G$ of $O(\log n)$ diameter in $T(n)$ time (where $n$ is the number of nodes in $G$), then there is an algorithm for MIS on CONGEST server-client (respectively vertex-centric) networks of {\em any} diameter that takes $O(T(n)\log^4 n)$ time. 
\end{lemma}

The main idea of the lemma is to run the {\em network decomposition} algorithm of Linial and Saks \cite{LinialS93} and simulate $\cA$ on each cluster resulting from the decomposition. The only part where we have to be careful is that running $\cA$ simultaneously on many clusters could cause congestion. We show that this can be avoided by a careful scheduling. The details are as follows.

%
%The lemma essentially follows from the {\em network decomposition} algorithm of Linial and Saks \cite{LinialS93}

The network decomposition algorithm of \cite{LinialS93} produces an {\em $O(\log n)$-decomposition with weak-diameter $O(\log n)$}. That is, given a (two-dimensional) graph $G$, it partitions nodes into sets $S_1, S_2, \ldots S_k$ and assigns color $c_i\in \{1, 2, \ldots, O(\log n)\}$ to each set $S_i$ with the following properties: 
\begin{compactitem}
\item the distance between any two nodes in the same set $S_i$ is $O(\log n)$, and 
\item any two neighboring nodes of the same color must be in the same set (in other words, any two ``neighboring'' sets must be assigned different colors).
\end{compactitem}

This algorithm takes $O(\log^2 n)$ time even in the CONGEST model~\cite{LinialS93}. We use the above decomposition algorithm to decompose the server graph $G(\cH)$ (cf. \Cref{sec:prelim}) of the input hypergraph. The result is the partition of hypernodes (servers) into colored sets satisfying the above conditions (in particular, two nodes sharing the same hyperedge must be in the same partition or have different colors). In addition, we modify the Linial-Saks (LS) algorithm to produce low-diameter  subgraphs that contain these sets with the property that subgraphs of the same color have ``small overlap''. 


%This can be easily done in the vertex-centric model since the network in this model is already the server graph. It is also easy to achieve in the server-client LOCAL model. In the server-client CONGEST model, we show how to simulate the algorithm in \cite{LinialS93} as follows. 
\begin{lemma}\label{claim:decomposition}
Let $G$ be the input network (server-client or vertex-centric model) representing hypergraph $\cH$. In $O(\log^3 n)$ time and for some integer $k$, we can partition hypernodes into $k$ sets $S_1, \ldots, S_k$, produce $k$ subgraphs of $G$ denoted by $G_1, G_2, \ldots G_k$, and assign color $c_i\in \{1, 2, \ldots, O(\log n)\}$ to each subgraph $G_i$, with the following properties: 
\begin{compactenum}
\item For all $i$, $G_i$ has diameter $O(\log n)$ and $S_i\subseteq V(G_i)$. 
\item For any $S_i$ and $S_j$ that are assigned the same color (i.e. $c_i=c_j$), there is no hyperedge in $\cH$ that contains hypernodes (servers) in both $S_i$ and $S_j$. 
\item Every edge in $G$ is contained in $O(\log^3 n)$ graphs $G_{i_1}, G_{i_2}, \ldots$ 
\end{compactenum}
\end{lemma}
Observe that the first two properties in \Cref{claim:decomposition} are similar to the guarantees of the Linial-Saks algorithm, except that \Cref{claim:decomposition} explicitly gives low-diameter graphs that contain the sets $S_1, \ldots, S_k$. The third property guarantees that such graphs have ``small congestion''. 
\onlyLong{
\begin{proof}
Note that the Linial-Saks algorithm works as follows. The algorithm runs in iterations where in the $i^{th}$ iteration it will output sets of color $i$. In the $i^{th}$ iteration, each vertex $y$ selects an integer radius $r_y\in \{1, \ldots, O(\log n)\}$ at random (according to some distribution). Then it broadcasts its ID and the value $r_y$ to all nodes within distance $r_y$ of it. Every node $v$, after receiving all such messages from other nodes, selects the node with the highest ID from among the nodes $y$ that send their IDs to $v$; denote this node by $C(v)$. For any node $y$, define set $S_y$ as the set that contains every node $v$ such that $C(v)=y$ and the distance from $v$ to $y$ is {\em strictly} less than $r_{y}$. We call $S_y$ the {\em set centered at $y$} (note that $y$ might not be in $S_y$). 
%
All sets in this iteration receive color $i$. The distance between every pair of nodes $u$ and $v$ in any set $S_y$ is $O(\log n)$ since their distance to $y$ is $O(\log n)$. We can guarantee that there are no two neighboring nodes $u$ and $v$ in different sets because otherwise $C(u)=C(v)$ (this crucially uses the fact that sets are formed by nodes $v$ whose distance to $C(v)$ is strictly less than $r_{C(v)}$). By carefully picking the distribution of $r_y$, \cite{LinialS93} shows that the number of iterations is $O(\log n)$. 

\smallskip
The following is one simple (although not the most efficient) way to simulate the above algorithm in the server-client CONGEST model to compute $S_1, \ldots, S_k$. We implement each iteration of the above algorithm in {\em sub-iterations}. In the beginning of the $j^{th}$ sub-iteration, every server $y$ with $r_y=j$ sends its ID to its neighboring clients. We then repeat the following for $2j-1$ steps: every node (client or server) sends the maximum ID that it receives to its neighbors. It is easy to see that after all sub-iterations every server $v$ receives the maximum ID among the IDs of servers $y$ such that $r_y=j$ and the distance between $y$ and $v$ in the server graph is at most $j$. Since $r_y=O(\log n)$ for every $y$, there are $O(\log n)$ sub-iterations and each sub-iteration takes $O(\log n)$ time. After all sub-iterations, every server $v$ can select $C(v)$. Thus, we can simulate the Linial-Saks algorithm in $O(\log^3 n)$ time. (Simulating Linial-Saks algorithm on the vertex-centric model can be done similarly except that we will have  $j-1$ sub-iterations instead of $2j-1$.)

We now construct $G_1, \ldots, G_k$. At any sub-iteration above, if a node $v$ sends the ID of some node $y$ to its neighbors, we add its neighbors and all edges incident to $v$ to $G_y$ (corresponding to set $S_y$). Clearly, $S_y$ is contained in $V(G_y)$ since $G_y$ contains all nodes that receive the ID of $y$. This process also guarantees that $G_y$ has $O(\log n)$ diameter since every node in $G_y$ can reach $y$ in $O(\log n)$ hops by following the path along which the ID of $y$ was sent to it. Additionally, since the simulation of the Linial-Saks algorithm finishes in $O(\log^3 n)$ rounds, and in each round we add an edge $(u, v)$ to at most two subgraphs, we have that every edge is in $O(\log^3 n)$ subgraphs.
\end{proof}



\begin{proof}[Proof of \Cref{thm:decomposition congest}]
We decompose the network as in \Cref{claim:decomposition}. Then, we use $\cA$ to compute MIS iteratively in $O(\log n)$ iterations as follows. At the $i^{th}$ iteration, we consider each set $S_t$ and graph $G_t$ of color $i$. We will decide whether each node in $S_t$ will be in the final solution of MIS or not.  We assume that we already did so for sets of colors $1, 2, \ldots, i-1$. 

Let $\cH_t$ be the following sub-hypergraph. $\cH_t$ consists of all hypernodes in $S_t$. For each hyperedge $e$ that contains a node in $S_t$, we add an edge $e'=e \cap S_t$ to $\cH_t$ if $e$ contains {\em none} of the following hypernodes: (1)  a hypernode in set $S'$ of color $j>i$, and (2) a node in set $S''$ of color $j<i$ that is already decided to be {\em not} in the MIS. We can construct $\cH_t$ quickly since each server (hypernode) can decide locally whether each client (hyperedge) adjacent to it satisfies the above property or not. 

Now we compute MIS of $\cH_t$ by simulating $\cA$ to solve the subgraph-MIS problem on $G_t$ where the subgraph we want to solve is the subgraph $G'_t$ of $G_t$ representing $\cH_t$. Note that since $G_t$ has diameter $O(\log n)$, $\cA$ will finish in $T(n)$ time if we simulate $\cA$ on only $G_t$. However, we will actually simulate $\cA$ on {\em all}  graphs $G_{t_1}, G_{t_2}, \ldots$ of color $i$ {\em simultaneously}. Since each edge is contained in $O(\log^3 n)$ such graphs, we can finish simulating $\cA$ on all graphs in $O(T(n)\log^3 n)$ time. 

After we finish simulating $\cA$ on $\cH_t$, we use the solution as a solution of MIS of the original graph $\cH$; that is, we say that a hypernode is in the MIS of $\cH$ if and only if it is in the MIS of $\cH_t$. We now prove the correctness. Let $M_t$ be the MIS of $\cH_t$. First, observe that any hypernode in $M_t$ can be added to the MIS solution of $\cH$ without violating the independence constraint since $\cH_t$ contains all hyperedges of $\cH$ except those that contain some hypernode of higher color (which is not yet added to the MIS of $\cH$) or some hypernode of lower color that is already decided not to be in the MIS of $\cH$. Secondly, if a hypernode $v$ in $S_t$ is not in $M_t$, then, by the maximality of $M_t$, there is a hyperedge $e'$ in $\cH_t$ that contains $v$ and in which every hypernode except $v$ is in $M_t$. Let $e$ be a hyperedge in $\cH$ such that $e'=e\cap S_t$. Note that $e$ does not contain any hypernode in another set $S_{t'}$ of the same color as $S_t$. Also observe that every hypernode in $e\setminus S_t$ must be already decided to be in the MIS of $\cH$ (otherwise, we would not have $e'=e\cap S_t$ in $\cH_t$). Thus, every hypernode in $e$ except $v$ is already in the MIS of $\cH$ as well; in other words, $v$ cannot be in the MIS of $\cH$. This completes the correctness of the algorithm. Thus, after we finish simulating $\cA$ on graphs of all colors, we obtain the MIS of $\cH$. Since we need $O(T(n)\log^3 n)$ time for each color, we need $O(T(n)\log^4 n)$ time in total. 
\end{proof}
}


%\begin{lemma} \label{thm:decomposition congest}
%If there is an algorithm $\cA$ for MIS on CONGEST $O(\log n)$-weak-diameter networks that takes $T(n)$ time for some function $T$, then there is an algorithm for MIS on CONGEST networks of {\em any} diameter that takes $\tilde O(T(n))$ time. 
%\end{lemma}
%\begin{proof}
%The claim follows from the {\em network decomposition} algorithm of Linial and Saks \cite{LinialS93}, which produces an {\em $O(\log n)$-decomposition with weak diameter $O(\log n)$}. That is, given a graph $G$, it partitions nodes into disjoint sets $S_1, S_2, \ldots S_k$ and assigns color $c_i\in \{1, 2, \ldots, O(\log n)\}$ to each set $S_i$ with the following properties: 
%\begin{itemize}
%\item the distance between any two nodes in the same set $S_i$ is $O(\log n)$, and 
%
%\item any two neighboring nodes of the same color must be in the same set (in other words, any two ``neighboring'' sets must be assigned different colors).
%\end{itemize}
%
%This algorithm takes $O(\log^2 n)$ time even in the CONGEST model. We will use the above decomposition algorithm to decompose the server graph of the input hypergraph. The result is the partition of hypernodes (servers) into colored sets satisfying the above conditions. This can be easily done in the vertex-centric model since the network in this model is already the server graph. It is also easy to achieve in the server-client LOCAL model. In the server-client CONGEST model, we show how to simulate the algorithm in \cite{LinialS93} as follows. 
%
%\paragraph{Simulating Linial-Saks in Server-Client CONGEST Model} Recall that this algorithm is as follows. The algorithm runs in iterations where in the $i^{th}$ iteration it will output sets of color $i$. In each iteration, each vertex $y$ selects an integer radius $r_y\in \{1, \ldots, O(\log n)\}$ at random (according to some distribution). Then it broadcasts its ID and the value $r_y$ to all nodes within distance $r_y$ of it. For every node $v$, after receiving all such messages from other nodes, selects the node $C(v)$ with highest ID from among nodes $y$ that sends their IDs to $v$. Every node $v$ that have the same values of $C(v)$ and its distance to $C(v)$ is {\em strictly} less than $r_{C(v)}$ become the same set. We call the set consisting of nodes $v$ that picks $y$ as $C(v)$ a {\em set centered at $y$} (note that $y$ might not be in such set). 
%
%All sets in this iteration receives color $i$. It is easy to guarantee that the distance between every pair of nodes $u$ and $v$ in the same set is $O(\log n)$ since their distance to $C(u)=C(v)$ is $O(\log n)$. We can guarantee that there are no two neighboring nodes $u$ and $v$ in different sets because otherwise $C(u)=C(v)$ (this crucially uses the fact that sets are formed by nodes $v$ whose distance to $C(v)$ is strictly less than $r_{C(v)}$). By carefully picking the distribution of $r_y$, \cite{LinialS93} shows that the number of iterations is $O(\log n)$. 
%
%The following is one simple (although not the most efficient) way to simulate the above algorithm in the Server-Client CONGEST model. We implement each iteration of the above algorithm in {\em sub-iterations}. In the beginning of the $j^{th}$ sub-iteration, every server $y$ with $r_y=j$ sends its ID to its neighboring clients. We then repeat the following for $2j-1$ steps: every node (client or server) sends the maximum ID that it receives to its neighbors. It is easy to see that after all sub-iterations every server $v$ receives the maximum ID among the IDs of servers $y$ such that $r_y=j$ and the distance between $y$ and $v$ in the server graph is at most $j$. Since $r_y=O(\log n)$ for every $y$, there are $O(\log n)$ sub-iterations and each sub-iteration takes $O(\log n)$ time. After all sub-iterations, every server $v$ can select $C(v)$. Thus, we can simulate Linial-Saks algorithm in $O(\log^3 n)$ time.
%
%
%In addition to the decomposition, we also construct a {\em communication tree} $T_y$ corresponding to each node $y$ as follows: At any sub-iteration, if a node $v$ sends the ID of $y$ to its neighbors, we add its neighbors that are not already in $T_y$ as the children of $v$ in $T_y$ (we assume by induction that $v$ is already in $T_y$). It is easy to see that $T_y$ is a tree of depth $O(\log n)$ rooted at $y$. 
% 
%%\paragraph{Constructing the Breadth-First Search Communication Tree}
%
%%For any node $y$, we use $G_y$ as the subnetwork of the original network $G$ induced by nodes in the set centered at $y$. We let $G'_y$ be the union of $G_y$ and $T_y$. Note that $G'_y$ has $O(\log n)$  diameter while $G_y$ might not. In fact, $G'_y$ is an $O(\log n)$-weak diameter network. 
%
%%\paragraph{Simulating $\cA$ on $G'_y$}
%
%\paragraph{Using $\cA$} After we decompose the network, we use $\cA$ to compute MIS iteratively in $O(\log n)$ iterations as follows. At the $i^{th}$ iteration, we consider each set $S$ of color $i$ centered at some node $y$. We will decide whether each node in $S$ will be in the final solution of MIS or not.  We assume that we already did so for sets of colors $1, 2, \ldots, i-1$. 
%
%Let $\cH_S$ be the following sub-hypergraph. $\cH_S$ consists of all hypernodes in $S$. For each hyperedge $e$ that contains a node in $S$, we add an edge $e'=e \cap S$ to $\cH_S$ if $e$ contains {\em none} of the following nodes: (1)  a node in set $S'$ of color $j>i$, and (2) a node in set $S''$ of color $j<i$ that is already decided to be {\em not} in the MIS. We can construct $\cH_s$ quickly since each server (hypernode) can decide locally whether each client (hyperedge) adjacent to it satisfies the above property or not. 
%
%Now we compute MIS of $\cH_S$ by simulating $\cA$ on $G'_y$ (recall that $S$ is the set of servers centered at $y$). Note that since $G'_y$ $\cA$ is an $O(\log n)$-weak-diameter network, $\cA$ will finish in $T(n)$ time if we simulate $\cA$ on only one set. However, we argue that we can simulate $\cA$ on {\em all} sets of color $i$ {\em simultaneously} in $\tilde O(T(n))$ time. To prove this, Let $y_1, \ldots, y_k$ be the centers of sets of color $i$. We only have to show that when the simulations of $\cA$ on different graphs $G'_{y_a}$ and $G'_{y_{b}}$ want to send messages on trees $T_{y_a}$ and $T_{y_{b}}$, they can do so without interrupting each other. We can do this by using the order of our simulation of Linial-Saks algorithm; that is, a node $v$ sends and receives a message to its children in $T_y$ at the time it sends the ID of $y$ to its children in the simulation of Linial-Saks algorithm.
%%
%% When a node wants to send a message to its neighbor in $G_{y_j}$, we can do so in all $G_
%%
%By this method, we can simulate $\cA$ on all $G_{y_1}, \ldots, G_{y_k}$ simultaneously.\danupon{I'm being very vague here.}
%
%Once we get the solution .... claim that this is correct ...
%\end{proof}


%\subsection{$\tilde O(1)$ Time in the LOCAL model}\label{sec:MIS local}


%\begin{corr}
\begin{lemma} \label{thm:local}
MIS can be solved in $O(\log^2 n)$ rounds in the LOCAL model (in both vertex-centric and server-client representations). 
\end{lemma}
%\end{corr}
%\onlyLong{
\onlyLong{\begin{proof}}
  \onlyShort{\begin{proof}[Proof Sketch]}
  Using \Cref{claim:decomposition}, we partition the hypernodes of the input network into subgraphs, each of which has $O(\log n)$ diameter, such that no two subgraphs assigned the same color share a hyperedge.
\onlyLong{Our algorithm proceeds in the same way as in the proof of \Cref{thm:decomposition congest}, except that there is no congestion in the LOCAL model when we simulate $\cA$ (as specified in Lemma \ref{thm:decomposition congest}) on all graphs of color $i$.} 
\onlyShort{Note that there is no congestion in the LOCAL model when we simulate $\cA$ (as specified in Lemma \ref{thm:decomposition congest}) on all graphs of color $i$.}
  Thus, we need $O(T(n))$ time per color instead of $O(T(n)\log^3 n)$. Moreover, we can solve the subgraph-MIS problem on a network of $O(\log n)$ diameter in $O(\log n)$ time by collecting the information about the subgraph at one node, locally computing the MIS at that node, and sending the solution back to every node. Thus, $T(n)=O(\log n)$. 
%
%It follows from \Cref{thm:decomposition congest} 
%
It follows that we can solve MIS on networks of any diameter in $O(\log^2 n)$ time. 
\end{proof}

%
%
\onlyShort{\vspace{-0.35cm}}
\subsection{$O(\log^{(d+4)!+4} n)$ time in the CONGEST model assuming constant dimension $d$}\label{sec:constant dimension}
\onlyShort{\vspace{-0.2cm}}

Let $(\cH, \cH')$ be an instance of the subgraph-MIS problem such that the network $G$ representing $\cH$ has $O(\log n)$ diameter. We now show that we can solve this problem in $O(\log^{(d+4)!} n)$ time when  $\cH'$ has a constant dimension $d$, i.e. $|e|\leq d$ for every hyperedge $e$ in $\cH'$. By \Cref{thm:decomposition congest}, we will get an $O(\log^{(d+4)!+4} n)$-time algorithm for the MIS problem in the case of constant-dimensional hypergraphs (of any diameter) which works in both vertex-centric and server-client representations and even in the CONGEST model. This algorithm is also an important building block for the algorithm in the next section. 

%\danupon{\cite{aravind2}: $O((\log n)^{(d+4)!})$ for $d\leq \frac{\log\log n}{4\log\log\log n}$.}

Our algorithm simulates the PRAM algorithm of Beame and Luby \cite{BeameL90} which was proved by Kelsen \cite{Kelsen92} to finish in $O(\log^{(d+4)!} n)$ time when the input hypergraph has a constant dimension $d$ and this running time was recently extended to any $d\leq \frac{\log\log n}{4\log\log\log n}$ by Bercea~et~al.~\cite{aravind2}\footnote{The original running time of Kelsen \cite{Kelsen92} is in fact $O((\log n)^{f(d)})$ where $f(d)$ is defined as $f(2)=7$ and $f(i)=(i-1)\sum_{j=2}^{i-1}f(j)+7$ for $i>2$. The $O(\log^{(d+4)!} n)$ time (which is essentially the same as Kelsen's time) was shown in \cite{aravind2}. We will use the latter running time for simplicity. Also note that the result in this section holds for all $d\leq \frac{\log\log n}{4\log\log\log n}$ due to \cite{aravind2}.}. 
%
The crucial part in the simulation is to compute a number $\zeta(\cH')$ defined as follows. For $\emptyset \neq x \subseteq V(\cH')$ and an integer $j$  with $1\leq j\leq d-|x|$ we define: 
%
$N_j(x, \cH') = \{y\subseteq V(\cH') \mid x\cup y \in E(\cH') \wedge x\cap y = \emptyset\wedge |y|=j\},$
and
$d_j(x, \cH') = (|N_j(x, \cH')|)^{1/j}.$
Also, for $2\leq i\leq d$, let\footnote{A note on the notation: \cite{BeameL90,Kelsen92} use $\Delta$ to denote what we use $\zeta$ to denote here. We use a different notation since we use $\Delta$ for another purpose.} 
$\zeta_i(\cH') = \max \{d_{i-|x|}(x, \cH') \mid x\subseteq V(\cH') \wedge 0<|x|<i\}$
and
$\zeta(\cH') = \max\{\zeta_i(\cH') \mid 2\leq i\leq d\}.$
%
We now explain how to compute $\zeta(\cH')$ in $O(\log^{(d+4)!} n)$ time. First, note that we can assume that every node knows the list of members in each hyperedge that contains it: this information is already available in the vertex-centric representation; and in the server-client representation, every hyperedge can send this list to all nodes that it contains in $O(d)$ time in the CONGEST model. Every node $v$ can now compute, for every $i$, 
$\zeta_i(v, \cH') = \max \{d_{i-|x|}(x, \cH') \mid x\subseteq V(\cH') \wedge 0<|x|<i \wedge v\in x\}.$
This does not require any communication since for any $x$ such that $v\in x$, node $v$ already knows all hyperedges that contain $x$ (they must be hyperedges that contain $v$). Now, we compute $\zeta(\cH') = \max\{\zeta_i(v, \cH') \mid 2\leq i\leq d \wedge v\in V(\cH')\}$ by computing through the breadth-first search tree of the network representing $\cH$ (this is where we need the fact that the network has $O(\log n)$ diameter). 

Once we get $\zeta(\cH')$, the rest of the simulation is trivial; we refer to the full paper for details.
\onlyLong{
We provide some detail here for completeness. We mark each hypernode in $\cH'$ with probability $p=\frac{1}{2^{d+1}\zeta(\cH')}$. If a hyperedge has all of its nodes
marked, we unmark all of its nodes. We then add the hypernodes that are still marked to the independent set and remove them from $\cH'$, thus reducing the size of some hyperedges in $\cH'$. In the remaining hypergraph do the following: eliminate any edges properly containing another edge; remove any hypernodes that form a 1-dimensional edge (i.e. remove every hypernode $v$ such that there is a hyperedge $\{v\}$); finally, remove isolated vertices (i.e., those not contained in any edge) and add them to the independent set. Let $\cH'$ be the resulting hypergraph. Repeat this procedure until there are no hypernodes left. It is easy to see that all steps (before we repeat the procedure) take $O(1)$ rounds.
%\danupon{TO DO LATER: Try to avoid ``It's easy to see''. Also, this procedure has many common steps with the one in the simulation of Karp et al. It should be better to combine the common part so we don't have to repeat the same thing.}
Kelsen \cite{Kelsen92} and Bercea~et~al.~\cite{aravind2} showed that we have to repeat this procedure only $O(\log^{(d+4)!} n)$ times (in expectation and with high probability) when $d\leq \frac{\log\log n}{4\log\log\log n}$ (there is no guarantee for other values of $d$); so, our simulation finishes in $O(\log^{(d+4)!} n)$ rounds. 
}


 
\onlyShort{\vspace{-0.2cm}}
\subsection{$\Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n$ and $\Delta^{o(1)}n^{o(1)}$ Time in the CONGEST model}\label{sec:MIS Delta epsilon}
\onlyShort{\vspace{-0.2cm}}

%
%1. I think the k-uniform hypergraph version of Turan's theorem can be easily proved using the same proof for Theorem 3.2.1 in Alon-Spencer book (page 29). We only have to set p=(1/d)^{1/(k-1)}. Note that AS uses d as our \delta (average server degree -- I won't use \alpha for this since it should be reserved for the maximum independent set size) and k as our d (dimension). So, in our language, the theorem is:
%

We rely on a modification of Tur\'an's theorem, which states that a (two-dimensional) graph of {\em low} average degree has a {\em large} independent set (see e.g. Alon and Spencer \cite{AlonS08book}). We show that this theorem also holds for high-dimensional hypergraphs, and show further that such a large independent set can be found w.h.p.\ when the network diameter is $O(\log n)$. %Recall that the degree of a hypernode is the number of hyperedges that contain it and the dimension of a hyperedge is the number of hypernodes it contains.
%\danupon{Note: $\tilde O$ in this section only hides } 

\begin{lemma}[A simple extension of Tur\'an's theorem]\label{thm:Turan} Let $d\geq 2$ and $\delta\geq 2$ be any integers. Let $\cH$ be any hypergraph such that every hyperedge in $\cH$ has dimension at least $d$, there are $n$ hypernodes, and the average hypernode degree is $\delta$. (Note that the diameter of the network representing $\cH$ can be arbitrary.) If every node knows $\delta$ and $d$, then we can find an independent set $M$ whose size in expectation is at least  $\frac{n}{\delta^{1/(d-1)}}(1-\frac{1}{d})$ in $O(1)$ time. 
%
%of size at least $\frac{1}{2}\frac{n}{\delta^{1/(d-1)}}(1-\frac{1}{d})$ in $\cH'$ in $\tilde O(1)$ time w.h.p.  
\end{lemma}
%
\onlyLong{
\begin{proof}
We modify the proof of Theorem 3.2.1 in \cite[pp.29]{AlonS08book}. Let $p=(1/\delta)^{1/(d-1)}$ (note that $p<1$) and $S$ be a random set of hypernodes in $\cH$ defined by $Pr[v\in S]=p$ for every hypernode $v$. Let $X=|S|$, and let $Y$ be the number of hyperedges in $\cH$ contained in $S$ (i.e. hyperedge $e\in E(\cH)$ such that $e\subseteq S$). For each hyperedge $e$, let $Y_e$ be the indicator random variable for the event $e\subseteq S$; so, $Y=\sum_{e\in E(\cH)} Y_e$. Observe that for any hyperedge $e$, 
$E[Y_e] = p^{|e|} \leq p^d$ 
since $e$ contains at most $d$ hypernodes. So, $E[Y] = \sum_{e\in E(\cH)} E[Y_e] \leq \frac{n\delta}{d}p^d$ (the inequality is because the number of hyperedges in $\cH$ is at most  $\frac{n\delta}{d}$). 
%
Clearly, $E[X]=np$; so, 
$$E[X-Y] \geq np-\frac{n\delta}{d}p^d = n p (1-\frac{\delta}{d}p^{d-1}) = n(\frac{1}{\delta})^{\frac{1}{d-1}}(1-1/d)$$
where the last equality is because $p=(\frac{1}{\delta})^{\frac{1}{d-1}}$. 
%
%
Our algorithm will pick such a random set $S$. (Every node can decide whether it will be in $S$ locally.) Then it selects one vertex from each hyperedge contained in $S$ and deletes it. (This can be done in $O(1)$ time.) This leaves a set $S^*$ with at least $n(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$ hypernodes in expectation. All edges having been destroyed, $S^*$ is an independent set.
%
%Our algorithm will pick a random set $S$ repetitively until it finds a random set $S$ with the corresponding $X$ and $Y$ such that $X-Y\geq E[X-Y]/2$. Note that every time it picks a random set $S$, it can compute $X$ and $Y$ in $O(\log n)$ time since the network diameter is $O(\log n)$. 
%%
%
%
%By Chernoff's bound, the algorithm has to repeats selecting random sets for only $O(\log n)$ times w.h.p. 
%%
%Thus, in $\tilde O(1)$ rounds, we will find a random set $S$for which the number of hypernodes of $S$ minus the number of hyperedges contained in $S$ is at least $\frac{1}{2} n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$. Select one veertex from each edge of $S$ and delete it. This leaves a set $S^*$ with at least $\frac{1}{2} n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$ hypernodes. All edges having been destroyed, $S^*$ is an independent set.
\end{proof}
}


% ----------------------------------------
% The version below is Turan's theorem for subgraph-MIS
% ----------------------------------------
%\begin{lemma}[Simple extension of Tur\'an's theorem]\label{thm:Turan} Let $d\geq 2$ and $\delta\geq 2$ be any integers. Let $\cH$ be any hypergraph whose representation has $O(\log n)$ diameter. Let $\cH'$ be any sub-hypergraph of $\cH$ such that every hyperedge in $\cH'$ has dimension at least $d$, there are $n'$ hypernodes, and the average hypernode degree is $\delta$. Then, we can find an independent set $M$ whose size in expectation is at least  $\frac{n}{\delta^{1/(d-1)}}(1-\frac{1}{d})$ in $\tilde O(1)$ time. 
%%
%%of size at least $\frac{1}{2}\frac{n}{\delta^{1/(d-1)}}(1-\frac{1}{d})$ in $\cH'$ in $\tilde O(1)$ time w.h.p.  
%\end{lemma}
%%
%\begin{proof}
%
%
%We modify the proof of Theorem 3.2.1 in \cite[pp.29]{AlonS08book}. Let $p=(1/\delta)^{1/(d-1)}$ (note that $p<1$) and $S$ be a random set of hypernodes in $\cH'$ defined by $Pr[v\in S]=p$ for every hypernode $v\in V(\cH')$. Let $X=|S|$, and let $Y$ be the number of hyperedges in $\cH'$ contained in $S$ (i.e. hyperedge $e\in E(\cH')$ such that $e\subseteq S$). For each hyperedge $e$, let $Y_e$ be the indicator random variable for the event $e\subseteq S$; so, $Y=\sum_{e\in E(\cH')} Y_e$. Observe that for any hyperedge $e$, 
%$E[Y_e] = p^{|e|} \leq p^d$ 
%since $e$ contains at most $d$ hypernodes. So, $E[Y] = \sum_{e\in E(\cH)} E[Y_e] \leq \frac{n'\delta}{d}p^d$ (the inequality is because the number of hyperedges in $\cH'$ is at most  $\frac{n'\delta}{d}$). 
%%
%Clearly, $E[X]=n'p$; so, 
%$$E[X-Y] \geq n'p-\frac{n'\delta}{d}p^d = n' p (1-\frac{\delta}{d}p^{d-1}) = n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-1/d)$$
%where the last equality is because $p=(\frac{1}{\delta})^{\frac{1}{d-1}}$. 
%%
%%
%Our algorithm will pick such a random set $S$. Then it selects one vertex from each edge of $S$ and deletes it. (This can be done in $O(1)$ time.) This leaves a set $S^*$ with at least $n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$ hypernodes in expectation. All edges having been destroyed, $S^*$ is an independent set.
%%
%%Our algorithm will pick a random set $S$ repetitively until it finds a random set $S$ with the corresponding $X$ and $Y$ such that $X-Y\geq E[X-Y]/2$. Note that every time it picks a random set $S$, it can compute $X$ and $Y$ in $O(\log n)$ time since the network diameter is $O(\log n)$. 
%%%
%%
%%
%%By Chernoff's bound, the algorithm has to repeats selecting random sets for only $O(\log n)$ times w.h.p. 
%%%
%%Thus, in $\tilde O(1)$ rounds, we will find a random set $S$for which the number of hypernodes of $S$ minus the number of hyperedges contained in $S$ is at least $\frac{1}{2} n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$. Select one veertex from each edge of $S$ and delete it. This leaves a set $S^*$ with at least $\frac{1}{2} n'(\frac{1}{\delta})^{\frac{1}{d-1}}(1-\frac{1}{d})$ hypernodes. All edges having been destroyed, $S^*$ is an independent set.
%\end{proof}


\paragraph{Algorithm.} We use the following algorithm to solve the subgraph-MIS problem on a sub-hypergraph $\cH'$ of $\cH$, assuming that the network representing $\cH$ has $O(\log n)$ diameter. Let $n'=|V(\cH')|$. Let $d$ be an arbitrarily large constant. Let $\cH'_d$ be the sub-hypergraph of $\cH'$ where $V(\cH'_d)=V(\cH')$ and we only keep hyperedges of dimension (i.e. size) at least $d$ in $\cH'_d$. (It is possible that $\cH'_d$ contains no edge.) We then find an independent set of expected size at least $\frac{n'}{\Delta^{1/(d-1)}}(1-1/d)$ in $\cH'_d$, denoted by $S$; this can be done in $O(1)$ time by \Cref{thm:Turan} (note that we use the fact that $\delta\leq \Delta$ here). Let $\cH'_S$ be the sub-hypergraph of $\cH'$ induced by nodes in $S$. %(i.e., a hyperedge $e\in E(\cH')$ is in $\cH'_S$ if and only if $e\subseteq S$). 
%
Note that $\cH'_S$ does not contain any hyperedge in $\cH'_d$ and thus has dimension at most $d$, which is a constant. So, we can run the $O(\log^{(d+4)!} n)$-time algorithm from \Cref{sec:constant dimension} to find an MIS of $\cH'_S$. We let $M'_S$ be such an MIS of $\cH'_S$. 

Our intention is to use $M'_S$ as part of some MIS $M'$ of $\cH'$. Of course, any hypernode $v$ in $V(\cH'_S)\setminus M'_S$ cannot be in such $M'$ since  $M'\cup \{v\}$ will contain some hyperedge $e$ in $\cH'_S$ which is also a hyperedge in $\cH'$.  
%
It is thus left to find which hypernodes in $V(\cH')\setminus S$ should be added to $M'_S$ to construct an MIS $M'$ of $\cH'$. To do this, we use the following hypergraph. Let $\cH''$ be the sub-hypergraph of $\cH'$ such that $V(\cH'')=V(\cH')\setminus S$ and for every hyperedge $e\in E(\cH')$, we add a hyperedge $e\cap V(\cH'')$ to $\cH''$ if and only if $e \subseteq M'_S \cup V(\cH'')$; in other words, we keep every edge $e$ that would be ``violated'' if we added every hypernode in $\cH''$ to $M'$. 
% 
We now find an MIS $M''$ of $\cH''$ by recursively running the same algorithm with $\cH''$, instead of $\cH'$, as a subgraph of $\cH$. The correctness follows from the following claim\onlyShort{ (see the full paper for the proof).}\onlyLong{.}

%; in other words, $\cH''$ is a subgraph of $\cH'$ induced by nodes {\em not} in $S$ except that we remove some edges that are 

% contains some node in $S$ that is not in the MIS of $M'_S$. 

\begin{claim}
$M'=M'_S\cup M''$ is an MIS of $\cH'$. 
\end{claim}
\onlyLong{
\begin{proof} First, we show that $M'$ is an independent set of $\cH'$. Assume for a contradiction that there is a hyperedge $e$ in $\cH'$ such that $e\subseteq M'$. This means that $e \subseteq M'_S \cup V(\cH'')$ since $M'_S\cup M''\subseteq M'_S \cup V(\cH'')$. It follows from the construction of $\cH''$ that  there is an edge $e'=e\cap V(\cH'')$ in $\cH''$. Note that $e\cap V(\cH'')\subseteq M''$; in other words $e'\subseteq M''$.  This, however, contradicts the fact that $M''$ is an MIS in $\cH''$. 

Now we show that $M'$ is maximal. Assume for a contradiction that there is a hypernode $v$ in $V(\cH')\setminus M'$ such that $M'\cup \{v\}$ is an independent set. If $v$ is in $S$, then $M'_S\cup \{v\}$ is an independent set in $\cH'_S$ (since it is a subset of $M'\cup \{v\}$), contradicting the fact that $M'_S$ is an MIS in $\cH'_S$. So, $v$ must be in $V(\cH'')$. This, however, implies that $M''\cup \{v\}$ is an independent set in $\cH''$ (again, since it is a subset of $M'\cup \{v\}$), contradicting the fact that $M''$ is an MIS in $\cH''$.
\end{proof}
}
%
%
We now analyze the running time of this algorithm. Recall that $E[|S|]\geq \frac{n'}{\Delta^{1/(d-1)}}(1-1/d)$. In other words, $E[|V(\cH'')|]\leq (1- \frac{c(d)}{\Delta^{1/(d-1)}}) |V(\cH')|$ where $c(d)=\frac{1}{2}(1-1/d)$ is a constant which is strictly less than one (recall that $d$ is a constant). It follows that the expected number of recursive calls is $O(\Delta^{\frac{1}{d-1}})$. %\danupon{I still feel that we need to say how to analyze this.}.
%\danupon{TO DO: I haven't provided details here since I don't remember exactly how the analysis goes when we only have the expectation. Please help filling in details here.} 
%
Since we need $O(\log^{(d+4)!} n)$ time to compute $M'_S$ and to construct $\cH''$, the total running time is  $O(\Delta^{\frac{1}{d-1}}\log^{(d+4)!} n)$. By \Cref{thm:decomposition congest}, we can compute MIS on any hypergraph $\cH$ (of any diameter) in 
\longOnly{$$O(\Delta^{\frac{1}{d-1}}\log^{(d+4)!+4} n)$$}
\shortOnly{$O(\Delta^{\frac{1}{d-1}}\log^{(d+4)!+4} n)$}
time. 
% 
For any constant $\epsilon>0$, we set $d=1+1/\epsilon$ to get the claimed running time of \longOnly{
\begin{align}
O(\Delta^{\epsilon}\log^{(5+1/\epsilon)!+4} n) = \Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n.
\label{eq:Delta epsilon time}
\end{align}}
\shortOnly{$O(\Delta^{\epsilon}\log^{(5+1/\epsilon)!+4} n) = \Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n.$}
%
%Since this running time holds for any constant $d$, the claimed running time of  $O(\Delta^{\epsilon})$ for any small $\epsilon>0$ follows. 
%
%Thus, by \Cref{thm:decomposition congest}, we can compute MIS on any hypergraph $\cH$ (of any diameter) in 
%\longOnly{$$O(\Delta^{\epsilon}\log^{(5+1/\epsilon)!+4} n)$$}
%\shortOnly{$O(\Delta^{\epsilon}\log^{(5+1/\epsilon)!+4} n)$}
%time. 
%
Moreover, by the recent result of Bercea~et~al. \cite{aravind2}, we can in fact set $d$ as large as $\frac{\log\log n}{4\log\log\log n}.$  
%
% --- This version gives \Delta^{o(1)}n^{o(1)} -----
%
\longOnly{
In this case, we set $d=\frac{\log\log n}{c\log\log\log n}$ for a sufficiently large constant $c$. Note that for some constant $c'$, 
$$(d+4)! \leq d^{c'd} = e^{c'd\log d}\leq e^{c'\cdot\frac{\log\log n}{c\log\log\log n}\cdot \log\log\log n} \leq \log^{1/10} n$$ 
where the last inequality holds when we set $c=10c'$.
Thus, 
$$\log^{(d+4)!} n \leq \log^{\log^{1/10} n} n  = 2^{(\log^{1/10} n)\log\log n}=n^{o(1)}.$$
The running time thus becomes $\Delta^{o(1)}n^{o(1)}.$
}
\shortOnly{If we set $d=\frac{\log\log n}{c\log\log\log n}$ for some large enough constant $c$, the term $\log^{(d+4)!} n$ can be bounded by $n^{o(1)}$ and thus the running time becomes $\Delta^{o(1)}n^{o(1)}$.}



%% --- This version gives Delta^{o(1)}polylog n ---
%
%
%\longOnly{
%{\bf *** The analysis below still has a problem. Please comment it out before submitting ***}
%
%This leads us to the running time of $\Delta^{o(1)}\polylog n$ by considering three cases. 
%
%\underline{Case 1:} $\Delta\leq \log n$. We can set $\epsilon=1$ in the previous $\Delta^{\epsilon}\log^{(1/\epsilon)^{O(1/\epsilon)}} n$ time (\Cref{eq:Delta epsilon time}) to get $\polylog n$ time.
%
%\underline{Case 2:} $\log n\leq \Delta\leq n$. We set $d$ to be such that 
%$$(d+4)!+4=\Theta(\frac{\log^{1/10} \Delta}{\log\log n}).$$ 
%Note that $d=\omega(1)$ since ...
%{\bf *** DOESN'T WORK: $d$ could be 1 when $\Delta=\polylog n$ which makes $\Delta^{\frac{1}{d-1}}\neq \Delta^{o(1)}$***} We can set $d$ to such value without violating the constraint $d\leq\frac{\log\log n}{4\log\log\log n}$ since if we set $d=\frac{\log\log n}{4\log\log\log n}$, then 
%$$\frac{d}{2}\log \frac{d}{2} = \frac{\log\log n}{2\log\log\log n}(\log\log\log n-2\log\log\log\log n) \geq \frac{\log\log n}{4}.$$
%This makes
%$$(d+4)!+4 \geq (\frac{d}{2})^\frac{d}{2} = 2^{\frac{d}{2}\log \frac{d}{2}}\geq \log^{1/4} n \geq \log^{1/4} \Delta.$$
%This makes the running time be  
%$$O(\Delta^{\frac{1}{d-1}}\log^{(d+4)!+4} n) = O(\Delta^{\frac{1}{d-1}}\log^{\Theta(\frac{\log^{1/10} \Delta}{\log\log n})} n)  = O(\Delta^{\frac{1}{d-1}}2^{\Theta(\frac{\log^{1/10} \Delta}{\log\log n})\log\log n}) = \Delta^{o(1)}$$ 
%where the last equality uses the fact that $d=XXX = \omega(1)$ and $2^{\Theta(\log^{1/10} n)} = \Delta^{o(1)}.$
%
%\underline{Case 3:} $\Delta\geq n$. We set $$d=\frac{\log\log n}{c\log\log\log n}$$ for some large enough constant $c$. 
%In this case, note that for some constant $c'$, 
%$$(d+4)! = d^{c'd} = e^{c'd\log d}= e^{c'\cdot\frac{\log\log n}{c\log\log\log n}\cdot \log\log\log n} = \log^{1/10} n$$ 
%where the last equality holds when we set $c=10c'$.
%Thus, 
%$$\log^{(d+4)!} n = \log^{\log^{1/10} n} n  = 2^{(\log^{1/10} n)\log\log n}=n^{o(1)}.$$
%The running time thus becomes $\Delta^{o(1)}n^{o(1)} = \Delta^{o(1)}$.
%}



%We will set $d$ so that $(d+4)!+4\leq $  

%\leq (d+4)^{d+4}$


%the running time becomes 
%%
%$O(\Delta^{\frac{1}{d-1}} \log^{(d+4)!} n) = O(\Delta^{o(1)}\log^{(d+4)^{(d+4)}} n).$
%%
%%$$\Delta^{\frac{4\log\log\log n}{\log\log n-4\log\log\log n}}\log^{(\frac{\log\log n}{4\log\log\log n}+4)!+4} n.$$
%%
%%Note that $(d+4)^{(d+4) = O((2^{\log\log\log n})^{O()})$


%Let $T_n(n')$ denote the running time to solve the subgraph-MIS problem when there are  $n$ nodes in the network representing hypergraph $\cH$ and $n'$ hypernodes in the subgraph $\cH'$ of $\cH$. We need $\tilde O(1)$ to compute $M'_S$ and construct $\cH''$. We then need $T_n(n'')$ time, where $n''=|V(\cH'')|$, to compute $M''$. Since $|S|\geq \frac{1}{2}\frac{n'}{\delta^{(1/(d-1))}}(1-1/d)$, we have that $n''\leq (1- \frac{c(d)}{\Delta^{1/(d-1)}}) n'$ for some constant $c(d)=\frac{1}{2}(1-1/d)$ which is strictly less than one (recall that $d$ is a constant).  Thus, 
%%
%\[T_n(n') \leq T_n(c(d) \frac{n'}{\Delta^{\frac{1}{d-1}}}).\]
%%
%This recursion can be easily solved to $T_n(n') = \tilde O(\Delta^{\frac{1}{d-1}})$. (In particular, this is because the number of hypernodes in the sub-hypergraph decreases by a fraction of $\Omega(\Delta^{\frac{1}{d-1}})$ after every recursion.) Since this running time holds for any constant $d$, the claimed running time of  $\tilde O(\Delta^{\epsilon})$ for any small $\epsilon>0$ follows. 
%%
%Thus, by \Cref{thm:decomposition congest}, that we can compute MIS on any hypergraph $\cH$ (of any diameter) in $\tilde O(\Delta^{\epsilon})$ time. 

%This implies that the So, we will have at most $\tilde O(\Delta\Delta^{\frac{1}{d-1}})$ recursions. The running time is thus  


% Beam-Luby's MIS algorithm in $\tilde O(1)$ time to determine the status of all nodes in $\cH_S$, which can be kept as a status of $\cH$, i.e., 
%
%\begin{itemize}
%\item the independent set $M_S$ of $\cH_S$  is also independent in $\cH$, and 
%\item any node in $\cH_S$ that cannot be added to $M_S$ cannot be added to the independent set in $G$. 
%\end{itemize}
%
%Both properties are simply because G_S is an induced subgraph of G. This means that we have determined the status of at least (1-1/d^*)\frac{n}{\delta^{1/(d^*-1)}} nodes. So, after repeating this for \frac{d^*}{d^*-1}\delta^{1/(d^*-1)} = O(\delta^{1/(d^*-1)}) times we will determine the status of all nodes. So, the running time is \tilde O(\delta^{1/(d^*-1)}) = \tilde O(m^{1/(d^*-1)})  which can be written as \tilde O(m^\epsilon) for any \epsilon>0. 
\onlyShort{We obtain the $O(\sqrt{n})$ time by modifying the PRAM algorithm of Karp, Upfal, and Wigderson \cite[Section 4.1]{KarpUW88}. This algorithm can be found in the full version.}
%
\onlyLong{
\subsection{$O(\sqrt{n})$ Time in the CONGEST model}\label{sec:sqrt n algo}

We obtain the $O(\sqrt{n})$ time by modifying the PRAM algorithm of Karp, Upfal, and Wigderson \cite[Section 4.1]{KarpUW88}. (Note that we do not need the fact that the network diameter is $O(\log n)$ for this algorithm.) Their algorithm is as follows. Let $v_1, v_2, \ldots, v_n$ be a random permutation of hypernodes. The algorithm gradually adds hypernodes to the independent set one by one, starting from $v_1$. It stops at some hypernode $v_k$ when $v_k$ cannot be added to the independent set. Thus, $v_1, \ldots, v_{k-1}$ are added to the independent set; the algorithm removes these hypernodes from the graph. It also removes {\em all} hypernodes that cannot be added to the independent set (i.e. any $v$ such that $\{v_1, \ldots, v_{k-1}, v\}$ contains some hyperedge) and all hyperedges that contain them. It repeats the same process to find an MIS of the remaining graph. It is easy to show (see \cite{KarpUW88} for details) that the union of an MIS of the remaining graph and $\{v_1, \ldots, v_{k-1}\}$ is an MIS of the input graph. The key to proving the efficiency of this algorithm is the following. 

\begin{claim}[\cite{KarpUW88}]\label{claim:Karp et al}
The expected number of removed hypernodes ($v_1, \ldots, v_{k-1}$ and hypernodes that cannot be added to the independent set) in the above process is $\Omega(\sqrt{n})$. 
%either add all first $\sqrt{n}$ nodes (i.e. $v_1, \ldots, v_{\sqrt{n}}$) to the independent set or the expected number of eliminated nodes is at least $\sqrt{n}$.
\end{claim}
It follows almost immediately that we have to repeat the process only $O(\sqrt{n})$ times in expectation (see \cite[Appendix]{KarpUW88} for detail).
%
We now show how to modify this algorithm to our setting. Every hypernode $v$ picks a random integer $r(v)$ between $1$ and $n^2$. With high probability, all hypernodes pick distinct numbers. Then every hypernode $v$ marks itself (to be added to the independent set) if, for every hyperedge $e$ that contains $v$, $r(v)<\max_{u\in e} r(u)$, i.e., its number is not the maximum in any hyperedge. We add all marked hypernodes to the independent set, remove them from the graph, and eliminate hypernodes that cannot be added to the independent set (i.e. a hypernode $v$ marks itself as ``eliminated'' if there is a hyperedge $e$ such that $e\setminus \{v\}$ is a subset of marked hypernodes). We then repeat this process until there is no hypernode left. 

Using \Cref{claim:Karp et al}, we show that our algorithm has to repeat only $O(\sqrt{n})$ times, as follows. Consider an ordering $v_1, \ldots, v_n$ where $r(v_i)<r(v_{i+1})$. This is a random permutation. Let $k$ be such that $v_1, \ldots, v_k$ are added to the independent set by Karp et al.'s algorithm and $v_{k+1}, \ldots, v_n$ are not. Observe that for every $1\leq i\leq k$ and every hyperedge $e$ that contains $v_i$,  $r(v_i)<\max_{u\in e} r(u)$ (otherwise edge $e$ will be violated when we add $v_1, \ldots, v_k$ to the independent set). In other words, our algorithm will also add $v_1, \ldots, v_k$ to the independent set (but it may add other hypernodes as well). It follows  that our algorithm will eliminate every hypernode that is eliminated by Karp et al.'s algorithm. In other words, the set of hypernodes removed by our algorithm is a superset of the set of hypernodes removed by Karp et al.'s algorithm. Thus, by \Cref{claim:Karp et al}, the expected number of hypernodes removed in each iteration of our algorithm is $\Omega(\sqrt{n})$. By the same analysis as Karp et al., our algorithm will need only $O(\sqrt{n})$ iterations in expectation. Each iteration can be easily implemented in $O(1)$ rounds, so our algorithm takes $O(\sqrt{n})$ time in expectation. 
% --- Below is for w.h.p. ---
%This implies $O(\sqrt{n}\log n)$ time with high probability by a standard technique (i.e. terminate if the algorithm runs too long and repeat).

}
\input{applications}
\input{lowerbound}
\onlyLong{
\vspace{-0.5cm}
\section{Distributed Algorithms for Other Hypergraph Problems}
\vspace{-0.2cm}
\label{sec:other}
%\subsection{Luby's Algorithm}\label{sec:Luby}
%\danupon{Maybe this subsection should be in prelim?}
Many algorithms in this section will simulate an algorithm for finding  a MIS on a (standard) graph developed by Luby \cite{Luby86} as a subroutine. 
One version of this algorithm is this: (1) Randomly assign unique priorities to nodes in $G$ (which can be achieved w.h.p. by having each node in $G$ randomly pick an integer between $1$ and $n^4$). (2) We mark and add to the independent set all nodes that have a higher priority than all their neighbors. (3) We remove these marked nodes and their neighbors from the graph and repeat the procedure. Luby \cite{Luby86} shows that this procedure will be repeated only $O(\log n)$ times in expectation. So, it is sufficient to get $\tilde O(1)$ time if our algorithms can simulate the three steps above in $\tilde O(1)$ time. 


\onlyLong{\subsection{Maximal Clique} \label{sec:clique}}

%TO DO: $\log n$ algorithm for standard graph which is $\tilde d$ 

\begin{theorem}
Maximal clique can be computed in $\tilde O(D)$ time in the CONGEST vertex-centric model and $\tilde O(D+\dimension)$-time in the CONGEST server-client model, where $D$ is the network (i.e., server graph or the server-client bipartite graph) diameter and $\dimension$ is the hypergraph dimension.
\end{theorem}
\onlyLong{
\begin{proof}
Recall that in this problem, we want a maximal set $S$ of hypernodes such that every two hypernodes $u$ and $v$ in $S$ are contained in some common hyperedge. This is equivalent to finding a maximal clique in the server graph (defined in \Cref{sec:prelim}). 

Since the underlying network of the vertex-centric model is exactly the server graph, we can easily find a maximal clique in this model, as follows. Pick any node $s$. (This can be done in $O(D)$ time by, e.g., picking the node with the smallest ID or using a leader election algorithm.) Let $S$ be the set of all neighbors of $s$. Let $G_S$ be the subgraph of the server graph induced by nodes in $S$. Observe that if $M$ is a maximal clique in $G_S$ then $\{s\}\cup M$ is a maximal clique in $G$. So, it is sufficient to find a maximal clique in $G_S$. Observe further that if $\bar{G}_S$ is the complement graph of $G_S$ (i.e. an edge $(u, v)$ is in $\bar{G}_S$ if and only if it is not in $G_S$), then finding a maximal clique in $G_S$ is equivalent to finding a MIS in $\bar{G}_S$. 

We now simulate Luby's algorithm to find a MIS in $\bar{G}_S$.  We simulate the first step by letting node $s$ generate a random permutation of nodes in $\bar{G}_S$, say $v_1, v_2, \ldots, v_{|S|}$, and send a priority $i$ to node $v_i$. This can be done in one round since all nodes in $\bar{G}_S$ are neighbors of $s$. Now, every node $v$ in $\bar{G}_S$ with priority, say $i$, checks whether its priority is higher than those of all its neighbors in $\bar{G}_S$ (as required by the second step of Luby's algorithm). Observe that this is the case if and only if the priorities $i+1, i+2, \ldots, |S|$ are given to $v$'s neighbors in $G_S$. Node $v$ can check this in one round by receiving the priorities of all its neighbors in $G_S$. For simulating the third step, each node $v$ has to know whether it has a neighbor in $\bar{G}_S$ that is marked. We do this by counting the number of marked nodes (every node tells $s$ whether it is marked or not). Let $c$ be this number. Then, every node $v$ counts how many of its neighbors in $G_S$ are marked. If this is less than $c$, then $v$ has a neighbor in $\bar{G}_S$ that is marked. This takes $O(1)$ rounds. 


The above simulation of Luby's algorithm can be extended to the server-client model with an extra $O(\dimension)$ factor cost: For $s$ to distribute the priorities in the first step, it has to send up to $\dimension$ priorities to the same hyperedge. For the second step, where each node $v$ has to check whether its priority is higher than those of all its neighbors in $\bar{G}_S$, $v$ has to receive the priorities of all its neighbors in $G_S$, and it might have to receive up to $\dimension$ priorities from the same hyperedge. Finally, for the third step, where every node has to know the number of neighbors in $G_S$ that are marked, it has to receive the list of IDs of its marked neighbors, and it might have to receive up to $\dimension$ IDs from the same hyperedge. 
\end{proof}
}
Note that the dependence on the diameter in the running time is necessary, as shown in \Cref{thm:MCDS-ST-lowerbound}.
%
\onlyLong{\subsection{$(\Delta+1)$-Coloring}}
%
%We show that, just like the case of standard (2-dimensional) graphs, $(\Delta+1)$-coloring and maximal matching on hypergraphs can be computed quickly on both vertex-centric and server-client representations, even in the CONGEST model.
\onlyShort{
  The $(\Delta+1)$-coloring problem requires a coloring of the nodes such that no hyperedge is monochromatic. This can be achieved by ensuring that at least two vertices per hyperedge have distinct colors, which can be ensured using standard graph coloring (cf.\ full paper).
Also, by solving MIS on the line graph of a given hypergraph, we can get a maximal matching.
\begin{theorem}
%The followings hold in the CONGEST model in both vertex-centric and server-client representations.
% 
The $(\Delta+1)$-coloring problem on hypergraphs has the same complexity as the $(\Delta+1)$-coloring problem on standard (two-dimensional) graphs; in particular, it can be solved in $O(\log n)$ time. This holds in both vertex-centric and server-client representations and in the CONGEST model. 
The maximal matching problem on hypergraphs can be solved in $O(\log n)$ time in the CONGEST server-client model. %\danupon{I'm not sure how much time we need for the vertex-centric model.}
\end{theorem}
 % 
 }
 \onlyLong{
\begin{theorem}
%The followings hold in the CONGEST model in both vertex-centric and server-client representations.
% 
The $(\Delta+1)$-coloring problem on hypergraphs has the same complexity as the $(\Delta+1)$-coloring problem on standard (two-dimensional) graphs; in particular, it can be solved in $O(\log n)$ time. This holds in both vertex-centric and server-client representations and even in the CONGEST model. 
\end{theorem}

\begin{proof}
Recall that in the $(\Delta+1)$-coloring problem we want to color hypernodes so that there is no monochromatic hyperedge, i.e. a hyperedge such that all hypernodes it contains have the same color. We solve this problem by converting a hypergraph $\cH$ to a two-dimensional graph $G$ on the same set of nodes as follows. For every hyperedge $e$ in $\cH$, pick arbitrary two distinct hypernodes it contains, say $u$ and $v$, and create an edge $e'=(u, v)$ in $G$. Observe that $G$ has maximum degree at most $\Delta$ and any valid coloring in $G$ will be a valid coloring in $\cH$ (since if an edge $e$ in $\cH$ is monochromatic, then the corresponding edge $e'$ in $G$ will also be monochromatic). Thus, it is sufficient to find a $(\Delta+1)$-coloring in $G$. We can do this by simulating any $(\Delta+1)$-coloring algorithm for $G$ on $\cH$. This shows that $(\Delta+1)$-coloring on hypergraphs is {\em as easy as}  $(\Delta+1)$-coloring on standard graphs. 
\end{proof}
}

%For the maximal matching problem, we show that we can use a certain type of algorithms on standard graphs to solve the problem on hypergraphs.  We say that any algorithm is a {\em broadcasting algorithm} if every node sends the same message to all its neighbors in every round.  

%\begin{theorem}
%If there is a broadcasting algorithm that solves maximal matching on standard (two-dimensional) CONGEST $n$-node network in $T(n)$ time, then there is an algorithm that solve maximal matching problem on hypergraphs in $\tilde O(T(n))$ time in the CONGEST server-client model. In particular, the maximal matching problem on hypergraphs can be solved in $\tilde O(1)$ time in the CONGEST server-client model. \danupon{I'm not sure how much time we need for the vertex-centric model.}
%\end{theorem}
\onlyLong{\subsection{Maximal Matching}
\begin{theorem}
The maximal matching problem on hypergraphs can be solved in $O(\log n)$ time in the CONGEST server-client model. %\danupon{I'm not sure how much time we need for the vertex-centric model.}
\end{theorem}
}
\onlyLong{
\begin{proof}
%
%
Recall that this problem on a hypergraph $\cH$ asks for a maximal set $S\subseteq E(\cH)$ of {\em disjoint} hyperedges, i.e. $e\cap e'=\emptyset$ for all $e\neq e'$ in $S$. Consider the following {\em line graph} $G$: the nodes of $G$ are the hyperedges of $\cH$, i.e. $V(G)=E(\cH)$, and there is an edge between two nodes $e, e'\in V(G)$ if and only if their corresponding hyperedges overlap, i.e. $e\cap e'\neq \emptyset$. Clearly, a set $S$ is a maximal matching in $\cH$ if and only if it is an MIS in $G$. Thus, it remains to find an MIS in $G$. 
%
This can be done by simulating Luby's algorithm \cite{Luby86}.
%
Observe that the first and second steps need no communication. For the third step, every node in $G$ (hyperedge in $\cH$) only needs to know the highest priority among its neighbors. This can be done in $O(1)$ rounds by having each hyperedge (client in the server-client representation) in $\cH$ send its priority to all hypernodes (servers) that it contains; then these hypernodes send the maximum priority that they receive to all hyperedges that contain them. 
%
So, we can implement the three steps of Luby's algorithm in $O(1)$ rounds.
\end{proof}
}
}
