\documentclass[12pt]{article} % options precede the class name; `article' supports at most 12pt (the old trailing [14pt] was parsed as a date argument and ignored)
\usepackage{amsmath}
\usepackage{graphicx}
\usepackage{subfigure}
\pdfpagewidth 8.5in
\pdfpageheight 11in
\setlength\topmargin{0in}
\setlength\headheight{0in}
\setlength\headsep{0in}
\setlength\textheight{9.7in}
\setlength\textwidth{6.0in}
\setlength\oddsidemargin{0in}
\setlength\evensidemargin{0in}
\usepackage{setspace}
%\doublespacing
%\setlength\headheight{77pt}
%\setlength\headsep{0.25in}
\begin{document}


\title{File Sharing}
\author{Bing Wei, Zengbin Zhang \\
\textit{bwei,zengbin@cs.ucsb.edu}}

\maketitle

\section{System Architecture}
The general system implementation architecture is shown in Fig.~\ref{fig:topo}. We implemented
the system in Python, including four classes: file\_server, file\_client, msg\_server,
msg\_client. The message client and server are used as daemons to send and receive UDP
messages. The system is configured by a configuration file, which contains the
\emph{address} and listening \emph{port}, as well as the \emph{process id} of the File
Server and all the clients. Each client is assigned a process id ($>0$), and the server
is assigned process id 0.
    
\begin{figure}[htbp]
\centering
\includegraphics[height=2.0in]{arch.eps}
\caption{Client/Server Architecture}
\label{fig:topo}
\end{figure}

\subsection{Event and Request Generation}
The causality among different processes is simulated by randomly generated events. We use a Timer with a randomized
timeout value for each process to generate events (EVT), and each time the event message is sent to a randomly
chosen receiver. Appending requests (REQ) are also randomly generated by each process.

\begin{figure}[htbp]
\centering
\includegraphics[height=2.0in]{msgflow.eps}
\caption{Message Flow}
\label{fig:flow}
\end{figure}


\section{Lamport-Inspired Approach}

\subsection{Client Implementation}
Important data structures on the client are as follows. $T_i$ is the logical clock on the client $C_i$. The local request queue $req\_local$ records all the requests generated by the client. The request received queue $req\_rcv$ records all the received, but not yet replied, requests. The reply received dictionary $rpl\_rcv$ records received replies to the sent requests. The key of the $rpl\_rcv$ dictionary is the request logical time. The value of the dictionary is a list of client IDs from whom the reply has been received. Initially, $T_i=<i, 0>$; $req\_local=null$; $req\_rcv=null$; $rpl\_rcv=null$.
$HB(e_i, e_j)$ means event $e_i$ happens before $e_j$. $\neg HA(e_i, e_j)$ means $HB(e_i, e_j)$ or $T(e_i)=T(e_j)$. $e_{icm}$ denotes an incoming request event. $e_{fql}$ denotes the first event in $req\_local$. Let $e_{fqr}$ be the first event in $req\_rcv$. Let $E_{rcv}=\{e \mid e \in req\_rcv \text{ and } HB(e, e_{fqr}) \}$.

Algorithm at $C_i$:
\begin{itemize}
\item Send EVT msg to a random client, piggyback the logical clock in the msg.
\item Send REQ msg to all clients in a random time interval, piggyback the logical clock in the msg. And put the request in $req\_local$.
\item When receiving an EVT msg from $C_j$, update $T_i=(i, \max(t[1], T_i[1])+1)$, where $t$ is the logical time in the msg.
\item When receiving a REQ msg from $C_j$, update $T_i=(i, \max(t[1], T_i[1])+1)$. IF $\neg HA(e_{icm}, e_{fql})$, send a RPL msg to $C_j$; ELSE put $e_{icm}$ in $req\_rcv$.
\item When receiving a RPL msg  from $C_j$, insert the msg in $rpl\_rcv$. IF (a request receives replies from all clients), send a WRT msg to FS, delete the request from $rpl\_rcv$. 
\item When receiving an ACK msg from FS, pop $e_{fql}$ from $req\_local$, send a RPL msg for every event in $E_{rcv}$.
\end{itemize}
    
\subsection{Server Implementation}
\begin{itemize}
\item When receiving a WRT msg from $C_i$, write to the file, send an ACK msg to $C_i$.
\end{itemize}

\section{Vector Clock-based Approach}
\subsection{Client Implementation}
Initially, the time vector of client $i$ is $v_i=<t_1, t_2, \dots, t_n>=<0, 0, \dots, 0>$, where $n$ is the number of clients.
\begin{itemize}
\item  When sending REQ to file server, $t_i = t_i + 1$, piggyback the updated time vector in the message.	
\item  When sending EVT, piggyback current time vector.
\item  When receiving EVT with $v_j$, set $v_i[k] = \max(v_i[k], v_j[k])$ for $k = 1, \dots, n$.
\item  When receiving GRT (grant), send WRT (write) message to the file server.
\item  When receiving ACK (appending acknowledgement) from file server, send RLS (release) message to file server.
\end{itemize}

\subsection{Server Implementation} 
The file server has a ``processed'' time vector $v_g$ to remember the time of last granted request. Initially, the value is $<0, 0,..., 0>$. File server also 
uses a queue $Q:(id, v_{id})$ to store all the waiting requests.
\begin{itemize}
\item When receiving REQ from client $j$, with time vector $v_j$. If resource is ``BUSY'', put $v_j$ in $Q$. If resource is ``FREE'',
	if $v_j[j]=v_g[j]+1$ and $v_j[k] \leq v_g[k]$ for all $k \neq j$, grant resource to client $j$, mark resource as ``BUSY'', $v_g[j] += 1$; else, put $v_j$ in $Q$.
\item When receiving WRT, write the message ``fs.log'', together with the time 
	vector of the corresponding request. Then send ACK to the client.
\item When receiving RLS, mark the resource as ``FREE''. Look at $Q$ to find a request $(j,v_j)$ with a time vector that satisfies the following conditions.
	\begin{itemize}
	\item[*] $v_j[j]=v_g[j]+1$ and $v_j[k] \leq v_g[k]$ for all $k \neq j$
	\end{itemize}		
	If found, grant the resource to $j$, mark the resource as ``BUSY'', $v_g[j] += 1$.

\end{itemize}		
	
\section{Correctness Verification}

\subsection{Causality Analysis}
In order to verify that the resource is granted in a causal order, we extracted the time for each message appended
in the file, and analyzed their causal order. We compared all the timestamps (time vector or logical clock time) pairwise to see if there is a
higher timestamp that appears before a lower one. No causal violations have been detected.

\subsection{Non-FIFO Simulation}
In a relatively small network like CSIL, it is hard to get a non-FIFO scenario; in most cases, the queues of both the file server and the clients are empty. In order to verify that our program works with non-FIFO links, we
modified the \emph{msg\_client} class and added a random delay for each outgoing message. This provided us lots of 
non-FIFO message transmissions, and thus we were able to verify that the program works well.

\section{Evaluation and Comparison}

\begin{itemize}
\item Robustness to failures\\
	 In the Lamport-inspired approach, if one client dies, the system will wait forever, since each client waits until it receives replies from all the clients. However, the vector-based approach will continue to work, since the server will grant the resource as long as the time vectors of the requests are in causal order. However, if one message is missing, neither of the approaches will work.
\item Scalability\\
 	In a system with $n$ clients, if each client issues $m$ requests, then 
	the number of messages needed by a Lamport-Inspired system will be $O(mn^2)$, while the number needed by a Vector Clock-based system will be $O(mn)$.
\item System Throughput\\
	Since in the Lamport-Inspired system, a client's request needs to be 
	approved by all the clients, the largest network delay on client-client channels or client-server channels will 
	be the bottleneck of the whole system. In the vector-based approach, the delay on a specific channel will only affect the two edge nodes.

\item Non-FIFO communication\\
	Both the systems support non-FIFO links.

\item The Intelligence of the file server\\
	In Lamport-Inspired approach, the file server doesn't need to provide 
	any intelligence, and doesn't have to maintain the causal order.
	The ordering is maintained through the coordination among the clients.
	In vector-based approach, the ordering task is completed in the file 
	server. 

\end{itemize}
%\section{Scenario One}
%
%\subsection{Settings}
%
%\subsection{Results}
%\begin{figure}[h]
%\centering 
%
%\subfigure[CDF of Mean Coverage]{
%\label{mean_cdf} %% label for first subfigure
%\includegraphics[height=2.4in,angle=0]{../mean_CDF}
%}
%\subfigure[CDF of Worst Coverage]{
%\label{worst_cdf} %% label for second subfigure
%\includegraphics[height=2.4in,angle=0]{../worst_CDF}
%}
%\caption{Coverage Ratio}
%\label{fig:cr} %% label for entire figure
%\end{figure}
%
%\begin{figure}[h]
%\centering 
%
%\subfigure[Total Utilization]{
%\label{util} %% label for first subfigure
%\includegraphics[height=2.3in,angle=0]{../Util}
%}
%\subfigure[Fairness: Variation]{
%\label{worst_cdf} %% label for second subfigure
%\includegraphics[height=2.3in,angle=0]{../var}
%}
%\caption{Allocation}
%\label{fig:alloc} %% label for entire figure
%\end{figure}
%
%\begin{figure}[h]
%\centering
%\includegraphics[height=2.4in]{../apn.eps}
%\caption{Number of AP got Allocated}
%\label{fig:apn}
%\end{figure}




%\section{Measurement Setup}
%
%\begin{figure}[h]
%\centering
%\includegraphics[height=2.4in]{hw2map.eps}
%\caption{Topology of Experiment Sites}
%\label{fig:topo1}
%\end{figure}


%\section{Experiment Results}


%\begin{figure}[h]
%\centering 
%
%\subfigure[Link Outage Frequency]{
%\label{outage1} %% label for first subfigure
%\includegraphics[height=3.0in,angle=270]{../outage1}
%}
%\subfigure[Outage Duration Distribution]{
%\label{outage2} %% label for second subfigure
%\includegraphics[height=3.0in,angle=270]{../outage2}
%}
%\caption{Link Outage}
%\label{fig:outage} %% label for entire figure
%\end{figure}
%
%
%
%\begin{table}[h]
%\centering
%\begin{tabular}{ l c c }
%Location & Outages & Percentage \\
%\hline
%Source & 16 & 3.6 \\
%Destination & 141 & 32.0455 \\
%Core &120 & 27.3 \\
%Same Site & 1 & 0.237273 \\
%Link Fluttering & 121 & 27.5  \\
%Unknown & 41 & 9.31818 \\
%\end{tabular}
%\label{localcore}
%\caption{Error Location}
%\end{table}
\bibliographystyle{abbrv}
\bibliography{./papers}

%\begin{thebibliography}{0}
%
%\end{thebibliography}

\end{document}
