%\clearpage\section{Network Coding}

%Network Coding is an extra layer of coding applied between source coding and channel coding, and is used to enhance either data transmission through an unreliable data channel, or throughput in routing, when using a reliable data channel. \fixme{cite!}

%This project will focus on network coding in a wireless broadcasting setup, which is both unreliable and can change dynamically. Due to the importance of scalability in a broadcasting setup, a transmission approach must be both scalable and simple. Error-control through feedback from the receiving nodes (e.g.\ negative acknowledgements) is only feasible for setups with a limited number of clients. 

%As mentioned in \cite{NCBASICS}, an approach where linear combinations are randomly chosen, and coding vectors are appended to coded data packets is useful when applying network coding to a dynamic network.

%\subsection{Random Network Coding} % (fold)
%\label{sub:randomnetworkcoding}
%As described in \cite{NCBASICS}:\\

%When performing network coding on a series of source packets $\{x_1,x_2,\dots,x_n\}$, linear combinations of the source packets are created and sent instead of the original source packets. That is, the network coded packets are combinations of the form $c_1\cdot x_1+c_2\cdot x_2+\dots+c_n\cdot x_n$. The coefficients $\{c_1,c_2,\dots,c_n\}$ used to create a network coded packet are, when assembled in a vector, referred to as the \textit{coding vector}. 

%The original source packets can be retrieved from the network coded packets when $n$ linearly independent combinations of the source packets are present. The relationship between network coded packets, coding vectors and source packets is described by the matrix equation in \eqref{eq:ncrelation}.


%\begin{align}
%\left[
%\begin{array}{c}	
%p_{1} \\
%p_{2} \\
%\vdots \\
%p_{n}
%\end{array}
%\right]
%&=
%\left[
%\begin{array}{cccc}	
%c_{11} & c_{21} & \cdots & c_{n1} \\
%c_{12} & c_{22} & \cdots & c_{n2} \\
%\vdots & \vdots & \ddots & \vdots \\
%c_{1n} & c_{2n} & \cdots & c_{nn}
%\end{array}
%\right]
%\left[
%\begin{array}{c}	
%x_{1} \\
%x_{2} \\
%\vdots \\
%x_{n}
%\end{array}
%\right]
%\label{eq:ncrelation} \\
%\intertext{Where:}
%&\text{n is the number of source packets, also known as generation size.} \notag\\
%&\text{$x_i$ are the source packets.} \notag\\
%&\text{$c_{ij}$ are the coding coefficients.} \notag\\
%&\text{$p_i$ are the resulting network coded packets.} \notag
%\end{align}

%The possibility of all received packets $p_i$ being linearly independent combinations of the source packets depends on the number of values each of the coefficients in the coding vector can take. This number of possible values for the coefficients is also known as the \textit{field size}. That is, if a field size of 2 is used, each of the coefficients $c_{ij}$ can be either 1 or 0, while a field size of $2^8$ grants 256 different values for each of the coefficients, thus increasing the chance of the encoded packets being linearly independent. However, the tradeoff for a large field size is the increased amount of computation required to decode each of the packets. 

%However, the generation size also influences the probability of the encoded packets being linearly dependent, and larger generation sizes reduce this probability. Equation \eqref{eq:lineardependency} describes the probability of linear dependency as a function of the field size and the generation size.


%% subsection randomnetworkcoding (end)
%\subsection{Decoding Probability}
%A generation of linearly combined packets can be decoded to the original source packets when the coding matrix reaches full rank. The probability of this matrix reaching full rank after $n$ received combinations can be illustrated with a Markov chain, where each state is the rank of the coding matrix, and where a transition occurs when a new linear combination of the source packets is received. 
%The Markov chain is illustrated in Figure \ref{fig:markov_chain}.

%\begin{figure}[h!]
%\centering
%	\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=2.5cm,
%		            semithick]
%	\tikzstyle{every state}=[fill=white,text=black]

%		\def \fail {$\frac{1}{q^{g}}$}
%		\def \succes {$1-\frac{1}{q^{g}}$}

%		\node[initial,state]	(A)			{$0$};
%		\node[state]		(B) [right of=A]	{$1$};
%		\node[state] 		(C) [right of=B]	{$2$};
%		\node[state] 		(D) [right of=C]	{$r$};
%		\node[state]  		(E) [right of=D]	{$g$};

%		\path	(A)	edge [loop above] node {$\frac{1}{q^{g}}$}	(A)
%				edge              node {$1-\frac{1}{q^{g}}$}	(B)
%			(B)	edge [loop above] node {$\frac{1}{q^{g-1}}$}	(B)
%			    	edge              node {$1-\frac{1}{q^{g-1}}$}	(C)
%			(C)	edge [loop above] node {$\frac{1}{q^{g-2}}$} 	(C)
%				edge 		  node {$1-\frac{1}{q^{g-2}}$}	(D)
%			(D)	edge [loop above] node {$\frac{1}{q^{g-r}}$}	(D)
%			    	edge              node {$1-\frac{1}{q^{g-r}}$} 	(E)
%			(E) 	edge [loop above] node {1} 			(E);
%		
%	\end{tikzpicture}
%\caption{A Markov chain showing the probabilities of increasing the rank of the coding matrix, where $q$ is the field size and $g$ is the generation size.}
%\label{fig:markov_chain}
%\end{figure}

%The probability of successfully increasing the rank of the coding matrix on a received combination is given by Equations \eqref{eq:inc_rank} and \eqref{eq:samerank}.
%%
%\begin{align}
%P_{r\rightarrow r+1} &= 1-\frac{1}{q^{g-r}} \label{eq:inc_rank} \\
%P_{r\rightarrow r} &= \frac{1}{q^{g-r}} \label{eq:samerank} \\
%\intertext{Where:}
%&\text{$q$ is the field size.} \notag \\
%&\text{$g$ is the generation size.} \notag\\
%&\text{$r$ is the current rank of the coding matrix.}\notag\\
%&\text{$P_{r\rightarrow r+1}$ is the probability of reaching next rank of coding matrix.}\notag\\
%&\text{$P_{r\rightarrow r}$ is the probability of not increasing rank.}\notag 
%\end{align}

%From the markov chain, a transition matrix can be created to further show the relationship between the rank of the coding matrix and the probability of increasing the rank.
%%
%\begin{align}
%&\boldsymbol{P}=
%\left[
%\begin{array}{ccccc}	
%\frac{1}{q^{g}}		& 0 			& 0 		& \cdots 		& 0 \\
%1-\frac{1}{q^{g}}	& \frac{1}{q^{g-1}} 	& 0  		& \cdots 		& 0 \\
%0 			& 1-\frac{1}{q^{g-1}}	& \ddots 	& \ddots 		& \vdots \\
%\vdots 			& \ddots 		& \ddots 	& \frac{1}{q^{1}} 	& 0 \\
%0 			& \cdots 		& 0	 	& 1-\frac{1}{q^{1}} 	& \frac{1}{q^0}
%\end{array}
%\right]^{\left(\text{$g$+1 $\times$ $g$+1}\right)}
%%
%\label{eq:transition_matrix} \\
%\intertext{Where:}
%&\text{$q$ is the field size.} \notag\\
%&\text{$g$ is the generation size.} \notag\\
%&\text{$P_{i,j}$ is the probability of reaching rank $i$-1 when the coding matrix has rank $j$-1.} \notag
%\end{align}

%The transition matrix $\boldsymbol{P}$ in \eqref{eq:transition_matrix} can be used to calculate the probability mass function (pmf) for the rank of the coding matrix after receiving $n$ linear combined packets.
%%
%\begin{align}
%\boldsymbol{s}_{0} &=
%\left[
%\begin{array}{c}
%1 \\ 
%0 \\ 
%\vdots \\ 
%0 
%\end{array}
%\right]^{\left(\text{$g$+1}\right)} \label{eq:initial_pmf} \\
%\intertext{Where:}
%&\text{$g$ is the generation size} \notag\\
%&\text{$\boldsymbol{s}_0$ is the initial pmf} \notag\\
%&\text{${\boldsymbol{s}_{0}}_i$ is the probability of the coding matrix being rank $i$-1.} \notag
%\end{align} 

%The initial pmf for the rank of the coding matrix is given in Equation \eqref{eq:initial_pmf}, which states the probability of rank 0 being 1, before any linear combined packets are received. The pmf after $n$ received coded packets, is calculated by Equation \eqref{eq:pmf_n_transitions}.
%%
%\begin{align}
%\boldsymbol{s}_{n} &= \boldsymbol{P}^n \times \boldsymbol{s}_0 \label{eq:pmf_n_transitions} \\
%\intertext{Where:}
%%&\text{$n$ is the number of transitions} \notag\\
%&\text{$\boldsymbol{P}$ is the transition matrix.} \notag\\
%&\text{$\boldsymbol{s}_0$ is the initial pmf.} \notag\\
%&\text{${\boldsymbol{s}_{n}}$ is the pmf after $n$ received linear combinations.} \notag
%\end{align}


%\subsection{Coding Overhead}
%The probability of linear dependency when $g$ combinations are received results in an unavoidable overhead, depending on both the generation size and the field size used. This section includes an analytical investigation of this overhead. 

%For each received combination of source packets, there is a probability $p$ that the combination will hold no useful data, thus not increasing the rank of the coding matrix. This probability, $p$, increases with the rank of the coding matrix. This makes the received combinations less likely to contain useful information for higher ranks of the coding matrix.
%The total overhead of a generation can be calculated as the sum of the overhead from each rank until the coding matrix reaches full rank. Equation \eqref{eq:rank_overhead_general} shows the general expression for each of these overheads.

%\begin{align}
%O &= \sum_{k=1}^{\infty} \left(k \cdot p^{k-1} \cdot (1-p)\right)-1 &{0\le p< 1} \label{eq:rank_overhead_general} \\
%%\intertext{Due to geometric convergence, this can be reduced to:}
%%\intertext{Due to $\sum p^k$ being a geometric series, the derivative of its convergence can be used:}
%\intertext{Derived geometric convergence is used to reduce the expression:}
%& \sum_{k=1}^{\infty} k \cdot p^{k-1} = \left( \sum_{k=0}^{\infty} p^k \right)^\prime = \left(\frac{1}{1-p}\right)^\prime = \frac{1}{(1-p)^2} &{0\le p< 1} \notag\\
%%& \left(\frac{1}{1-p}\right)^\prime = \left( \sum_{k=0}^{\infty} p^k \right)^\prime  &{0\le p\le 1} \notag\\
%\intertext{Substituting this into \eqref{eq:rank_overhead_general} gives:}
%O &= \frac{1}{(1-p)^2}\cdot (1-p)-1  = \frac{1}{1-p}-1 &{0\le p< 1} \notag\\
%\intertext{Simplifying this further gives a simple expression:}
%O &= \frac{1}{\frac{1}{p}-1} &{0\le p< 1} \label{eq:rank_overhead_simple} \\
%\intertext{Where:}
%&\text{$O$ is the overhead for the given rank in extra packets.} \notag\\
%&\text{$k$ is the transition number.} \notag\\
%&\text{$p$ is the probability of not increasing rank of coding matrix.} \notag
%\end{align}

%Equation \eqref{eq:rank_overhead_simple} gives a simple expression for the overhead of each rank-transition of the coding matrix. The total coding overhead for a generation is calculated as the sum of the overhead from each rank. Equation \eqref{eq:samerank} provides the probability of not increasing the rank when receiving a new linear combination. Using this in Equation \eqref{eq:rank_overhead_simple} gives an expression for the total overhead in Equation \eqref{eq:total_overhead}.
%%
%\begin{align}
%O_{\text{\tiny{total}}} &= \sum_{r=0}^{g-1}\frac{1}{q^{g-r}-1} \label{eq:total_overhead} \\
%\intertext{Where:}
%&\text{$O_{\text{\tiny{total}}}$ is the total overhead of a generation in packets.}\notag\\
%&\text{$g$ is the generation size.} \notag\\
%&\text{$q$ is the field size.} \notag\\
%&\text{$r$ is rank of the coding matrix.} \notag
%\end{align}















