
\documentclass[a4paper,10pt]{article}
\usepackage{color}
% \usepackage{fancyhdr}
\usepackage[utf8]{inputenc}

\usepackage{enumerate}
\usepackage{latexsym}
\usepackage{amssymb}
\usepackage{ulem}
% \usepackage{ifsym}
\usepackage{graphicx}
%\usepackage[margin=3cm]{geometry}

%\renewcommand{\headheight}{16pt}

%\newcommand{\HRule}{\rule{\linewidth}{0.1mm}}

\begin{document}

%\begin{titlepage}
\begin{center}
	\textsc{\Large Distributed Database Systems (WS 11/12) }\\[0.3cm]
	\textsc{\large Assignment 8}\\[1cm]
        Adam Grycner\\
        Szymon Matejczyk\\
        Guo Xinyi\\
        Yu Chenying\\[1cm]
        \today\\[1cm]
        \rule{\linewidth}{0.1mm}\\
 	%\HRule\\
%	\HRule\\[2cm]
	
\end{center}
\section{Exercise 8.1: Global Serializability with Snapshot Isolation}
Suppose the system at site A uses SS2PL and the system at site B uses SI. Consider one transaction ($t_1$) at site A performing $r(x)$ and another transaction ($t_2$) at site B also performing $r(x)$. When $t_1$ starts, a read lock is held on $x$, so $t_2$ can only start after $t_1$ ends. Thus these two transactions cannot run concurrently.
Hence the ticket technique is applicable here.


\section{Exercise 8.2: Speedup}
\begin{itemize}
 \item What is the expected speedup with a machine of 100 MIPS?\\
$\mathit{speedup} = \frac{150}{50 + (150-50)/10} = \frac{150}{60} = 2.5$
 \item What is the speedup with 10 machines, each with 10 MIPS? (speedup relative to 1 machine with 10 MIPS; each machine reads part of the whole data; we do not count communication costs; naive assumption --- the less data we have, the shorter the I/O time)\\
$\mathit{speedup} = \frac{150}{50/10 + ((150-50)/10) \cdot 1} = \frac{150}{15} = 10$
 \item What is the speedup with 100 machines, each with 1 MIPS?\\
$\mathit{speedup} = \frac{150}{50/100 + ((150-50)/100) \cdot 10} = \frac{150}{10.5} \approx 14.3$
\end{itemize}
\section{Exercise 8.3: Partitioning}
\begin{itemize}
\item Sort \\
 We want 5 partitions, so we count 5 elements at a time and put the value of the next element into the vector.\\
 1,1,1,4,5,\emph{6},10,11,11,11,\emph{15},16,20,22,22,\emph{23},25,29,30,30.\\
 vector[6,15,23]\\
 partitions: \{1,4,5,6\}, \{10,11,15\}, \{16,20,22,23\}, \{25,29,30\}
\item Histogram\\
 The range 1--30 is divided into 5 equal intervals.\\
 partitions: \{1,4,5,6\}, \{10,11\}, \{15,16\}, \{20,22,23\}, \{25,29,30\}
\item Sort is better; it produces more balanced partitions.
\end{itemize}

 \section{Exercise 8.4: Parallel Hash Join}
\begin{enumerate}[(a)]
  \item At the beginning we need to divide the data among the cores.
  We can use a hash function $h$ on the join attribute to do that, so that we divide relations $R$ and $S$ into $m$ partitions each.
  Let us denote the partitions by $R_{1}, \ldots, R_{m}$ and $S_{1}, \ldots, S_{m}$, respectively. By the properties of the hash function, we expect the partitions to be of roughly equal size.
  Because we partitioned by a hash function on the join attribute, the only possible joins are between partitions with the same index: $R_{1}$ with $S_{1}$, and so on.
  We can now send the partitions $R_{i}$ and $S_{i}$ to core $i$ and compute the joins separately.
  At the end each core sends its joined partitions to the main core.

  \textbf{Remark:} Computing the hash function $h$ for the whole relations $R$ and $S$ on one core can be inefficient. To use all cores, we can divide the relations among the cores so that every core gets an equal number of rows, and compute the function $h$ on each core. Then, if a core owns a row that belongs to another one (according to the partitioning by $h$), it sends the row to that core. The second phase is the same as in the previous approach.

  \item This case is simpler than the previous one. We only need to divide relation $S$ into partitions corresponding to the $R$ partitions, using the same hash function. We send both the $R$ partitions and the obtained $S$ partitions to the cores and then proceed as before.
\end{enumerate}
\end{document}
