\documentclass[]{final_report}
\usepackage{graphicx}
\usepackage{hyperref}


%%%%%%%%%%%%%%%%%%%%%%
%%% Input project details
\def\studentname{Dawei Yang}
\def\projecttitle{Public Transport Monitor}
\def\supervisorname{Prof. Gregory O'Hare and Dr. Mauro Dragone}
\def\moderatorname{}%Dr. M. \'O Cinn\'eide


\begin{document}

\maketitle
\tableofcontents\pdfbookmark[0]{Table of Contents}{toc}\newpage

%%%%%%%%%%%%%%%%%%%%%%
%%% Your Abstract here

\begin{abstract}

%Localisation algorithms infer user's location based on sensor devices like GPS. Without manual operations, the algorithms need to be intelligent, context aware, and adaptive to provide locational information.

This dissertation introduces a localisation model that infers and gathers the information of user's position and related usage of public transport using Global Positioning System (GPS). The information can be used in a range of applications like personal trip scheduling, carbon footprint profiling and so forth. Unlike most common GPS localisation applications, our model aims not only at tracking the user's position but also analysing the routes information of public transport related to the position. The model applies Monte Carlo localisation algorithm to estimate the user's location based on GPS measurement. Our experiments show that the model is able to estimate the user's location and also identify the route information of the public transportation.

\end{abstract}
\newpage


%%%%%%%%%%%%%%%%%%%%%%
%%% Acknowledgments

\chapter*{Acknowledgements}
I would like to thank my supervisors, Prof. Gregory O'Hare and Dr. Mauro Dragone, for all their advice, encouragement, and seemingly inexhaustible patience during the course of this project.
Thanks also to Dr Alexander Ufimtsev for providing hardware to support this project.

%%%%%%%%%%%%%%%%%%%%%%
%%% Introduction

\chapter{Introduction}

%Put our project into the context.
The prevalence of Global Positioning System (GPS) has led to an increasing interest in producing applications that provide location information. At the same time, in the sense of ubiquitous computing, a natural form of human-computer interaction is appreciated. This requires the application to be intelligent, context aware and adaptive. In this spirit, the aim of this project is to design and develop a model that infers and gathers useful knowledge about the usage of public transport from GPS devices.

%Common GPS applications.
A GPS device receives location and time information from navigation satellites. Usually, the main objective of most common GPS applications is user positioning. The outputs of these applications are location information containing geographical coordinates and time stamps, which indicates the position of the user. However, there are some defects in these applications. The accuracy of location information is affected by GPS signal noise, which reduces the quality of localisation. Besides, the position information itself is not sufficient to infer the user's moving status, such as where the user is going or which route of transportation the user is using.

% Differences
Different from these applications, this project not only focuses on determining the user's position but also tries to analyse the usage of transport that is related to the position. This knowledge can be used in a wide range of applications, like digital personal assistants for trip scheduling, monitoring transportation for carbon footprint profiling and so forth. Figure~\ref{fig:BusRouteMap} illustrates an example that differentiates the common GPS applications from this project.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{BusRouteMap} 
}
\caption{\label{fig:BusRouteMap} Two bus routes from position A to B.~\cite{hittheroad}}
\end{figure} 

% Two route example
The map shows a part of a district in Dublin, Ireland. In the map, the shaded and solid purple lines denote two bus routes merging at point A and branching after point B. When the user is travelling between A and B on one of the bus routes, the common GPS applications can only estimate the position of the user at a certain time, whereas our model not only determines the user's position but also infers which bus the user is on and where the user is heading. This extra information from our model can be fed to other applications. For example, with the knowledge of the user's current location and transport route information, a scheduling system can estimate the arrival time of the transport based on historical data of the route, plan route transitions for the user or send this information to others for scheduling their meetings. Gathering GPS information alone is inadequate for the tasks mentioned above, because from the location data we can only understand that the user is moving along the road but cannot differentiate between the two similar routes.


%Problems and challenges
The main challenges to the project are:
\begin{enumerate}
\item Localisation -- determining the position of the user based on GPS data.
\item Routes identification -- inferring the usage of public transport from related positions.
\item GPS ``noise'' -- errors and variances contained in the positioning coordinates received by GPS devices. This affects the accuracy of the calculated location.
\end{enumerate}

%Objectives
To tackle these problems, our design applied mobile robotics localisation algorithms, specifically the Monte Carlo localisation algorithm, to smooth the ``noise'' of GPS measurement, to estimate the user's position and to infer the usage of public transport. We also implemented the algorithm in Matlab for the two bus routes illustrated in Figure~\ref{fig:BusRouteMap}. The Matlab application analyses a time-stamped log of the GPS data and determines the corresponding position at each time stamp. Then, we evaluated the accuracy of our model by comparing the results with the outputs of other localisation algorithms. We also checked the estimated positions against the acknowledged locations on the map, like bus stops.

%Structure of the paper
This paper is organised as follows. In the next section, we discuss the background and related work. Then, we provide an overview of the design of our model, followed by a description of the detailed implementation. Before concluding, we present experimental results that show the capabilities of our approach.




%%%%%%%%%%%%%%%%%%%%%%
%%% Background
\chapter{\label{chapter2} Background}

This chapter will put this project in context. Section~\ref{ref:localisation} introduces the general idea of mobile robot localisation and explains why our problem fits into this category. The following Section~\ref{ref:Bayes} describes the Bayes filter, one of the most general algorithms in localisation. It is the basic idea that our project relies on. Then, Section~\ref{ref:particlefilter} introduces the basic idea of the particle filter. It is an implementation of the Bayes filter. Section~\ref{ref:MonteCarlo} explains the Monte Carlo localisation algorithm, which applies the particle filter to the localisation problem. In Section~\ref{ref:relatedwork}, related previous work is discussed. The last Section~\ref{ref:othertech} of this chapter explores some of the other technology used to implement our project.


% Robotics localisation
\section{Mobile robot localisation}
\label{ref:localisation}

%Mobile robot localisation is a large area, this section only surveys some of the basic idea as the background of our project.

%\subsection{Localisation in Robotics}
% What is it?
Sebastian Thrun describes in his book ``Probabilistic Robotics'' that ``Mobile robot localisation is the problem of determining the pose of robot relative to a given map of the environment.''~\cite{Book:Robotics} In this definition, the robot refers to the moving object equipped with a sensor which can perceive its environment. The pose of a robot usually comprises its location and orientation. We can determine the pose of the object by comparing the measurement data from the sensor to the corresponding pose in the given map. However, because the measurement data from the sensor contains errors and variance, it is difficult to determine the pose of the object by only a single sensor measurement. One solution to this problem is to estimate the pose of the object recursively by integrating the sensor data over time.

% Why should we care?
The problem of our project can be regarded as an instance of mobile robot localisation. The user on the public transport is the moving object in our case. The GPS device carried by the user records a time-stamped log of the coordinates as measurements. The map of the public transport route is the environment map. We infer the user's location based on the recorded GPS data and the given route map.

% How it works?
There are three essential features in mobile robot localisation: measurement, control and map. The measurement is the sensor data from the moving object. The control changes the state of the object. For example, the velocity of a moving bus is a form of control, as it changes the position of the bus. The map is the environment in which a measurement is generated. All the possible poses of the object are limited to the map. By establishing the relationship between the measurement and the given map, we can estimate the position of the object.

Figure~\ref{fig:MobileRobotLocalisation} illustrates the recursive localisation process using these features. The goal of localisation is to infer the position X in the graph. In this recursive process, every single estimation is based on the former estimated result. For example, to infer the position ${X_t}$ at time t, we use the control ${U_t}$ to estimate the movement of the object from the previous position ${X_{t-1}}$. This generates an inaccurate result of estimation, because the control data itself also contains noise. Then, the localisation model refines the estimation using the measurement data ${Z_t}$ with the map data and determines the best possible position.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=12cm]{GraphModelofMobileRobotLocalisation} 
}
\caption{\label{fig:MobileRobotLocalisation} The model of mobile robot localisation. The control U, the measurement Z and the map are the data we know. The location X is what we need to infer.}
\end{figure} 

In mobile robot localisation, the belief denoted by ${Bel(X_t)}$ is used to denote the probability of the estimated positions. It is usually represented by conditional probability distributions. ``A belief distribution assigns a probability (or density value) to each possible hypothesis with regards to the true state.''~\cite{Book:Robotics} Here, the possible hypotheses are the estimated states ${X}$. Equation~\ref{equ:robotlocalisation} denotes the belief ${Bel(X_t)}$ over a state variable ${X_t}$ conditioned on all sensor and control data available at time t. ${Z_{1:t}}$ is a sequence of time-indexed sensor observations ${Z_1, Z_2, Z_3, ..., Z_t}$ and ${U_{1:t}}$ is a sequence of time-indexed control data ${U_1, U_2, U_3, ..., U_t}$. The equation answers the question ``What is the probability that the object is at location X if the history of sensor measurements is ${Z_1, Z_2, Z_3, ..., Z_t}$ and under the control of ${U_1, U_2, U_3, ..., U_t}$?'' for all possible locations X.~\cite{Paper:BayesianLocation}

\begin{equation}\label{equ:robotlocalisation}
Bel(X_t) = p({X_t} \mid {Z_{1:t}, U_{1:t}})
\end{equation} 

Similarly, equation~\ref{equ:robotlocalisation2} is another useful equation for calculating the belief. Different from equation~\ref{equ:robotlocalisation}, the belief is calculated before incorporating measurement ${Z_t}$, just after executing the control ${U_t}$.

\begin{equation}\label{equ:robotlocalisation2}
\overline{Bel}(X_t) = p({X_t} \mid {Z_{1:t-1}, U_{1:t}})
\end{equation} 

As can be seen from equations~\ref{equ:robotlocalisation} and~\ref{equ:robotlocalisation2}, the complexity of computing grows exponentially over time, because the numbers of sensor and control data keep growing over time. To solve the problem, in the next section, we will introduce the principal algorithm, the Bayes filter, for calculating the belief in robotics.

% Bayes Filter
\section{Bayes Filter}
\label{ref:Bayes}

This section introduces the basic idea of the Bayes filter (or Bayesian filtering). The Bayes filter is a general algorithm that calculates the beliefs from measurement and control data. It reduces the complexity of computing in equations~\ref{equ:robotlocalisation} and~\ref{equ:robotlocalisation2}.

Equation~\ref{equ:Bayes} and ~\ref{equ:Bayes2} are the general algorithm for Bayes filtering. The Bayes filter is also recursive. The belief ${Bel(X_t)}$ at time t is calculated from the belief ${Bel(X_{t-1})}$ at time ${t-1}$.

``The Bayes filter algorithm possesses two essential steps.  The first step is called prediction. In equation~\ref{equ:Bayes}, it processes the control ${U_t}$. It does so by calculating a belief over the state ${X_t}$ based on the prior belief over state ${X_{t-1}}$ and the control ${U_t}$. In particular, the belief ${\overline{Bel}(X_t)}$ that the robot assigns to state ${X_t}$ is obtained by the integral of the product of two distributions: the prior assigned to ${X_{t-1}}$, and the probability that control ${U_t}$ induces a transition from ${X_{t-1}}$ to ${X_t}$. The second step is called measurement update. In equation~\ref{equ:Bayes2}, the Bayes filter algorithm multiplies the belief ${\overline{Bel}(X_t)}$ by the probability that the measurement ${Z_t}$ may have been observed. As will become apparent further below when actually deriving the basic filter equations, the resulting product is generally not a probability. It may not integrate to 1. Hence, the result is normalised, by virtue of the normalisation constant ${\eta}$.''~\cite{Book:Robotics}

\begin{equation}\label{equ:Bayes}
\overline{Bel}(X_t) = \int p({X_t} \mid {X_{t-1}, U_t}) Bel(X_{t-1}) \mathrm{d}X_{t-1}
\end{equation}

\begin{equation}\label{equ:Bayes2}
Bel(X_t) = \eta p({Z_t} \mid {X_t}) \overline{Bel}(X_t)
\end{equation} 

The Bayes filter algorithm is derived from equation~\ref{equ:robotlocalisation} and ~\ref{equ:robotlocalisation2} based on the Bayes rules and Markov assumption.

Equation~\ref{equ:Bayes3} depicts the basic Bayes rule (or Bayes theorem). It provides a convenient way to compute a posterior ${p(x \mid y)}$ using the inverse conditional probability ${p(y \mid x)}$ along with the prior probability ${p(x)}$. That is to say, in our problem, when we are interested in inferring the position ${X}$ from the sensor data ${Z}$ (${ Bel(X) = p(X \mid Z)}$), we can use the probability of sensor data Z assuming that X was the case (${p(Z \mid X)}$).~\cite{Book:Robotics}

\begin{equation}\label{equ:Bayes3}
p(x \mid y) = \frac{p(y \mid x) p(x)}{p(y)}
\end{equation}

Another essential theory in robotics is Markov assumption. ``For locating objects, the Markov assumption implies that sensor measurements depend only on an object's current physical location and that an object's location at time t depends only on the previous state ${X_{t-1}}$. States before ${X_{t-1}}$ provide no additional information.''~\cite{Paper:BayesianLocation}

% Particle Filter 
\section{Particle Filter}
\label{ref:particlefilter}

The Bayes filter is not a practical algorithm, so it cannot be used directly. This section introduces an implementation of the Bayes filter algorithm, the particle filter. The particle filter approximates the posterior by a finite number of samples. It represents a distribution by a set of samples drawn from the distribution. Each of the samples represents a state of the pose.~\cite{Paper:particlefilter}

The particle filter is an approximate approach to represent the posterior distribution. It converts the infinite state space of a posterior distribution into finite state samples. Particles are often denoted as in equation~\ref{equ:particle}. Each particle ${X_t^{[m]} (1 \leq m \leq M)}$ is a concrete instantiation of the state at time t. It represents an estimation of the object's real state. M is the total number of particles at time t.

\begin{equation}\label{equ:particle}
X_t = [X_t^{[1]}, X_t^{[2]}, X_t^{[3]}, ..., X_t^{[M]}]
\end{equation}

The basic particle filter algorithm contains three steps, prediction, measurement update and re-sampling. Similar to Bayes filter algorithm, in the prediction phase, the hypothetical state ${X_t^{[m]}}$ is calculated based on the previous state ${X_{t-1}^{[m]}}$ and the control data ${U_t}$. This result is the temporary belief ${\overline{Bel}(X_t)}$. The difference is that particle filter applies the calculation to each of the particles.

In the measurement update step, particle filter calculates the probability of the measurement ${Z_t}$ under the particle ${X_t^{[m]}}$. This probability is called the weight ${W_t^{[m]}}$ or importance factor of the particle ${X_t^{[m]}}$. It is used to incorporate the measurement ${Z_t}$ into the particle set.

The third step is re-sampling or importance sampling. The algorithm draws ${M}$ particles with replacement from the temporary particle set generated in the prediction phase. The probability of drawing each particle is given by its importance weight from the measurement update phase. After this step, the particles are distributed according to the posterior belief in equation~\ref{equ:Bayes2}.%${Bel(X_t) = \eta p({Z_t} \mid {X_t}) \overline{Bel}(X_t)}$.



% Monte Carlo localisation
\section{Monte Carlo localisation}
\label{ref:MonteCarlo}
Monte Carlo Localisation (MCL) is an algorithm that applies particle filter technology to mobile robot localisation problem.

Figures~\ref{fig:MonteCarlo1} to \ref{fig:MonteCarlo5} use a one-dimension robot localisation example to explain Monte Carlo localisation. As illustrated in Figure~\ref{fig:MonteCarlo1}, initially, because we have no knowledge about the robot's position, the particles (our beliefs about the real location) spread randomly and uniformly over the whole space.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{one-dimension-robot1} 
}
\caption{\label{fig:MonteCarlo1} The initial phase, particles are randomly distributed in the map space with equal weights.}
\end{figure} 

Figure~\ref{fig:MonteCarlo2} depicts the situation after the robot takes a sensor reading. The algorithm calculates a weight for each of the particles. As can be seen from the figure, the weights of the particles near the doors become higher, whereas the others are lower.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{one-dimension-robot2}
}
\caption{\label{fig:MonteCarlo2} The update phase, robot takes a sensor data.}
\end{figure} 

Figure~\ref{fig:MonteCarlo3} shows the particle status after re-sampling. Re-sampling assigns more particles around the positions with higher weights and uniformly distributes the weights of the new set of particles. At the same time, the algorithm starts a new round of estimation of the robot's pose under control data ${U_t}$. As shown in the figure, the particles move a step forward.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{one-dimension-robot3} 
}
\caption{\label{fig:MonteCarlo3} The re-sampling and prediction phases.}
\end{figure} 

After taking another measurement from the robot, the algorithm calculates the weights for the particles again. As shown in Figure~\ref{fig:MonteCarlo4}, at this point, a great number of particles near the second door get more weight.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{one-dimension-robot4} 
}
\caption{\label{fig:MonteCarlo4} The update phase, robot takes another sensor data.}
\end{figure}

In figure~\ref{fig:MonteCarlo5}, the algorithm takes another re-sampling and prediction. More particles are centred around the real position of the robot.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{one-dimension-robot5} 
}
\caption{\label{fig:MonteCarlo5} The re-sampling and prediction phases.}
\end{figure} 

% Related work
\section{Related work}
\label{ref:relatedwork}
This section surveys several previous works using particle filters for localisation. These works have provided much inspiration and guidance for this project.

The idea of using particle filters in robotics localisation is not a new one. An early paper in this area was produced by Sebastian Thrun in 2002~\cite{Paper:particlefilter}. His article surveys some of the innovations on the use of particle filters in robotics and describes some of the successes of particle filters in the field of robotics. This paper introduced us to the basic idea of applying the particle filter to the localisation problem. Another paper by Sebastian Thrun, Dieter Fox, Wolfram Burgard, and Frank Dellaert~\cite{Paper:RobustMonte} introduces a robust Monte Carlo localisation (MCL) algorithm for mobile robots. This paper gives us more details of the MCL approach.

The paper ``Learning and Inferring Transportation Routines'' written by Lin Liao, Dieter Fox and Henry Kautz~\cite{Paper:learninfer} introduces a hierarchical Markov model that can learn and infer a user's daily movements and use of different modes of transportation through the community. This project is very similar to ours in terms of research field and approach. Their objective is to learn and infer the user's transportation routines from GPS data logs without requiring any manual labelling. They also applied the particle filter technique for sensor integration and location prediction. In contrast to our project, their work mainly focuses on inferring the user's locations of usual goals, such as home or workplace, and the mode of transportation, such as foot, car, or bus, and on predicting the change of modes. The goal of our project is to infer the usage of public transportation, like specifying which bus the user takes.

Paul Michael Newman's lecture notes~\cite{C4CourseResources} introduce the navigation problem of mobile robots and a range of solutions to the problem. He also provides Matlab code in the notes to illustrate these localisation algorithms. His example of Matlab code for the particle filter inspired us to develop a Matlab based program for our project.

% Others
\section{Other technology}
\label{ref:othertech}
This section introduces other technologies that we applied to implement the application of our project.

\subsection{Matlab}
MATLAB is a high-level technical computing language and interactive environment for algorithm development, data visualisation, data analysis, and numerical computation~\cite{matlab}. We used Matlab to develop our localisation algorithm based on the following features.

\begin{itemize}
\item High-level language for technical computing -- The MATLAB language supports the vector and matrix operations that are fundamental to engineering and scientific problems. It enables fast development and execution.
\item Mathematical functions  -- Matlab supports a wide range of mathematical functions. This can greatly simplify the development of algorithms for our project.
\item Graphics functions for visualizing data -- Matlab provides the ability to export results to graphics formats. This feature helps us to better understand the program.
\end{itemize}

\subsection{Android platform}
Android is a software stack for mobile devices that includes an operating system, middleware and key applications~\cite{android}. It provides an open development platform for developers to build applications written in Java. With the location service provided by Android, we developed the GPS application on an Android mobile phone to record the GPS data log.

\subsection{Apache HttpClient}
Apache HttpClient is a client side HTTP transport library. The purpose of HttpClient is to transmit and receive HTTP messages~\cite{httpclient}. We applied Apache HttpClient in the Java application to send and receive HTTP messages for inquiring about bus route information. The bus route information is processed by the Java application and stored in text format for further use.



%%%%%%%%%%%%%%%%%%%%%
%% Design
\chapter{Design}


%%%%% Data Model
\section{Data Model}
This section describes the data models in the system. These models simulate the different aspects of the problem, presenting the subjects (or components) in the real world.

\subsection{Route map}
The route map is the basis for estimating the location of the user. It consists of the route information of public transportation (bus, train, Luas) in the city of Dublin. Samples (particles) are drawn from this route information and represent possible locations of the current transportation that is monitored.

The route map is represented by a two-dimensional array. The first dimension of the array represents the distinct route of transportation and the second one denotes the location of stops in the route. For each element in the array, we can infer the route and stop that it stands for by the array indices.

Each element of the array represents a location on the route map. It is represented by a vector containing four values: index number, distance, latitude and longitude. Distance is the length of the segment from the current stop to the starting stop in the route. The coordinates, latitude and longitude, indicate the geographical location in the map.


\begin{table}[h!]
\caption{\label{tab:routemap}The data structure for one route in the map.}
\begin{center}
    \begin{tabular}{ | l | l | l | l |}
    \hline
    Index & Distance & Longitude & Latitude \\ \hline \hline
    1 & 0          & -6.2179 & 53.3055 \\ \hline
    2 & 0.0040 & -6.2188 & 53.3094 \\ \hline
    3 & 0.0076 & -6.2213 & 53.3119 \\ \hline
    4 & 0.0127 & -6.2257 & 53.3146 \\ \hline
    5 & 0.0151 & -6.2278 & 53.3159 \\ \hline
    6 & 0.0195 & -6.2313 & 53.3185 \\ \hline
    7 & 0.0219 & -6.2333 & 53.3199 \\ \hline
    ... & ...... & ...... & ...... \\ 
    \hline
    \end{tabular}
\end{center}
\end{table}

Table~\ref{tab:routemap} illustrates an example of the data structure for one single route in the route map. The index is a unique number for each stop of the route. The distance is the sum of the distances between two adjacent stops from stop 1 to the current stop. The distance between two adjacent stops is calculated from the coordinates (Equation~\ref{equ:distance}). x and y stand for the coordinates of the stops.

\begin{equation}\label{equ:distance}
D = \sqrt{(x_1 - x_2)^{2} + (y_1 - y_2)^{2}}
\end{equation}

The elements of the route map are used as the initial samples before the GPS data is imported into the system. At the initialisation phase, we do not have knowledge of the location of the user; using the route map as samples can cover the whole area we are monitoring.

Another use of the route map is as the standard reference for converting the distance of a position along the route lines to coordinates. With the value of the distance along a certain route, the coordinates of this point can be calculated from the following equations. In Equation~\ref{equ:disRef}, the point to be calculated lies between two adjacent stops `a' and `b'. `x' and `y' are the coordinates to be calculated. `dist' stands for the distance value of the point that is already known. The suffixes `x' and `y' denote the coordinates of the stops.

\begin{equation}\label{equ:disRef}
\frac{x - a_x}{b_x - a_x} = \frac{dist - a_d}{b_d - a_d}
\quad \textrm{and} \quad
\frac{y - a_y}{b_y - a_y} = \frac{dist - a_d}{b_d - a_d}
\end{equation} 

\subsection{Particles}

In our model, particles are points in the route map representing the possible locations of the user. Each of the particles is drawn from the route map, so we have the knowledge of the route that the particle belongs to. We then only need the distance to pinpoint the location of the particle in the route. This simplifies our algorithm by dealing with one-dimensional data.

Each of the particles corresponds to an importance weight, which is used to measure how similar the particle is to the real position denoted by probabilities in percentage. The sum of all the importance weights is one.


\subsection{Velocity}
\label{ref:velocity}
We use velocity to predict the next pose of the user in the motion model. Based on the velocity and time interval, distance can be calculated, so we can estimate the next particles from the previous ones.

Because the velocity of the bus cannot be monitored in our model, the velocity is estimated based on common sense about the speed range of the bus. For example, we assume the speed of a bus varies from 0 to 100 (km/h) and the overall average speed of the bus throughout the whole route is 50. In this situation, we define a vector of velocities containing the values from 0 to 100 with an interval of 10. The velocities are normally (Gaussian) distributed as shown in Figure~\ref{fig:velocitydistr}.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[height=8cm]{velocitydistr} 
}
\caption{\label{fig:velocitydistr} Velocity for motion model.}
\end{figure} 


%%%%% Data building applications
\section{Data building applications}

\subsection{GPS recording application}
A mobile application was developed on the Android system for recording the GPS data. It writes coordinates (latitude and longitude) generated by the GPS device on the mobile phone to a file with fixed time intervals. Along with the movement of the transportation, the application generates a series of time-stamped coordinates. These data are used as observations in the MCL model to correct the beliefs of our estimations about the location of the user.

Another function of the application is to log the immediate location when a button on the screen is pressed. On this occasion the application records the current position to a separate log file which is used for storing these ``on purpose'' locations. These data are useful for verifying the accuracy of the model. A detailed explanation is in the Verification section.

\subsection{Routes map data building}
The route map is built on the coordinates of bus stops. The data of coordinates are gathered from web service through a Java program. By using Apache Http Client library, the program sends request to the website (www.hittheroad.ie) for the information of bus routes and parses the coordinates from the response. The program writes the latitude and longitude in pairs to a file, which is utilized by the route map.

%%%%% MCL Model
\section{MCL Model}
Our model estimates the location of the user based on Monte Carlo localisation algorithm. The algorithm estimates the location of the user and updates the beliefs of the location recursively. The algorithm consists of three main phases, initialisation phase, prediction phase and update phase.

\subsection{Initialisation phase}
This step initialises the particles.
Initially, the model spreads the particles all over the routes map to estimate the possible locations of the user. As the start location of the user is unknown, the particles distribute equally in the map with equal weights.

We divided the distance of each bus route equally by breaking points and used the distance between each of the points and the start point as the initial particles. These particles can cover the entire region that we were monitoring. The location of them can be calculated based on the route map with equation (~\ref{equ:disRef}).

\subsection{Prediction phase}

The prediction phase estimates the pose of the user from the particles and weights of the previous position and the velocities of the bus. The previous distance plus the product of velocity and time gives the estimate of the next position.

As mentioned in Section~\ref{ref:velocity}, the velocities are multiple normally distributed values from 0 to 100, which leads to each of the previous particles generating multiple new particles at this stage.
Table~\ref{tab:predictParticles} illustrates the newly generated particles based on the velocities.

\begin{table}[h!]
\caption{\label{tab:predictParticles}Predicting particles based on velocities.}
\begin{center}
    \begin{tabular}{ |c |c |c | c | c | c | c | c | c |}
    \hline
    Index & Prev Particle & \multicolumn{7}{|c|}{New Particle based on velocity} \\ \hline
     &  & 0 & 10 & 20 & 30 & ...... & 90 & 100\\
    \hline \hline
   1 & 0.0285 & 0.0285 & 0.02865 & 0.0288 & 0.02955 & ...... & 0.02985 & 0.0300 \\ \hline
   2 & 0.0300 & 0.0300 & 0.03015 & 0.0303 & 0.03105 & ...... & 0.03135 & 0.0315 \\ \hline
   3 & 0.0315 & 0.0315 & 0.03165 & 0.0318 & 0.03255 & ...... & 0.03285 & 0.0330 \\ \hline
   4 & 0.0330 & 0.0330 & 0.03315 & 0.0333 & 0.03405 & ...... & 0.03435 & 0.0345 \\ \hline
   5 & 0.0345 & 0.0345 & 0.03465 & 0.0348 & 0.03555 & ...... & 0.03585 & 0.0360 \\ \hline
   6 & 0.0360 & 0.0360 & 0.03615 & 0.0363 & 0.03705 & ...... & 0.03735 & 0.0375 \\ \hline
   7 & 0.0375 & 0.0375 & 0.03765 & 0.0378 & 0.03855 & ...... & 0.03885 & 0.0390 \\ \hline
    ... & ...... & ...... & ...... & ...... & ......& ...... & ...... & ...... \\ 
    \hline
    \end{tabular}
\end{center}
\end{table}

The weight of the new particle is the product of the weight of the previous particle and the probability of the velocity. Take the first particle in Table~\ref{tab:predictParticles} as an example. The weight of the new particle under velocity 0 is the product of the original weight of particle 1 and the probability of the bus travelling at a speed of 0. The original weight is therefore equal to the sum of the new weights under the different velocity rates.

One problem for our model in this phase is the growth of the number of particles. At every step, the number of particles is multiplied by the number of velocities, so it keeps growing as the model keeps running. This problem is discussed in the resampling section (Section~\ref{ref:resampling}).

\subsection{Update phase}

In the update phase, the model recalculates the weight of each newly produced particle based on the GPS measurement.

The weight of each particle is calculated from the distance between the GPS data and the particle with equation~(\ref{equ:distance}). The smaller the distance, the larger the weight. The distances are assumed to be normally distributed with mean 0 and variance 10, and the values of their normal pdf (probability density function) are the updated weights. The weights are normalised to sum to 1 by dividing each weight by the sum of all the weights.


\subsection{Resampling phase}
\label{ref:resampling}
The number of particles is multiplied in the prediction phase. To tackle this problem, we resample the particles by limiting the number of particles for each route. For instance, in the two-route example, we set the number of particles for each route to 100, so that there were 200 particles in total.

The resampling algorithm is as follows. For each route, we calculate the overall span of the particles as the maximum distance minus the minimum distance. This gives a segment of the route containing all the particles. Then, we divide the segment equally into the limited number of smaller sections, i.e.\ 100 sections in the previous example of two bus routes. Finally, we choose the particle with the biggest weight in each section to represent all the particles in that section and sum their weights together as the weight of the section.

%%%%%%%%%%%%%%%%%%%%%%
%%% Implementation
\chapter{Implementation}
This chapter describes the details of the implementation of the design. Section~\ref{ref:MCL} explains the Matlab script for the MCL algorithm. In Section~\ref{ref:android}, we introduce how the measurement data are gathered. Section~\ref{ref:httpclient} explains the Java program that gathers the bus route information.

%%%%% Matlab script

\section{Implementation of MCL in Matlab Script}
\label{ref:MCL}

% Why we use Matlab.
We used Matlab to simulate the MCL algorithm for several reasons. Its basic data element is the matrix, so it is easy for us to manipulate our data model as matrices. Matlab has powerful graphical output for displaying the results, which helps us to understand the problem more easily. It also has a great number of mathematical functions.

% How did we implement the program.
\subsection{MCL function}
The main function of the program is the function MCL. It mainly consists of three parts: parameter configuration, data initialisation and algorithm simulation. Parameter configuration sets up attributes of the program, such as the total number of particles, the scale factor for the map data, the number of iterations of the algorithm and so forth.
\begin{verbatim}
% Monte Carlo Localisation algorithm
function MCL
  ... ...
  % Setting up configuration
  global particleSize;
  particleSize = 100;
  ... ...
\end{verbatim}

In the data initialisation step, the program imports the coordinates of the bus routes from a file and builds up the route map. Then, the program draws particles equally on each of the bus routes so that, initially, the particles cover the entire map space.
\begin{verbatim}
  ... ...
  % get the coordinates of two bus routes
  orig_route_map_39a = importdata('route_39a_oneway.txt', ' ', 0);
  orig_route_map_145 = importdata('route_145_oneway.txt', ' ', 0);
  orig_route_maps{1} = orig_route_map_39a;
  orig_route_maps{2} = orig_route_map_145;
  % convert the coordinates of raw map to one-dimension distance
  route_map_particle = convertMaps(orig_route_maps);
  % generate particles over the entire map space
  particles = generateParticles(route_map_particle);
  ... ...
\end{verbatim}


After these preparations, the program starts to infer the user's location. This part contains three sub-functions: ``estParticles'' for predicting the user's location, ``calcWeights'' for calculating the weight of each particle, and ``resample'' for particle re-sampling.
\begin{verbatim}
  ... ...
  for k = 1:nSteps
    % prediction phase
    [particles, particle_weights] = estParticles(particles, particle_weights, velocity);
    % update phase
    [particle_weights,distances] = calcWeights(particles, gps, route_map_particle);
    % resampling phase
    [particles, particle_weights] = resample(particles, particle_weights);
  end
  ... ...
end
\end{verbatim}

% Explain the simulation result.
\subsection{Output of the Matlab script}
We used the plot function in Matlab to produce pictures of the output of the program. These pictures can help us to understand the results of our model.

Figure~\ref{fig:Matlab-twobuses} illustrates the simulation results of the program. The green and blue lines represent two bus routes (No.39a and No.145) in the district of Dublin, Ireland. These two routes are plotted based on the coordinates of bus stops (the green and blue dots on the lines). The red star marks are the GPS log data from the sensor on the moving bus heading north-west; they are the measurement data ${Z_t}$ in our localisation model. The small picture below the route lines magnifies the area around the GPS data. The grey points on the routes are the particles for estimating the user's position.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=12cm]{Matlab-twobuses} 
}
\caption{\label{fig:Matlab-twobuses} Simulation MCL in Matlab.}
\end{figure} 


%\subsection{Implementation details}
%This section explains some details of implementing the data model and algorithms in Matlab.
%\textbf{Data model}


%%%%% GPS logger
\section{GPS Application on Android}
\label{ref:android}
The time-stamped GPS log is the measurement data in our model. These data are recorded by an application on an Android mobile phone with GPS functionality. While the user is travelling on the bus with the phone, the application receives the coordinate signals from the phone's built-in GPS and writes the data to a log file every second. We also developed a function for recording the coordinates manually at certain locations, like bus stops. This information can be used to evaluate the accuracy of our program. The evaluation is discussed in Chapter~\ref{ref:Evaluation}.

The Android platform provides location-related APIs (application programming interfaces) for interacting with its built-in GPS service. We used ``LocationManager'' to access the system location services and ``LocationListener'' to receive notifications from the ``LocationManager'' when the location has changed. The Java program segment below describes how the application gets the location information.

\begin{verbatim}
public class GPSRecorder2Activity extends Activity {
    ... ...
    // Get system location service.
    lm = (LocationManager) getSystemService(Context.LOCATION_SERVICE);
    // Instantiate Mylocationlistener. It extends LocationListener .
    ll = new Mylocationlistener();
    lm.requestLocationUpdates(LocationManager.GPS_PROVIDER, 0, 0, ll);
    ... ...
    // Get the location from LocationManager
    Location location = lm.getLastKnownLocation(LocationManager.GPS_PROVIDER);
    ... ...
\end{verbatim}

The output of the application is a time-stamped GPS log file.
\begin{verbatim}
... ...
2011-07-14 01:26:58 53.30553191 -6.21804154
2011-07-14 01:26:59 53.30550706 -6.21804296
2011-07-14 01:27:00 53.30543627 -6.21800536
2011-07-14 01:27:01 53.30554786 -6.21802309
... ...
\end{verbatim}

%%%%% Java Program
\section{Java Application for gathering Map data}
\label{ref:httpclient}
The route map in our application is built on the coordinates of bus stops. The bus stop information was gathered by a Java application from the web service that provides bus stop information~\cite{hittheroad}.

The output of the Java program is a text file.
\begin{verbatim}
... ...
-6.217931 53.305494
-6.218776 53.309377
-6.221317 53.311894
-6.225725 53.314633
... ...
\end{verbatim}

The application sends HTTP requests to the web service for the information of a specific bus route and receives the responses from the server. Then, the application analyses the response strings and extracts the information of the bus stops~\cite{httpclient}. This information (the coordinates) is written to a file sequentially. The application uses Apache HttpClient to interact with the web server, sending requests and receiving responses.


%%%%%%%%%%%%%%%%%%%%%%
%%% Evaluation
\chapter{Evaluation}
\label{ref:Evaluation}

In this chapter, we evaluate our localisation model based on the experiment results. Section~\ref{ref:Inferring} describes the result of our implementation in inferring the bus usage. Section~\ref{ref:Accuracy} discusses the accuracy of our program. Section~\ref{ref:GPSnoise} explains the ability of the model to resist GPS noise.

\section{Inferring bus route}
\label{ref:Inferring}

The Matlab simulation results show that our program is able to identify the bus usage between two routes. Figure~\ref{fig:InferTwoRoutes} illustrates the simulation process of our program. This process is also recorded as a video.


\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=12cm]{InferTwoRoutes} 
}
\caption{\label{fig:InferTwoRoutes} Infer the bus route.}
\end{figure}

As can be seen from figure (A), the user takes the green bus route, moving from the south-east of the coordinate axis to the north-west. The red line is the moving trace of the user. It is made up of time-sequential GPS data denoted by red star marks. The frontmost red mark is the current location detected by GPS and the last mark denotes the starting position of the user. The grey lines around the red points are the particles. They estimate the possible locations of the user based on the current GPS data (the frontmost red mark). At this point, the program believes the user can be on either bus route because this part of the two routes overlaps. Figure (B) shows that the particles start to branch when the bus is approaching the separation point of the two routes. In figure (C), the bus moves onto the green route and, as the bus moves on, the particles on the blue route reduce gradually until none of the particles are left on the blue bus route, figures (D) and (E). Finally, the program confirms that the user is on the green bus route. Figure (F) illustrates that when the bus enters an overlapping section of the two routes again, the program will not be confused about the route usage.

% TODO: discuss the length (spread) of the particle segment here.

\section{Accuracy verification}
\label{ref:Accuracy}

The bus route is the fundamental reference for accuracy verification. To test the accuracy of our localisation model, we compare our inferred locations with landmarks on the map, like bus stops. This bus stop information is gathered from a web service for public transport~\cite{hittheroad}.

When the bus arrives at a stop, we record the GPS data manually through the mobile application. By incorporating these GPS data, our model generates particles near the bus stop. Because we already know the bus stop location from the bus route, we compare the bus stop location with the best particle (the one with the highest weight) to test the accuracy of the model.

Table~\ref{tab:accuracy} compares the estimated positions from our model with the bus stop positions from the map. The experiment data are based on 10 particles for each route at the beginning. As can be seen from the table, most of the deviations (column D2) between our estimations and the real locations range from 0 to 10 metres.

\begin{table}[h!]
\caption{\label{tab:accuracy}Accuracy of the estimated positions. D1 is the distance (in metres) between the GPS data and bus stop position on the route map. D2 is the distance between estimated position and bus stop with 10 particles. D3 is the distance between estimated position and bus stop with 100 particles.}
\begin{center}
    \begin{tabular}{ | c |c c | c  c | c | c | c|}
    \hline
    Index &\multicolumn{2}{|c|}{Bus stop} & \multicolumn{2}{|c|}{Estimated position} & D 1 & D 2 & D 3 \\ \hline
%Index &\multicolumn{2}{|c|}{Bus stop} & \multicolumn{2}{|c|}{GPS data} & \multicolumn{2}{|c|}{Estimated position} & D 1 & D 2 & D 3 \\ \hline
    \hline
%   1 & 53.309377 & -6.218776 & 53.309459 & -6.218860 & 53.309461 & -6.218860 & 10.685  & 10.881 & 11.526 \\ \hline
%   2 & 53.323481 & -6.239064 & 53.323517 & -6.239102 & 53.323516 & -6.239111 & 4.703 & 4.989 & 5.651 \\ \hline
%   3 & 53.332637 & -6.243723 & 53.332654 & -6.243725 & 53.332658 & -6.243742 & 1.841 & 2.654 & 2.681 \\ \hline
%   4 & 53.335312 & -6.246965 & 53.335383 & -6.246939 & 53.335316 & -6.246971 & 8.074 & 0.597 & 0.571 \\ \hline
%   5 & 53.337104 & -6.251835 & 53.337162 & -6.251880 & 53.337098 & -6.251850 & 7.121 & 1.199 & 2.292 \\ \hline
%   6 & 53.34216 & -6.257714 & 53.3409864 & -6.258427 & 53.341241 & -6.257501 & 138.82& 103.162 & 97.664 \\ \hline
%   7 & 53.343454 & -6.259713 & 53.343674 & -6.260290 & 53.343867 & -6.260114 & 45.438 & 53.081 & 53.067 \\ \hline
%   8 & 53.346124 & -6.262308 & 53.346209 & -6.262114 & 53.346054 & -6.262240 & 15.975 & 8.998 & 4.395 \\ \hline
%   9 & 53.345125 & -6.269528 & 53.345137 & -6.269607 & 53.345125 & -6.269592 & 5.4099 & 4.248 & 4.218 \\ \hline
%   10 & 53.345119 & -6.273739 & 53.34507 & -6.273926 & 53.345144 & -6.273940 & 13.603 & 13.629 & 6.897 \\ \hline 
   1 & 53.309377 & -6.218776 &  53.309461 & -6.218860 & 10.685  & 10.881 & 11.526 \\ \hline
   2 & 53.323481 & -6.239064 &  53.323516 & -6.239111 & 4.703 & 4.989 & 5.651 \\ \hline
   3 & 53.332637 & -6.243723 & 53.332658 & -6.243742 & 1.841 & 2.654 & 2.681 \\ \hline
   4 & 53.335312 & -6.246965 & 53.335316 & -6.246971 & 8.074 & 0.597 & 0.571 \\ \hline
   5 & 53.337104 & -6.251835 & 53.337098 & -6.251850 & 7.121 & 1.199 & 2.292 \\ \hline
   6 & 53.34216 & -6.257714  &  53.341241 & -6.257501 & 138.82& 103.162 & 97.664 \\ \hline
   7 & 53.343454 & -6.259713 & 53.343867 & -6.260114 & 45.438 & 53.081 & 53.067 \\ \hline
   8 & 53.346124 & -6.262308 & 53.346054 & -6.262240 & 15.975 & 8.998 & 4.395 \\ \hline
   9 & 53.345125 & -6.269528 & 53.345125 & -6.269592 & 5.4099 & 4.248 & 4.218 \\ \hline
   10 & 53.345119 & -6.273739 & 53.345144 & -6.273940 & 13.603 & 13.629 & 6.897 \\ \hline 
    \end{tabular}
\end{center}
\end{table}

The erroneous estimations appear at stops 6 and 7, where the distances rise to about 100 and 50 metres respectively. This is due to the big variance of the GPS data in this part of the route, as shown in figure~\ref{fig:BigGap}. In the middle of the figure, there is a gap in the GPS data (red star marks), which affects the accuracy of the estimation results.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=12cm]{BigGap} 
}
\caption{\label{fig:BigGap} A gap in the GPS data that affects the estimation accuracy.}
\end{figure}


\section{GPS noise}
\label{ref:GPSnoise}
One objective of our project is to resist the GPS noise from the sensor. This section illustrates how our program can track and predict locations in the presence of loss of GPS signal.

The GPS data log is sequentially recorded every second during the transportation. We randomly took some parts of the GPS data off to simulate the massive loss of GPS data. Then, the Matlab program tracked and inferred the locations based on this modified GPS log.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{GPSloss1} 
}
\caption{\label{fig:GPSloss1} Simulation of GPS loss.}
\end{figure} 
 Figure~\ref{fig:GPSloss1} illustrates the normal situation in the experiment based on two bus routes. 

In figure~\ref{fig:GPSloss2}, the GPS data is lost for a while. The program estimated the user's location based on the historical particles. As can be seen from the figure, the particles began to spread widely along the two bus routes without sensor measurements. The pink points in the picture denote the particles with the highest weights. This means the program estimates that the position of the user is more likely to be on the green route.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{GPSloss2} 
}
\caption{\label{fig:GPSloss2} Simulation of GPS loss.}
\end{figure} 

Figure~\ref{fig:GPSloss3} illustrates that the program rebuilt the particles based on the recovered GPS data and the historical particles. With the new GPS measurements, the program eliminated the confusion on the green route.
\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=10cm]{GPSloss3} 
}
\caption{\label{fig:GPSloss3} Simulation of GPS loss.}
\end{figure} 

\section{Practical concerns}
For evaluation purposes, the measurement data are recorded every second by the mobile phone carried by the user. However, recording GPS data every second is not practical because the application consumes a large amount of energy, which strains the battery of the mobile phone. The data need to be recorded at a lower frequency, for example every ten seconds or every minute.

To test the usability of our application, we simulated this situation based on the original GPS data log. We took one GPS reading from every ten records in the log to simulate a ten-second recording frequency and one from every 60 records to simulate a one-minute recording frequency. Figure~\ref{fig:1060seconds} illustrates the results of the Matlab program with the different time intervals.

\begin{figure}[h]
\centering
\fboxsep 2mm
\framebox{
	\includegraphics[width=12cm]{1060seconds} 
}
\caption{\label{fig:1060seconds} Estimation of position with different time intervals. Left figure is the experiment result with 10 second interval. Right figure is with 60 second interval.}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%
%%% Conclusion
\chapter{Conclusion and future work}

\section{Conclusion}
We have described the foundations and experimental validation of our localisation model that can infer the user's usage of public transport. Our results show that the approach can provide solutions to the problems in our project.

\begin{enumerate}
\item Localisation -- Our work is able to determine the position of the user with good accuracy when incorporating accurate sensor data and with acceptable accuracy when incorporating noisy sensor data.
\item Route identification -- We implemented an application based on our localisation model. The application can identify the transportation usage between the two routes without manual user operations.
\item GPS ``noise'' -- The GPS data log recorded by the mobile application contains noise and errors. We also simulated massive GPS losses by manually deleting parts of the GPS data log. The results show that our model can track and infer the user's location in the presence of GPS noise.
\end{enumerate}

\section{Future work}
The future work of our project may relate to the following aspects:
\begin{itemize}
\item More complicated environments -- Our application only applied our localisation model to the simple case of moving in one direction with only two bus routes. Future work will extend the application to more complicated environments, such as multiple transportation routes, multiple moving directions and transfers between bus routes.
\item Accuracy -- The accuracy of our project can be improved in several ways. For example, velocity is an essential factor in estimating location. Our velocity model is a normal distribution based on common knowledge of the transportation. The accuracy of the velocity model can be improved by analysing historical velocity data through data-mining techniques. Besides, the velocity of the transportation in different districts and at different times can also be specified differently.
\item Incorporation of other systems -- Future work on the project can incorporate information from other systems, such as public transport timetables, to develop applications for personal scheduling.
\end{itemize}


%%%% ADD YOUR BIBLIOGRAPHY HERE
\newpage
% example
\begin{thebibliography}{99}
\bibitem{Book:Robotics} Sebastian Thrun, Wolfram Burgard, and Dieter Fox. \emph{Probabilistic Robotics (Intelligent Robotics and Autonomous Agents series).} 647 pages. ISBN: 9780262201629 0262201623. Intelligent robotics and autonomous agents. The MIT Press, August 2005.
\bibitem{Paper:BayesianLocation} Fox, V.; Hightower, J.; Lin Liao; Schulz, D.; Borriello, G.; , \emph{Bayesian filtering for location estimation}. Pervasive Computing, IEEE , vol.2, no.3, pp. 24- 33, July-Sept. 2003
\bibitem{Paper:RobustMonte} Sebastian Thrun, Dieter Fox, Wolfram Burgard, Frank Dellaert. \emph{Robust Monte Carlo localization for mobile robots}. Artificial Intelligence, Volume 128, Issues 1-2, May 2001, Pages 99-141, ISSN 0004-3702, 10.1016/S0004-3702(01)00069-8.
\bibitem{Paper:particlefilter} Thrun, S. \emph{Particle Filters in Robotics}. Proceedings of the 17th Annual Conference on Uncertainty in AI (UAI). 2002
\bibitem{Paper:learninfer} Lin Liao, Dieter Fox, Henry Kautz. \emph{Learning and Inferring Transportation Routines}. Artificial Intelligence. 2007


\bibitem{C4CourseResources} \emph{http://www.robots.ox.ac.uk/~pnewman/Teaching/C4CourseResources/C4BResources.html} 8/2011 accessed.
\bibitem{hittheroad} \emph{www.hittheroad.ie} 8/2011 accessed.
\bibitem{android:2011} \emph{http://developer.android.com/reference/android/location/package-summary.html} 8/9/2011 accessed.
\bibitem{httpclient} \emph{http://hc.apache.org/httpcomponents-client-ga/} 7/2011 accessed.
\bibitem{haversine} \emph{http://www.mathworks.com/matlabcentral/fileexchange/27785-distance-calculation-using-haversine-formula} 9/2011 accessed.
\bibitem{matlab} \emph{http://www.mathworks.co.uk/products/matlab/description1.html} 9/2011 accessed.
\bibitem{android} \emph{http://developer.android.com/guide/basics/what-is-android.html} 9/2011 accessed.


% magnify in Matlab http://www.mathworks.com/matlabcentral/fileexchange/5961

\end{thebibliography}
\label{endpage}

%%%%%%%%%%%%%%%%%%%%%%
%%% Appendix
\appendix
\chapter{Matlab script}


\begin{verbatim}

% Monte Carlo Localisation algorithm
% for the project of public transprot monitor
% MCL main function of the program
function MCL
% MCL -- entry point of the Monte Carlo localisation simulation.
% Loads the GPS log and the two bus route maps, initialises the particle
% set with uniform weights, then runs the predict / update / resample loop
% for nSteps iterations, plotting the weighted particles after every step.
% Setting up configuration
close all; clear all;hold on;

% Prepare necessary parameters for the model. Initialising data model.
% define the number of particles
global particleSize;
particleSize = 100;
% the Architect's scale factor
% (scales metre-based quantities, e.g. velocities and spacings, into the
% coordinate units used by the map data)
global factor;
factor = 0.00001;
% The time interval of reading GPS data,e.g.10 means every 10 seconds get a
% GPS reading. The minimum is 1.
global interval;
interval = 60;
% gps data log
global gps_data;
raw_gps_data = importdata('GPSLog.txt', ' ', 0);
gps_data = raw_gps_data.data;
% the log stores latitude first; swap columns so each row is
% [longitude latitude], matching the plotting (x,y) convention below
gps_data = [gps_data(:,2) gps_data(:,1)];

% the number of movement steps, every step gets one reading
nSteps = 200;
% get the data of two bus routes
orig_route_map_39a = importdata('route_39a_oneway.txt', ' ', 0);
orig_route_map_145 = importdata('route_145_oneway.txt', ' ', 0);
orig_route_maps{1} = orig_route_map_39a;
orig_route_maps{2} = orig_route_map_145;

%% plot the graph of bus route map
% plot the bus routes based on bus stops
plotMap(orig_route_maps);
% manually logged GPS readings taken at bus stops (used for evaluation)
gps_39a_stops = importdata('GPSLog_for_stops.txt', ' ', 0);
global gps_stops;
gps_stops = [gps_39a_stops.data(:,2), gps_39a_stops.data(:,1)];
plot(gps_stops(:,1),gps_stops(:,2),'o');

%% Initialisation phase
% convert the coordinates of raw map to one-dimention distance
route_map_particle = convertMaps(orig_route_maps);
% generate particles over the entire map space
particles = generateParticles(route_map_particle);
% set the weights of particles to uniform distribution
n = length(particles);
p_w = cell(n,1);
% total particle count across all routes; each particle starts at 1/total
total = sum(cellfun('length', particles));
for i = 1:n
    p_w{i} = ones(length(particles{i}),1)/total;
end
% set the control data, velocity
velocity = getVelocities();

%% recursive MCL algorithm starts
for k = 1:nSteps
    %%%%%%%%%%%%%%%%%%
    % prediction phase
    [particles, p_w] = estParticles(particles, p_w, velocity);
    %%%%%%%%%%%%%%%%%%
    % update phase
    % get a GPS measurement
    gps = getGPSReading(k);
    % calculate the weight for each particle based on GPS measurement
    % (skipped when no reading is available, i.e. during GPS signal loss,
    % so the predicted weights from the prediction phase are kept)
    if ~isempty(gps)
    [p_w,distances] = calcWeights(particles, gps, route_map_particle);
    end
    %%%%%%%%%%%%%%%%%%
    % resampling phase
    % remove the particles whose weight is zero
    [particles, p_w] = resample(particles, p_w);
    % draw the positon of particles on the map
    plotWeightedParticles(particles, p_w, route_map_particle);
end
end

%% functions for velocity
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% estimatie the velocities of the bus
function [velocity] = getVelocities()
% Build the velocity hypothesis distribution used by the prediction phase.
% The observed velocity is hard-coded to 15 (same units as the candidate
% values inside calculateVelocity -- presumably m/s given the conversion
% comment there; TODO confirm). Returns rows of [value probability].
    velocity = calculateVelocity(15);
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% update the velocities of the transport based on observed velocity
% 1 m/s = 3.6 km/h; 100 km/h = 28 m/s;
function [velocity] = calculateVelocity(v)
% Build the discrete velocity distribution as rows of [value probability].
% A negative v requests the uninformed prior (uniform weights); otherwise
% the candidate velocities are weighted by a Gaussian centred on v with
% standard deviation 10 and renormalised to sum to 1.
% 1 m/s = 3.6 km/h; 100 km/h = 28 m/s;
candidates = [0,5,10,15,20,25,30]';
nC = length(candidates);
if v >= 0
    % observation available: Gaussian likelihood around the observed value
    w = normpdf(candidates, v, 10);
    w = w / sum(w);
else
    % initialisation: no observation yet, all candidates equally likely
    w = ones(nC,1) / nC;
end
velocity = [candidates w];
end

%% functions for resampling
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% resampling, remove the particles whose weight is zero
function [particles, p_w] = resample(particles, p_w)
% Resampling phase: per route, drop zero-weight particles, collapse the
% survivors down to at most particleSize representatives (reduceSize),
% then renormalise the weights so they sum to 1 across all routes.
n = length(particles);
for i=1:n
    % particles with weight exactly zero carry no probability mass
    min_weight = 0;
    t_p = particles{i}(p_w{i}>min_weight);
    t_w = p_w{i}(p_w{i} > min_weight);
    [particles{i}, p_w{i}] = reduceSize(t_p, t_w);
end
% normalisation
% flatten the per-route cells, normalise globally, then restore the
% cell layout using the recorded per-route lengths
ls = cellfun('length', p_w);
W = cell2mat(p_w);
W = W/sum(W);
% return the particles to original format
p_w = mat2cell(W,ls);
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% reduce particle size
function [new_particles, new_weights] = reduceSize(particles, weights)
% Collapse the particle set of one route to at most particleSize
% representatives. The occupied span [p_min, p_max] is cut into
% particleSize equal bins; each non-empty bin is represented by its
% highest-weight particle, carrying the bin's summed weight.
%   particles / weights : matching numeric arrays for one route
% Fixes over the original implementation:
%   * bins are half-open (last bin closed), so a particle sitting exactly
%     on a bin boundary is no longer counted -- and its weight summed --
%     in two neighbouring bins (this also repairs the degenerate
%     p_min == p_max case, which previously duplicated the particle
%     particleSize times);
%   * the preallocated outputs are trimmed to the number of bins actually
%     filled, so no phantom zero-distance / zero-weight particles are
%     returned.
global particleSize;
p_max = max(max(particles));
p_min = min(min(particles));
p_interval = (p_max - p_min)/particleSize;
new_particles = zeros(particleSize,1);
new_weights = zeros(particleSize,1);
j = 1;
for i = 1:particleSize
    low = p_min + (i-1)*p_interval;
    if i < particleSize
        % half-open bin [low, low + p_interval)
        index = (particles >= low)&(particles < low + p_interval);
    else
        % final bin is closed so p_max itself is captured
        index = (particles >= low)&(particles <= p_max);
    end
    % pick the heaviest particle of the bin as its representative
    [~,idx] = max(weights(index));
    if idx
        tmp = particles(index);
        new_particles(j) = tmp(idx);
        new_weights(j) = sum(weights(index));
        j = j+1;
    end
end
% trim unused preallocated slots
new_particles = new_particles(1:j-1);
new_weights = new_weights(1:j-1);
end

%% fuctions for prediction
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% estimation for multiple routes
function [particles, p_w] = estParticles(particles, p_w, velocity)
% Prediction phase across all routes: run the single-route estimator on
% every route's particle set and collect the updated particles/weights.
nRoutes = length(particles);
for r = 1:nRoutes
    [particles{r}, p_w{r}] = estSingleRouteParticles(particles{r}, p_w{r}, velocity);
end
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% predict particles based on velocity
% for each possible velocity, estimate the next particles
% return the weighted mean of the estimated particles
function [ep, p_wt] = estSingleRouteParticles(particle, p_wt, velocity)
% Prediction step for a single route.
% Every particle spawns one candidate position per velocity hypothesis;
% each candidate's weight is the particle weight times the velocity
% probability. Coinciding candidates are merged by mergeParticles.
%   particle : column vector of 1-D distances along the route
%   p_wt     : matching column vector of particle weights
%   velocity : rows of [velocity_value probability] (see calculateVelocity)
% global route_map_particle;
global factor;
global interval;
% time intervals for updating
dt = 1 * interval;
m = length(particle);
% length() of the k-by-2 velocity matrix yields k, the number of
% velocity hypotheses (k >= 2 for every distribution built here)
n = length(velocity);
% weight for particles based on different velocities
% outer product: entry (i,j) = weight of particle i under velocity j
p_wt = p_wt * velocity(:,2)';
%sum(sum(p_wt))
% the possible distances with different velocities
tmp_d = zeros(m, n);
% estimate distances based on different velocities
% (distances-by-velocityies matrix)
% each column shifts all particles by velocity * dt, scaled into map units
for i = 1:n
    tmp_d(:,i) =  particle(:,1) + ones(m,1).* factor * velocity(i,1) * dt;
end
[ep, p_wt] = mergeParticles(tmp_d, p_wt);
end

%% fuctions for measuremeny update
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% calculate weight for each particles on multiple routes
function [weights,distances] = calcWeights(particles, gps, route_map_particle)
% Update phase: convert the distance between each particle and the GPS
% reading into a normalised weight via a zero-mean Gaussian.
% Returns the new weights (one cell per route) and the raw distances.
global factor;
n = length(particles);
weights = cell(n,1);

for i = 1:n
    % per-route distances between each particle and the GPS fix
    weights{i} = calcWeight(particles{i}, gps, route_map_particle{i});
end

distances = weights;
% remember the length of the weight for particles on each route
ls = cellfun('length', weights);
% weights are the distances between the GPS data and the particles
% assuming the distance between the best particle and the perfect GPS data
% is zero, which means the GPS measurement is perfect.
mu = 0;
% the noise of the distance value
% (10 metres of assumed GPS noise, scaled into map units by factor)
sigma = 10 * factor;
% all the particles in normal distribution
W = cell2mat(weights);
W = normpdf(W, mu, sigma);
W = W/sum(W);
% return the particles to original format
weights = mat2cell(W,ls);
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% calculate difference for each particles comparing to GPS
function [weights] = calcWeight(particles, gps, refRoute)
% Distance between every particle and the GPS reading for one route.
% Particles (1-D distances along the route) are mapped back to coordinate
% pairs first, then the Euclidean distance to the GPS fix is returned,
% one value per particle.
routeL = convertParticles(particles, refRoute);
nP = length(routeL);
weights = zeros(nP,1);
for j = 1:nP
    dx = routeL(j,1) - gps(1);
    dy = routeL(j,2) - gps(2);
    weights(j) = sqrt(dx^2 + dy^2);
end
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% remove the same particles, sum the weights of the same particles together
function [new_particles, new_weights] = mergeParticles(particles, weights)
% Collapse duplicate particles into one, summing their weights.
% particles : numeric array (a matrix is allowed); traversed in
%             column-major (linear-index) order, matching particles(:)
% weights   : array of the same size, one weight per particle
% new_particles : column vector of the distinct particle values, in
%                 order of first appearance
% new_weights   : column vector of summed weights, aligned with
%                 new_particles
%
% The previous implementation grew both output arrays inside an O(n^2)
% linear-search loop; unique/accumarray produce the same result in
% O(n log n).
if isempty(particles)
    new_particles = [];
    new_weights = [];
    return;
end
% 'stable' keeps first-appearance order, as the incremental loop did
[new_particles, ~, idx] = unique(particles(:), 'stable');
% sum the weights of every occurrence of each distinct particle
new_weights = accumarray(idx, weights(:));
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% convert distance to positions
function [position] = convertParticles(particles, refRoute)
% Map each particle's one-dimensional route distance to a (latitude,
% longitude) pair using the reference route; one output row per particle.
position = zeros(length(particles), 2);
for k = 1:length(particles)
    position(k,:) = convertDistance(particles(k), refRoute);
end
end

%% functions for initialisation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% generate more particles based on original particles of the routes
% simply try dividing the route equally
function [particles] = generateParticles(routes)
% Seed every route with its own evenly spaced set of particles.
% routes    : cell array of route maps
% particles : n-by-1 cell array, one particle vector per route
particles = cell(length(routes), 1);
for r = 1:length(routes)
    particles{r} = initialiseParticles(routes{r});
end
end

% generate more particles based on original particles of the routes
% simply try dividing the route equally
function [particles] = initialiseParticles(bus_route)
% Lay particles along a single route at a fixed spacing, starting at 0.
% bus_route : route map whose second column is the cumulative distance
% particles : column vector of evenly spaced particle distances
global factor;
% spacing between particles, assumed to be 10 metres
dp = 10 * factor;
% number of particles that fit on the route
np = fix(max(bus_route(:,2)) / dp);
if np > 0
    % cumulative summation reproduces the original running addition
    particles = cumsum([0; repmat(dp, np-1, 1)]);
else
    particles = zeros(np,1);
end
end

%% functions for mapping coordinates to a one-dimensional map
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% convert locations on map to particles
function [route_map_particle] = convertMaps(orig_route_maps)
% Turn every raw coordinate route into its particle-map representation
% (point index, cumulative distance, latitude, longitude).
nMaps = length(orig_route_maps);
route_map_particle = cell(nMaps, 1);
for k = 1:nMaps
    route_map_particle{k} = convertSingleRoute(orig_route_maps{k});
end
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% convert route locations on map to particles
function [map_particles] = convertSingleRoute(map)
% Annotate a route given as coordinate rows with a point index and the
% cumulative travelled distance: [index, distance, lat, lon].
nPoints = length(map);
% Euclidean length of each segment between consecutive route points
seg = sqrt(sum(diff(map, 1, 1).^2, 2));
% running distance from the start of the route (0 at the first point);
% cumsum performs the same sequential accumulation as the original loop
d = [0; cumsum(seg)];
map_particles = [(1:nPoints)' d map];
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% convert distance to coordinates (latitude, longitude)
% based on an existing route with known positions
% search for the nearest two particles whose latitude and longitude are known
% basically these three points lie on a straight line
% calculate the position
function [position] = convertDistance(distance, refRoute)
% Convert a one-dimensional distance along a route into a (lat, lon)
% coordinate by linear interpolation between the two nearest reference
% points.
% distance : scalar distance travelled from the start of the route
% refRoute : matrix [index, cumulative distance, latitude, longitude]
% position : [lat, lon] pair; [0,0] when the distance is empty or falls
%            outside the route
[i,j]=size(refRoute);
% reject missing or out-of-range distances (|| short-circuits, so the
% comparisons are never evaluated on an empty input)
if isempty(distance) || distance < 0 || distance > refRoute(i, 2)
    position = [0,0];
    return;
end
% reference point whose cumulative distance is closest to the query
[min_difference, array_position] = min(abs(refRoute(:,2)-distance));
if min_difference == 0
    % the point is just on the apex, return the position of apex point
    position = refRoute(array_position, 3:4);
else
    % pick the segment that brackets the query distance: if the nearest
    % point lies beyond the query, interpolate from the previous point,
    % otherwise towards the next one
    if refRoute(array_position,2) - distance > 0
        startPoint = refRoute(array_position - 1,:);
        endPoint = refRoute(array_position,:);
    else
        startPoint = refRoute(array_position,:);
        endPoint = refRoute(array_position + 1,:);
    end
    % calculate the point in the line segment
    % NOTE(review): this divides by the segment length in route distance;
    % two consecutive reference points with identical cumulative distance
    % would make it 0/0 — confirm the maps contain no repeated points
    lamda = (distance-startPoint(2))/(endPoint(2)-startPoint(2));
    x = lamda * (endPoint(3)-startPoint(3)) + startPoint(3);
    y = lamda * (endPoint(4)-startPoint(4)) + startPoint(4);
    position = [x, y];
end
end

%% functions for GPS measurement
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% get the data from GPS
function [gps] = getGPSReading(k)
% Return the k-th (subsampled) GPS fix from the recorded trace, or []
% when no reading is available; the fix is also plotted.
% k   : simulation step number
% gps : row of gps_data, or [] outside the trace or inside the
%       simulated signal-loss window
global gps_data;
global interval;
% skip the first 500 samples of the recording
offset = 500;
index = interval * k + offset;
% simulate loss of GPS
% NOTE(review): both bounds of lossRange are identical, and the strict
% comparisons below can therefore never be true — as written no reading
% is ever dropped; confirm the intended width of the loss window
lossRange = [interval+offset + 50,interval+offset + 50];
if index > 0 && index < length(gps_data)
    if index > lossRange(1,1) && index < lossRange(1,2)
        gps = [];
    else
        gps = gps_data(index,:);
    end
else
    gps = [];
end
plotGPS(gps);
end

%% functions for graphing
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% draw the particles
function plotWeightedParticles(particles, p_w, refRoute)
% Draw the particles of every route, shaded according to their weights.
% particles : cell array of per-route particle distance vectors
% p_w       : cell array of the matching particle weights
% refRoute  : cell array of per-route reference maps
nRoutes = length(particles);
allPos = cell(nRoutes, 1);
allWts = cell(nRoutes, 1);
for r = 1:nRoutes
    % convert each route's particles to coordinates before plotting
    allPos{r} = convertParticles(particles{r}, refRoute{r});
    allWts{r} = p_w{r};
end
plotWeightedParticlesSingleRoute(vertcat(allPos{:}), vertcat(allWts{:}));
pause(0.5);
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% draw the particles with weight for single route
function plotWeightedParticlesSingleRoute(positions, weights)
% Render the particles as dots whose grey level reflects their weight;
% heavier particles are drawn in later (darker) layers on top.
% positions : n-by-2 matrix of particle coordinates
% weights   : n-by-1 vector of the matching particle weights
hold on;
% length(positions)
% graph reference that is used for drawing different color of particles
global p_G;
if isempty(p_G)
    % one reusable line object per shade, created on first call
    p_G = [line, line, line, line, line, line];
end

% bin width over the weight range
% NOTE(review): the range is divided by 7 but only 6 point sets are
% drawn, and the sets overlap (weights > 2n contains weights > 3n, ...);
% darker layers are simply painted over lighter ones. Particles with
% weights in (n, 2n] fall into no set at all — confirm this is intended.
n = (max(weights) - min(weights))/7;
d_p_0 = positions(weights <= n,:);
d_p_1 = positions(weights > 2*n,:);
d_p_2 = positions(weights > 3*n,:);
d_p_3 = positions(weights > 4*n,:);
d_p_4 = positions(weights > 5*n,:);
d_p_5 = positions(weights > 6*n,:);

% grey shades from light (low weight) to dark, magenta for the heaviest
c_0 = [0.8,0.8,0.8];
c_1 = [0.7,0.7,0.7];
c_2 = [0.5,0.5,0.5];
c_3 = [0.3,0.3,0.3];
c_4 = [0.2,0.2,0.2];
c_5 = [1,0,1];

% NOTE(review): '.' is a marker, not a line style; current MATLAB
% releases reject set(...,'LineStyle','.') — if this errors, use
% 'LineStyle','none','Marker','.' instead
set(p_G(1), 'XData',d_p_0(:,1),'YData',d_p_0(:,2),'Color',c_0,'LineStyle','.');
set(p_G(2), 'XData',d_p_1(:,1),'YData',d_p_1(:,2),'Color',c_1,'LineStyle','.');
set(p_G(3), 'XData',d_p_2(:,1),'YData',d_p_2(:,2),'Color',c_2,'LineStyle','.');
set(p_G(4), 'XData',d_p_3(:,1),'YData',d_p_3(:,2),'Color',c_3,'LineStyle','.');
set(p_G(5), 'XData',d_p_4(:,1),'YData',d_p_4(:,2),'Color',c_4,'LineStyle','.');
set(p_G(6), 'XData',d_p_5(:,1),'YData',d_p_5(:,2),'Color',c_5,'LineStyle','.');
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% draw the bus maps based on stops
function plotMap(maps)
% Plot the two bus routes on the current axes with labels and a legend.
% maps : cell array holding the coordinate point lists of both routes
styles = {'g.-', 'b.-'};
for r = 1:2
    route = maps{r};
    plot(route(:,1), route(:,2), styles{r});
end
title('Two bus routes in Dublin area');
xlabel('Longitude');
ylabel('Latitude');
legend('Bus 39a','Bus 145');
end

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% draw the data from GPS
function plotGPS(gps)
% Mark the current GPS fix with a red star; do nothing for an empty fix.
if isempty(gps)
    return;
end
plot(gps(1), gps(2), 'r*');
end

\end{verbatim}

\end{document}
