\documentclass[10pt,a4paper]{article}
\usepackage{graphicx}
\usepackage[english]{babel}
\usepackage{fullpage}
\usepackage{latexsym}
\usepackage{amssymb}
\usepackage[T1]{fontenc}
\usepackage[sc]{mathpazo}
\linespread{1.05}
\usepackage{subfig}



\title{Applied Statistics\\ Report on the problems}
\author{Mieke Hiltermann and Lotte van den Berg \\ Utrecht University}

\newcommand{\mco}{\mathcal{O}}
\newcommand{\mcg}{\mathcal{G}}
\newcommand{\mcv}{\mathcal{V}}
\newcommand{\mce}{\mathcal{E}}
\newcommand{\mcn}{\mathcal{N}}
\newcommand{\real}{\mathbb{R}}

\begin{document}

% \begin{titlepage}
\maketitle 

% \tableofcontents

\section*{Problem 1}
\textit{A pilot study of a new process for making a jet aircraft engine yields observations of the vane diameter (measured in inches), which are recorded in the datafile vanes.txt. The data consists of 20 rational subgroups, each of size 5. The customer requires that the diameters must be between 0.5005 and 0.5055 inches. The ideal value of the diameter is 0.5030 inches.}

\subsection*{(a)}
\textit{Check whether the process was in statistical control during the pilot study. Give a clear description of the tools that you use in your check. Indicate which
observations may be removed after closer inspection.}\\
\\
\noindent A process is statistically in control when all observations seem to be from the same distribution. When the process is in statistical control there are no assignable causes for variation in the data. \\

\noindent We start with an inspection of the individual data. We used the Box-and-Whisker plot of the vanes data to detect outliers. As we can see in figure \ref{fig:boxwhisker-individual} there are two outliers; the subgroups of these outliers can be removed if these two data points are still outliers after closer inspection.\\
% 			 straks nog erop terug komen dat we die punten niet weghalen!!!  

		\begin{figure*}[h]
			\caption{Box-and-Whisker plot of the individual vanes data}\label{fig:boxwhisker-individual}
			\centering{
 			\includegraphics[width=7cm]{opg1-boxwhisker-individual.jpg} 
			}
		\end{figure*}

\noindent To test the normality we first perform a graphical inspection of the data in two ways: with the standard normal probability plot and a kernel density estimator plot. We make these figures for the individual data in R. Although the kernel density graph is not very smooth, figure \ref{fig:normality-individual} shows it is reasonable to assume that our vane diameters are normally distributed. \\

\begin{figure*}[htbp]
\caption{Testing normality for the individual data}\label{fig:normality-individual}
\begin{center}
\subfloat[Kernel density estimator]{\label{fig:kernelI}%
	\includegraphics[width=7cm]{opg1-kerneldensity-individual.jpg}}\qquad
\subfloat[Normal probability plot]{\label{fig:normalI}%
	\includegraphics[width=7cm]{opg1-normalprob-individual.jpg}}\\
\end{center}
\end{figure*}

\noindent Finally we do a formal goodness-of-fit test in R. The Shapiro-Wilk normality test for the individual data gives:
$ W = 0.9856$ and $p$-value $= 0.3528$. These values give no evidence to reject our null hypothesis and we can indeed assume the individual data to be normally distributed. The R-code is given below.

	\begin{verbatim}
setwd("C:/Users/Jef/Desktop/Applied Statistics")
library(qcc)

# --- INDIVIDUAL DATA --- 
# Check whether the process was statistical in control and normally distributed 
	> vanes 	<- 	read.table("vanes.txt" )
# generate rational subgroups
	> vanesdata 	<- 	qcc.groups(vanes$V1,vanes$V2) 

#Informal check for outliers, Box and Whisker Plot - individual data
	> boxplot(vanesdata, horizontal = TRUE, main="Box-and-whisker plot of individual vanes
	 data")

#Check the normality:
#Kernel density plot - individual data:
	> plot(density(vanesdata),main="Kernel density estimate of individual vanes data",
	col="red",lwd=3)
#Normal probability plot - individual data:
	> qqnorm(vanesdata,main="Normal probability plot of individual vanes data",pch=19,
	cex=1,fg="red")
	> qqline(vanesdata,lwd=3,col="blue",lty="dashed")

#Standard Normality test - individual data
	> shapiro.test(vanesdata)

        Shapiro-Wilk normality test

	data:  vanesdata 
	W = 0.9856, p-value = 0.3528
\end{verbatim}

\noindent The data is given in 20 rational subgroups; for Shewhart we need normality of the means of the rational subgroups. Of course when the individual data are normally distributed then so are the group means, but we decided to follow the same procedure and we make the same plots to show this. In figure \ref{fig:groups} we can see there are no more outliers, the kernel density estimator for the group means is very smooth and also the values from the Shapiro-Wilk normality test are very good ($W = 0.9876$, $p$-value $= 0.9933$).\\

\noindent Thus during the pilot study the process was in statistical control.

\begin{figure*}[htbp]
\caption{Means of the rational subgroups}\label{fig:groups}
\begin{center}
\subfloat[Box-and-whisker plot]{\label{fig:boxwiskerG}%
	\includegraphics[width=7cm]{opg1-boxwhisker-groups.jpg}}\qquad
\subfloat[Kernel density estimator]{\label{fig:kernelG}%
	\includegraphics[width=7cm]{opg1-kerneldensity-groups.jpg}}
%\subfloat[Kernel density estimator]{\label{fig:kernelG}%
%	\includegraphics[width=7cm]{opg1-kerneldensity-groups.jpg}}\qquad
\end{center}
\end{figure*}

\begin{verbatim}
# --- GROUP MEANS ---   
# Matrix to calculate the means and bind them to 'vanesdata' 
	> means 	= 	matrix(nrow = 20, ncol = 1)
	> for(i in 1:20) means[i,1] <- mean(vanesdata[i,])
	> vanesdatam 	<- 	cbind(vanesdata, means)

#Informal check for outliers, Box and Whisker Plot - groupmeans
	> boxplot(vanesdatam[,6], horizontal = TRUE, main="Box-and-whisker plot of the 
	groupmeans vanes data")

#Check if the group means are normally distributed.
#Kernel density plot  - groupmeans
	> plot(density(vanesdatam[,6]),main="Kernel density estimate for groupmeans vanes 
	data",col="red",lwd=3)
#Normal probability plot - groupmeans
	> qqnorm(vanesdatam[,6],main="Normal probability plot of the groupmeans vanes data",
	pch=19,cex=1,fg="red")
	> qqline(vanesdatam[,6],lwd=3,col="blue",lty="dashed")

#Standard Normality test - groupmeans
	> shapiro.test(vanesdatam[,6])
	
        Shapiro-Wilk normality test

	data:  vanesdatam[, 6] 
	W = 0.9876, p-value = 0.9933
\end{verbatim}

% ---------------------------------------------------
\subsection*{(b)}
\textit{Perform a capability analysis, including checking the assumptions. Give estimates for the capability indices $C_p$ and $C_{pk}$. What is the estimated fall-out?}\\
\\
\noindent For a capability analysis the data has to be normally distributed and the data has to be in-control. As we saw in (a) this is not a problem. \\

\noindent The next step is to assess capability. Therefore we use the standard capability indices $C_p$ and $C_{pk}$. In (a) we could see that the process is not really centred, so we prefer $C_{pk}$.

\begin{figure*}[htbp]
	\caption{Process capability sixpack plot}\label{fig:sixpack}
	\centering{
 	\includegraphics[width=16cm]{sixpack.jpg}} 
\end{figure*}
\noindent
\begin{verbatim}
> vanesqcc 	<- 	qcc(vanesdata,type="xbar")
> process.capability.sixpack(vanesqcc,spec.limits=c(0.5005,0.5055),target=0.5030,
nsigmas=3)
> process.capability(vanesqcc,spec.limits=c(0.5005,0.5055),target=0.5030,nsigmas=3)
\end{verbatim}
\noindent
From figure \ref{fig:sixpack} we can conclude the data is in control. The $95 \%$ confidence intervals are: $C_p = (2.75 ; 3.64)$ and $C_{pk} = (2.38 ; 3.17)$, with point estimates $3.20$ and $2.77$ respectively. This is higher than required in most cases. The mean of the data is at $0.5033$, which is not exactly equal to the ideal value ($0.5030$) but nothing to worry about. \\

\begin{verbatim}
> process.capability(vanesqcc,spec.limits=c(0.5005,0.5055),target=0.5030,nsigmas=3)

Process Capability Analysis

Call:
process.capability(object = vanesqcc, spec.limits = c(0.5005,0.5055), target = 0.503, 
nsigmas = 3)

Number of obs = 100          Target = 0.503   
       Center = 0.503332        LSL = 0.5005  
       StdDev = 0.0002605242    USL = 0.5055  

Capability indices:

      Value   2.5%  97.5%
Cp    3.199  2.754  3.643
Cp_l  3.623  3.196  4.051
Cp_u  2.774  2.445  3.103
Cp_k  2.774  2.382  3.166
Cpm   1.975  1.627  2.322

Exp<LSL 0%   Obs<LSL 0% 
Exp>USL 0%   Obs>USL 0% 
\end{verbatim}

\begin{figure*}[h]
	\caption{Process capability}\label{fig:capability}
	\centering{
 	\includegraphics[width=10cm]{opg1-pca.jpg}} 
\end{figure*}

\noindent The proportion of non-conforming items of a normally distributed characteristic X equals
\begin{eqnarray*}
1-P(LSL<X<USL)&=&2\Phi(-3C_p)\\
&=&8.230512* 10^{-22}\\
% &\approx& 0\\
\end{eqnarray*}

\noindent We used R to calculate it:
\begin{verbatim}
> Cp<-3.199 
> 2*pnorm(-3*Cp)
[1] 8.230512e-22
\end{verbatim}

\noindent This answer is not quite right because it assumes the process is centered around the middle of the specification interval; we can see in the figures this is not completely true. We can also conclude it from the fact that $C_p$ and $C_{pk}$ are quite different from each other. Therefore we need to look at the expected proportion of non-conforming items for a non-centred process with normal distribution. \\
\\
\noindent For our process the following assumption holds $\frac{1}{2}(USL+LSL)=0.5030\leq \mu= 0.5033\leq USL=0.5055$, therefore the following equations are true:
\begin{eqnarray*}
1-P(LSL<X<USL)&=&\Phi(-3(2C_p-C_{pk}))+\Phi(-3C_{pk})\\
&=&4.324566* 10^{-17}\\
\end{eqnarray*}
\begin{verbatim}
Cpk		<-	2.774 
> pnorm(-3*(2*Cp-Cpk))+pnorm(-3*Cpk)
[1] 4.324566e-17
\end{verbatim}
So the expected proportion of non-conforming items is equal to $4.32 \cdot 10^{-17}$.

\subsection*{(c)}
\textit{Compute a $\beta$-expectation tolerance interval that contains $99.73 \%$ of the underlying distribution of the data. Compare these intervals with the results of
(b).}\\
\\
\noindent
We want a tolerance interval of the underlying distribution of the data. In (a) we showed that the data has a normal distribution, so we will make a tolerance interval with the normal density function $f(x) = \frac{1}{\sqrt{2\pi \sigma^2}}e^{-\frac{1}{2\sigma^2} (x-\mu)^2}$. But then we of course need estimators for $\mu$ and $\sigma$. We use the following unbiased estimators:
$$ \widehat{\mu} = \frac{1}{n} \sum_{i=1}^{n} X_i =\overline{X}$$
$$ \widehat{\sigma}^2 = \frac{1}{n-1} \sum_{i=1}^{n} (X_i-\overline{X})^2 $$

\begin{verbatim}
# Compute unbiased estimators for \sigma^2 and \mu;
> mu <- mean(vanes$V1)
> mu
[1] 0.503332
> sigma2 <- 100/99*var(vanes$V1)
> sigma <- sqrt(sigma2)
> sigma
[1] 0.0003316086

# smallest beta-expectation tolerance interval for standard normal:
> qnorm((1-0.9973)/2)
[1] -2.999977
> qnorm(1-(1-0.9973)/2)
[1] 2.999977

# smallest beta-expectation tolerance interval for estimated normal distribution:
> mu + qnorm((1-0.9973)/2)*sigma
[1] 0.5023372
> mu + qnorm(1-(1-0.9973)/2)*sigma
[1] 0.5043268
\end{verbatim}

\noindent Thus the $\beta$-expectation tolerance interval with confidence level $0.9973$ is $(0.5023;0.5043)$.\\
\\
\noindent We want to compare this interval with the interval we found in (b), but there the interval is based on the group means. The group means have a smaller standard deviation than the individual observations (smaller by a factor $\sqrt{5}$), so it is reasonable that this interval -$(0.5030;0.5037)$- is smaller than the interval we found here $(0.5023;0.5043)$.

\subsection*{(d)}
\textit{It is required by customers that the production process has a $C_{pk}$ of $2.0$. Discuss whether the pilot study indicates that the process is sufficiently capable. If the current process is not sufficiently capable, discuss whether the pilot study indicates that with minor changes the process will be able to meet the capability requirements.}\\
\\
\noindent
In our process $C_{pk}=2.774$, therefore the pilot study indicates that the process is sufficiently capable for the requirement of a $C_{pk}$ value of $2.00$, because the higher the $C_{pk}$ value the better the capability.\\

\noindent If we still want to reduce the capability indices to $2.0$ this can be done in the following way.\\
\\
\noindent
For a non-centered process with normal distribution, for which it holds that $\frac{1}{2}(USL+LSL)\leq \mu\leq USL$ we have: 
\[
C_{pk}=\frac{USL-\mu}{3\sigma}.
\]
In order to let this amount decrease while keeping the USL the same (because the customer still requires the same specification limits for the diameters) either the mean of the process has to increase or the variation of the process has to decrease. In this case it is not desirable to let the mean of the process increase because the center of the process ($0.5033$ inches) is already above the ideal value ($0.5030$ inches). Therefore, in order to get a lower $C_{pk}$ value, the variance needs to decrease. Usually it is very hard to reduce the variance. This usually involves a major change of the production process. Probably it is not possible to reduce the $C_{pk}$ value with only minor changes in the process.\\

\subsection*{(e)}
\textit{An engineer claims that it is not unlikely that during mass production the mean diameter of the vanes shifts by an amount of $0.0005$ inches, while the variance keeps its current value. Calculate the impact on the capability indices of such
a change.}\\
\\
\noindent
Define $\delta:=0.0005$. Suppose the mean has positively shifted over a distance of $\delta$ (inches). Define $\mu:=mean+\delta$.
Then:
\begin{eqnarray*}
C_p		&=&\frac{USL-LSL}{6\sigma}\\
 			&=&3.198679\\
C_{pk}&=&\min\big(\frac{USL-\mu}{3\sigma}, \frac{\mu-LSL}{3\sigma}\big)\\
			&=&	2.134159.\\
\end{eqnarray*}
While the capability indices were $3.199$ and $2.774$ respectively. The $C_p$ value should stay exactly the same when only the mean shifts; the change in this index is due to rounding errors. The ratio of the new to the old value is $\frac{C_{pk,new}}{C_{pk}}=\frac{2.134159}{2.774}=0.7693435$, thus $C_{pk}$ drops to $77\%$ of its original value.\\

\noindent If we suppose the mean has negatively shifted over a distance of $\delta$, and define $\mu:=mean-\delta$. Then we get:
\begin{eqnarray*}
C_p		&=&\frac{USL-LSL}{6\sigma}\\
 			&=&3.198679\\
C_{pk}&=&\min\big(\frac{USL-\mu}{3\sigma}, \frac{\mu-LSL}{3\sigma}\big)\\
			&=&	2.983728.\\
\end{eqnarray*}
This time the ratio is $\frac{C_{pk,new}}{C_{pk}}=\frac{2.983728}{2.774}=1.075605$, so $C_{pk}$ increases to $108\%$ of its original value.\\
\begin{verbatim}
#defining the variables
mean 		<-	0.503332 
StdDev	<-	0.0002605242
delta		<-	0.0005
LSL		<-	0.5005  
USL		<-	0.5055 
Cporg		<-	3.199 
Cpkorg	<-	2.774 

#suppose a positive shift
mupos		<-	mean+delta
Cppos 	<-	(USL-LSL)/(6*StdDev)
Cpkpos	<-	min((USL-mupos)/(3*StdDev),(mupos-LSL)/(3*StdDev))

#suppose a negative shift
muneg		<-	mean-delta
Cpneg 	<-	(USL-LSL)/(6*StdDev)
Cpkneg	<-	min((USL-muneg)/(3*StdDev),(muneg-LSL)/(3*StdDev))

#change
Cpkpos/Cpkorg
Cpkneg/Cpkorg
\end{verbatim}

\subsection*{(f)}
\textit{Set up two-sided ``improved data driven control charts'' (with in case of the minimum chart $m = 3$), see Albers, W. and Kallenberg, W.C.M. (2006). Improved
data driven control charts. TW-Report No. 1791, using the 100 observations as
individual observations (ignore the group-structure) for the following situations:\\
\begin{enumerate}
	\item use corrections to reduce the bias \\
	\item use corrections for the exceedance probability approach with $\varepsilon = 0.1$ and
				$ \alpha = 0.2$. \\
\end{enumerate}
Take in any case $p = 0.002$. Clearly indicate how you arrived at the control
limits.}\\
\\
\noindent
The observations given in \textit{`vanes.txt'} are seen as individual Phase I observations $X_1,\ldots,X_n$ with $n=100$; these observations are used to estimate the parameters. \\

\noindent First of all we need to see if the data is normally distributed. In order to do this we need to define $X_{(1)}:=\min(X_1,\ldots,X_n)$ and $X_{(n)}:=\max(X_1,\ldots,X_n)$. The mean and variance are based on the Phase I observations in the following way
\begin{eqnarray*}
\overline X&=&\frac{1}{n}\sum_{i=1}^nX_i\\
S^2&=&\frac{1}{n-1}\sum_{i=1}^{n}(X_i-\overline X)^2.\\
\end{eqnarray*}
We can say the data is normally distributed when $d_1\leq\frac{X_{(n)}-\overline X}{S}\leq d_2$ and $d_1\leq\frac{\overline X- X_{(1)}}{S}\leq d_2$, with $\overline X$ the center of the data and $d_1$ and $d_2$ defined as follows:
\begin{eqnarray*}
d_1	&=& u_{\frac{-0.7+0.5\log n}{n}}\\
		&=&	\Phi^{-1}(1-\frac{-0.7+0.5\log n}{n})\\
d_2	&=&	u_{\frac{5}{n\sqrt n}}\\
		&=&	\Phi^{-1}(1-\frac{5}{n\sqrt n}).\\
\end{eqnarray*}
Later on we can use the improved estimated upper control limit of a normal distribution when $d_1\leq\frac{X_{(n)}-\overline X}{S}\leq d_2$ and we can use the improved estimated lower control limit of a normal distribution when $d_1\leq\frac{\overline X- X_{(1)}}{S}\leq d_2$. In order to check this we inserted the following lines in R.\\
\begin{verbatim}
> vanes <- read.table("vanes.txt")
> n<-100
> 
> x1 <-min(vanes$V1)
> x100 <- max(vanes$V1)
> 
> samplenumber<- rep(1:n, each=1)
> vanesdata <- qcc.groups(vanes$V1, samplenumber)
> stats.xbar(vanesdata)
$statistics
[1] 0.503332

$center
[1] 0.503332

> xbar=0.503332
> S <-sd.xbar(vanesdata)
> 
> d1 <- qnorm((1-(-0.7+0.5*log(n))/n))
> d2<-qnorm(1-(5/(n*n^(1/2))))
> 
> LL<-(xbar-x1)/S
> UL<-(x100-xbar)/S
> 
> d1<=LL && LL<=d2
[1] TRUE
> d1<=UL && UL<=d2
[1] FALSE
> UL<=d2
[1] FALSE
\end{verbatim}
It is shown that the UL is not smaller than or equal to $d_2$. Closer inspection shows that $d_2=2.575829$ and $UL=2.926410$. But we should not forget that in (a) we saw this data contained two outliers. Exactly these outliers are equal to $X_{(1)}$ and $X_{(n)}$, resulting in wrong conclusions. Therefore we assume that this data is normally distributed, just as we assumed in (a).\\
\\
We want to have a False Alarm Rate, $p=0.002$. The Average Run Length and the FAR are dependent on the Phase I observations, therefore they have become random variables. These stochastic values are written in the following way $FAR=P_n$ and $ARL=\frac{1}{P_n}$; these should be close to $p$ and $\frac{1}{p}$ respectively.\\
This can be done in two ways: by reducing the bias ($E(P_n)-p$) or by looking at the exceedance probabilities ($P(P_n>p(1+\varepsilon))<\alpha$ with $\varepsilon=0.1$, and $\alpha=0.2$).\\
\\
To get a small bias or exceedance probability huge samples are needed when estimators of the parameters are simply plugged into the control limits. Corrections of the control limits are available reducing the bias and exceedance probabilities sufficiently well for common sample sizes. \\
\\
Call the distribution function of the observations $F$, and define $\overline F:=1-F$. Normally we call $UCL=\overline F^{\,-1}(p)$ and $LCL=F^{-1}(p)$ for the normal chart. From this we get $FAR=p$. 
Therefore we have the uncorrected control limits $\overline X \pm u_{\frac{p}{2}}S$, with $u_{\frac{p}{2}}=\overline{\Phi}^{\,-1}(\frac{p}{2})$.\\
\\
If our aim is to get $E(P_n)=p$ (so to reduce the bias) then we should take as $LCL$ and $UCL$ the following quantities:
\begin{eqnarray*}
(LCL,UCL)_{bias}	&=&	\big (\overline X - u_{\frac{p}{2}}S(1+\frac{u_{\frac{p}{2}}^2+3}{4n}), \overline X + 				   			u_{\frac{p}{2}}S(1+\frac{u_{\frac{p}{2}}^2+3}{4n})\big)\\
					&=&(0.5022777, 0.5043863)
\end{eqnarray*}
This was obtained with the following lines in R.
\begin{verbatim}
> p<-0.002
> LCLbias<- xbar - qnorm(1-p/2)*S*(1+((qnorm(1-p/2))^2+3)/(4*n))
> UCLbias<-xbar + qnorm(1-p/2)*S*(1+((qnorm(1-p/2))^2+3)/(4*n))
\end{verbatim}
\noindent
If our aim is to get $P(P_{LCL}>\frac{p}{2}(1+\varepsilon))\leq \alpha$, and $P(P_{UCL}>\frac{p}{2}(1+\varepsilon))\leq \alpha$ then we should have the following quantities for the specification limits.
\begin{eqnarray*}
(LCL,UCL)_{exc}&=&(\overline X - u_{\frac{p}{2}}S (1+\frac{u_{\alpha}(\frac{1}{2}+u_{\frac{p}{2}}^{-2})^{\frac{1}{2}}}{\sqrt{n}} -\frac{\varepsilon}{u_{\frac{p}{2}}^2}), \overline X + u_{\frac{p}{2}}S (1+\frac{u_{\alpha}(\frac{1}{2}+u_{\frac{p}{2}}^{-2})^{\frac{1}{2}}}{\sqrt{n}} -\frac{\varepsilon}{u_{\frac{p}{2}}^2})\\
&=&(0.5020478, 0.5046162)
\end{eqnarray*}
\begin{verbatim}
> epsilon<-0.1
> alpha<-0.2
> LCLexc<- xbar - qnorm(1-p/2)*S*(1 + ((qnorm(1-alpha)*(1/2+(qnorm(1-p/2))^2)^(1/2)) /
(n^(1/2)))-epsilon/((qnorm(1-p/2))^2))
> UCLexc<- xbar + qnorm(1-p/2)*S*(1 + ((qnorm(1-alpha)*(1/2+(qnorm(1-p/2))^2)^(1/2)) /
(n^(1/2)))-epsilon/((qnorm(1-p/2))^2))
\end{verbatim}
The resulting control charts with the control limits for the bias reduction and the exceedance probability are shown in figure \ref{fig:improved}.

\begin{figure*}[h]
\caption{Data driven control charts}\label{fig:improved}
\begin{center}
\subfloat[Bias reduction]{\label{fig:bias}%
	\includegraphics[width=7cm]{opg1-xbarbias.jpg}}\qquad
\subfloat[Exceedance probability]{\label{fig:exc}%
	\includegraphics[width=7cm]{opg1-xbarexc.jpg}}\\
\end{center}
\end{figure*}

\begin{verbatim}
vanesdata 	<-	qcc.groups(vanes$V1,vanes$V2)
vanesqccbias<-	qcc(vanesdata, type="xbar", st.dev=S, center=xbar, limits=c(LCLbias,UCLbias))
vanesqccexc	<-	qcc(vanesdata, type="xbar", st.dev=S, center=xbar, limits=c(LCLexc,UCLexc))
\end{verbatim}
It is clear from both charts that the FAR is very much reduced since the corrections are used.\\


\subsection*{(g)}
\textit{(Here we do not use the data file.) Investigate which building block (the normal,
the parametric or the minimum chart with m = 3) is chosen by the improved
data driven control chart. Consider only the upper control limit. Perform a
simulation study with $n = 100$. \\
\noindent Take $10000$ simulations (each time $100$ observations) from:
\begin{enumerate}
\item the standard normal distribution 
\item the exponential distribution with mean 1. 
\end{enumerate}
\noindent Report for both situations how many times the normal, the parametric or the
minimum chart is chosen and give comments on the results.}\\[.5cm]

\noindent The method which is used here is described in \textit{Albers, W. and Kallenberg, W.C.M. (2006). Improved 
data driven control charts. TW-Report No. 1791}. The idea behind the selection rule described in the article is to stay as long as possible in the classical normal chart, to move to the parametric chart if the tails are too heavy or too light and to take the non-parametric MIN chart when the parametric family presumably fails too.\\ 

\noindent The data is telling us which chart to use. If we only consider the upper limit then the data tells us which chart to use in the following way:\\
If 
\[
d_{1N}\leq \frac{X_{(n)}-\overline X}{S}\leq d_{2N}
\]
Then the data is normally distributed. \\
If however:
\[
d_{1P}(\hat\gamma_u)\leq\frac{X_{(n)}-\overline X}{S}\leq d_{2P}(\hat\gamma_u)
\]
then the data is distributed in a parametric way. \\
If both equations are not true then we need to use the non-parametric MIN chart.\\

\noindent The parameters are calculated in the following way:
\begin{eqnarray*}
d_{1N}&=&u_{\frac{-0.7+0.5\log n}{n}}\\
d_{2N}&=&u_{\frac{5}{n\sqrt n}}\\
d_{1P}(\hat\gamma)&=&c(\hat\gamma)u_{\frac{-0.2+0.5\log n}{n}}^{1+\hat\gamma}\\
d_{2P}(\hat\gamma)&=&c(\hat\gamma)u_{\frac{3}{n\sqrt n}}^{1+\hat \gamma}\\
u_p&=&\overline\Phi^{\,-1}(p)=\Phi^{-1}(1-p)\\
c(\gamma)&=&\pi^{\frac{1}{4}}2^{-\frac{1+\gamma}{2}}\Gamma(\gamma+\frac{3}{2})^{-\frac{1}{2}}\\
\hat\gamma_u&=&1.1218\log\big(\frac{X_{ent(0.95n+1)}-\overline X}{X_{ent(0.75n+1)}-\overline X}\big)-1\\
n&=&100\\
\end{eqnarray*}

\noindent $10.000$ simulations from the standard normal distribution are taken in the following way:
\begin{verbatim}
>   setwd("C:/Users/Jef/Desktop/Applied Statistics")
> library(qcc)
# voor standaard normale variabelen:
n		<-	100
d1N		<-	qnorm(1-(-0.7+0.5*log(n))/n)
d2N		<-	qnorm(1-(5/(n*n^(1/2))))
Norm <- 0
Param <- 0
Minimal <- 0

for(i in 1:10000){
  #trek een sample van 100 standaard normaal verdeelde rv
  NormSample <- rnorm(n)
  #orden deze op grootte : X_(1)<...<X_(n) 
  SortNormSample <- sort(NormSample)
  #bereken mean en S (standard deviatie van deze 100 stochasten)
  Mean <- mean(SortNormSample)
  StDev <- sd(SortNormSample)
  #als d1N<=(X_(100)-mean)/S<=d2N dan is deze normaal verdeeld
  if(d1N<=(SortNormSample[100]-Mean)/StDev && (SortNormSample[100]-Mean)/StDev<= d2N){
    Norm <- Norm + 1
  }
  else{
    Gamma <- 1.1218 * log((SortNormSample[96]-Mean)/(SortNormSample[76]-Mean))-1
    GammaFunctie <- pi^(1/4)*2^(-(1+Gamma)/2)*gamma(Gamma+3/2)^(-1/2)
    d1P <- GammaFunctie*(qnorm(1-(-0.2+0.5*log(n))/n))^(1+Gamma)
    d2P <- GammaFunctie*(qnorm(1-3/(n*n^(1/2))))^(1+Gamma)
    if(d1P<=(SortNormSample[100]-Mean)/StDev&& (SortNormSample[100]-Mean)/StDev <= d2P){
      Param <- Param + 1
    }
    else {
      Minimal <- Minimal + 1
    }
  }  
}
> Norm
[1] 4555
> Param
[1] 2960
> Minimal
[1] 2485
\end{verbatim}
When we do this a few times we find the following values for how many times the normal, the parametric and the minimum chart is chosen: \\

\begin{tabular}{|c|c|c|c|}
\hline
simulation number		& Normal chart 		& Parametric chart		& Minimum Chart \\
\hline
1										& 4555						& 2960								& 2485					\\
2										& 4674						& 2924								& 2402					\\
3										& 4659						& 2901								& 2440					\\
\hline
\end{tabular}
\\
\noindent From this table we can conclude that the way in which the selection of the chart takes place may not be optimal. All these data sets should have led to a normal chart; although almost half of the samples are seen as normal, there are too many samples that are classified as parametric and even as minimum charts.\\
\\
\noindent With observations from the exponential distribution with mean $1$ we get:
\begin{verbatim}
# voor exponentieel verdeelde variabelen mean 1:
n		<-	100
d1N		<-	qnorm(1-(-0.7+0.5*log(n))/n)
d2N		<-	qnorm(1-(5/(n*n^(1/2))))
Norm <- 0
Param <- 0
Minimal <- 0

for(i in 1:10000){
  #trek een sample van 100 exponentieel verdeelde rv's met mean 1
  NormSample <- rexp(n)
  #orden deze op grootte : X_(1)<...<X_(n) 
  SortNormSample <- sort(NormSample)
  #bereken mean en S (standard deviatie van deze 100 stochasten)
  Mean <- mean(SortNormSample)
  StDev <- sd(SortNormSample)
  #als d1N<=(X_(100)-mean)/S<=d2N dan is deze normaal verdeeld
  if(d1N<=(SortNormSample[100]-Mean)/StDev && (SortNormSample[100]-Mean)/StDev<= d2N){
    Norm <- Norm + 1
  }
  else{
    Gamma <- 1.1218 * log((SortNormSample[96]-Mean)/(SortNormSample[76]-Mean))-1
    GammaFunctie <- pi^(1/4)*2^(-(1+Gamma)/2)*gamma(Gamma+3/2)^(-1/2)
    d1P <- GammaFunctie*(qnorm(1-(-0.2+0.5*log(n))/n))^(1+Gamma)
    d2P <- GammaFunctie*qnorm(1-3/(n*n^(1/2)))^(1+Gamma)
    if(d1P<=(SortNormSample[100]-Mean)/StDev&&(SortNormSample[100]-Mean)/StDev <= d2P){
      Param <- Param + 1
    }
    else {
      Minimal <- Minimal + 1
    }
  }  
}
> Norm
[1] 14
> Param
[1] 5730
> Minimal
[1] 4256
\end{verbatim}

\begin{tabular}{|c|c|c|c|}
\hline
simulation number		& Normal chart 		& Parametric chart		& Minimum Chart \\
\hline
1										& 14							& 5730								& 4256					\\
2										& 23							& 5660								& 4317					\\
3										& 21 							& 5699								& 4280					\\
\hline
\end{tabular}
\\

\noindent For an exponential distribution this selection rule works slightly better than for a normal distribution. Now at least more than half of the samples are seen as parametric, but still a lot are seen as non-parametric.
From this we can conclude this selection rule is not very precise.

% ------------------------------------------------------------------------------------------------------------------------------------------------
\newpage
\section*{Problem 2}
\textit{Suppose that a measurement of the hardness of a product has a normal distribution. Every hour a sample of $n$ units is drawn and an $\overline{X}$-chart with control limits $\mu_0\pm\frac{3\sigma}{\sqrt n}$ is used.}
\subsubsection*{(a)}
\textit{Compute how many hours it should take on average to detect a shift in the process mean of size $\sigma$ when $n=5$.}\\

\noindent Let $\overline{X}_i$ be the averages of the samples that are drawn each hour. Then $E(\overline{X}_i)=\mu $ \ and $Var(\overline X_i)=\frac{\sigma^2}{n}\  \forall i$.\\

\noindent
The probability that $\overline X_i$ is outside the control limits is equal to: 

\begin{eqnarray*}
P_{out \ of \ control}(\overline X_i) 
	&=				&1-P(LCL<\overline X_i<UCL\mid\mu=\mu_0+\sigma)\\
	&=				&1-P(\mu_0-\frac{3\sigma}{\sqrt n}		<	\overline X_i	<			\mu_0+\frac{3\sigma}{\sqrt n}\mid\mu=\mu_0+\sigma)\\
	&=				&1-P(\mu_0-\frac{3\sigma}{\sqrt n} -\mu		<	\overline X_i - \mu	<			\mu_0+\frac{3\sigma}{\sqrt n} - \mu \mid\mu=\mu_0+\sigma )\\
	&=				&1-P(\mu_0-\frac{3\sigma}{\sqrt n} -(\mu_0+\sigma)		<	\overline X_i - (\mu_0+\sigma)	<			\mu_0+\frac{3\sigma}{\sqrt n} - (\mu_0+\sigma ) )\\
	&=				&1-P(\frac{-3\sigma - \sqrt{n}\sigma}{\sqrt n} 		<	\overline X_i - (\mu_0+\sigma)	<	\frac{3\sigma - \sqrt{n}\sigma}{\sqrt n}  )\\
	&=				&1-P(\frac{-3\sigma - \sqrt{n}\sigma}{\sqrt n} \frac{\sqrt n}{\sigma}		<	\frac{\overline X_i - (\mu_0+\sigma)}{\sigma / \sqrt{n}}	<	\frac{3\sigma - \sqrt{n}\sigma}{\sqrt n} \frac{\sqrt n}{\sigma} )\\
	&=				&1-P(-3 - \sqrt{n} 		<	\frac{\overline X_i - (\mu_0+\sigma)}{\sigma / \sqrt{n}}	<	3 - \sqrt{n}  )\\
	&=				&1-P(-3 - \sqrt n     < Z < 3-\sqrt n)\\
	&=				&P(Z < -3 - \sqrt n) + P(Z > 3-\sqrt n)\\	
	&=				&\Phi(-3-\sqrt n) + 1-\Phi(3-\sqrt n)\\
	&=				&\Phi(-3-\sqrt 5) + 1-\Phi(3-\sqrt 5)\\
	&\approx	&\Phi(-5.24) + 1-\Phi(0.76)\\		
	&\approx	&0.2225 \\
\end{eqnarray*}


	\begin{verbatim}
		# opgave 2a
>  1-pnorm(3-5^(1/2))+pnorm(-3-5^(1/2))
[1] 0.2224540
> (1-pnorm(3-5^(1/2))+pnorm(-3-5^(1/2)))^(-1)
[1] 4.495312
	\end{verbatim}

\noindent Denote the number of samples until alarm as $N$. Then the average number of samples needed to detect the shift is equal to $ E(N)$. \\

\noindent $N$ has the geometric distribution so $P(N>k)=(1-p)^k$ with $p \approx 0.2225$. Therefore the average run length (ARL) is equal to $E(N)=\frac{1}{p}\approx\frac{1}{0.2225} \approx 4.5$. \\
\\
We conclude it takes almost $4.5$ hours to notice that the mean has shifted.

\newpage

\subsubsection*{(b)}
\textit{What should be the smallest sample size $n$ so that a shift in the mean of size $\sigma$ would be on average detected in less than 3 hours? }\\
\\
To detect a shift in the mean of size $\sigma$  on average in less than 3 hours we must have $E(N) < 3$ because:\\
$ E(N) = $ average amount of samples to detect the shift $=$ average amount of hours to detect the shift. We want it to be smaller than 3 hours. So we wish $ E(N) < 3$.\\
\\
\noindent We know from (a): $E(N) = \frac{1}{p_n}$, where $p_n$ denotes the probability that $\overline X_i$ is outside the control limits when we work with a sample size of $n$ units. \\
\\
So we want to find the smallest $n$ such that: 
\begin{eqnarray*}
E(N) 					&= 	& \frac{1}{p_n}			 \\
							&= 	& \frac{1}{\Phi(-3-\sqrt n) + 1-\Phi(3-\sqrt n)}	\quad < \quad  3\\
							&		&		\\
							&		&	\Longleftrightarrow \\
							&		&		\\	
\frac{1}{E(N)}&= 	& \Phi(-3-\sqrt n) + 1-\Phi(3-\sqrt n)	\quad > \quad  \frac{1}{3}\\
							&		&		\\
							&		&	\Longleftrightarrow \\
							&		&		\\	
1- \frac{1}{E(N)}
							&= 	& 1 - (\Phi(-3-\sqrt n) + 1-\Phi(3-\sqrt n))	\\
							&= 	& \Phi(3-\sqrt n) - \Phi(-3-\sqrt n) \quad < \quad  \frac{2}{3}	\\
\end{eqnarray*}

\noindent We use R to calculate the smallest $n$ such that $\Phi(3-\sqrt n) - \Phi(-3-\sqrt n) \quad < \quad  \frac{2}{3}$

	\begin{verbatim}
		# opgave 2b(i)
>  pnorm(3-6^(1/2))-pnorm(-3-6^(1/2))
[1] 0.7090153
>  pnorm(3-7^(1/2))-pnorm(-3-7^(1/2))
[1] 0.6384237
	\end{verbatim}

\noindent And we conclude a minimal sample size of $7$ items is required so that a shift in the mean of size $\sigma$ would be detected on average in less than 3 hours.\\
\\
\noindent \textit{Same question if we would like to detect this change within 3 hours with 95\% confidence.}\\
\\
We want the probability to detect a shift in the mean of size $\sigma$  within 3 hours to be at least 95\%.

\begin{eqnarray*}
P(detect \ change \ within \ 3\ hours) 
			& = &  P ( \ first \ sample \ is \ out \ of \ control\ limits \ )\\
			&		& + \ P ( \ first\ is\ not,\ second \ sample \ is \ out \ of \ control\ limits \ ) \\
			&		& + \ P ( \ first\ and\ second\ are\ not,\ third \ sample \ is \ out \ of \ control\ limits \ ) \\
			& = & 1 - P ( \ first \ three\  samples\ are\ within\ the \ control\ limits \ )\\
			& = & 1 - (P_{within \ control\ limits})^3\\			
			& = & 1 - (1 - P_{out \ of \ control\ limits}) ^3\\
			& = & 1 - ( 1 - \ (\Phi(-3-\sqrt n) + 1-\Phi(3-\sqrt n)))  ^3\\	
			& = & 1 - ( \Phi(3-\sqrt n) -\Phi(-3-\sqrt n) \ )^3
\end{eqnarray*}

\noindent Now we want a detection probability of at least $95 \%$; so we want to find $n$ as small as possible such that: 

\begin{eqnarray*}
P(detect \ change \ within \ 3\ hours) 		& \geq & 0.95 \\
1 - (  \Phi(3-\sqrt n) -\Phi(-3-\sqrt n) )^3 
																					& \geq & 0.95 \\		
																					&		&		\\
																					&		\Longleftrightarrow & \\
																					&		&		\\					
(  \Phi(3-\sqrt n) -\Phi(-3-\sqrt n)  )^3 
																					& \leq & 0.05 \\	
\end{eqnarray*}


	\begin{verbatim}
		# opgave 2b(ii)
> (  pnorm(3-11^(1/2))-pnorm(-3-11^(1/2))  )^3
[1] 0.05305741
> (  pnorm(3-12^(1/2))-pnorm(-3-12^(1/2))  )^3
[1] 0.03316510
	\end{verbatim}

\noindent We conclude $n$ must be $12$:\\
We need a sample size of at least $12$ items to detect a shift of the mean of size $\sigma$ within 3 hours with 95\% confidence.

\subsubsection*{(c)} 
\textit{Assume you have 2 options: to sample $n_1=5$ elements every hour or to sample $n_2=10$ elements every two hours. Which would you choose? Clearly indicate the criterion of your choice and the calculations that you used for your choice.}\\

\textbf{Option 1:}\\
\noindent From part (a) we know that the ARL (average run length) with samples of size $5$ which are taken every hour is equal to $4.5$ hours (when the mean has shifted over a distance of $\sigma$). \\

\textbf{Option 2:}\\
When we take a sample of size $10$ every two hours, we can calculate, in the same way as in (a), the probability that a sample falls outside the control limits:
\begin{eqnarray*}
p_n 	&=				& 1-P(LCL<\overline X_i<UCL \mid \mu = \mu_0 +\sigma)\\
			&=				&	1-P(-3-\sqrt n < Z < 3-\sqrt n)\\
			&=				& 1-\Phi(3-\sqrt{10})+\Phi(-3-\sqrt{10})\\
			&\approx	& 0.5645
\end{eqnarray*}

	\begin{verbatim}
		# opgave 2c
  # compute the out-of-control-probability and the ARL for n=5:
> (1-pnorm(3- 5^(1/2))+pnorm(-3- 5^(1/2)))
[1] 0.2224540
> (1-pnorm(3- 5^(1/2))+pnorm(-3- 5^(1/2)))^(-1)
[1] 4.495312
  # compute the out-of-control-probability for n=10:
> (1-pnorm(3-10^(1/2))+pnorm(-3-10^(1/2)))
[1] 0.5644564
	# Samples are taken every two hours, so the ARL for n=10 is:
> 2*(1-pnorm(3-10^(1/2))+pnorm(-3-10^(1/2)))^(-1)
[1] 3.543232	
\end{verbatim}
	 
\noindent In option 2 the samples are taken every two hours, so the average run length $E(N) = 2 * \frac{1}{p_n} = \frac{2}{1-\Phi(3-\sqrt{10})+\Phi(-3-\sqrt{10})} \approx 3.5$. \\
\\
\\
\noindent In option 1 the ARL is 4.5 hours and in option 2 the ARL is 3.5 hours. We want to detect changes as fast as possible. With a smaller ARL we detect out-of-control situations faster, therefore we prefer to take a sample of 10 items every two hours. We choose the second option.

\subsubsection*{(d)}
\textit{Suppose that apart from the standard out-of-control criterion (``one observation outside the control limits'') we also use the following runs rule: two successive points within $\frac{\sigma}{\sqrt n}$ of the centre line. Compute the $ARL_{in-control}$ for this control chart.}\\

\noindent Let $p$ be the probability that a point of the control chart falls outside the control limits. Because of the runs rule, the relevant warning region here is the region within $\frac{\sigma}{\sqrt n}$ of the centre line: let $q$ be the probability that a point falls between the two warning limits (WL) $\mu_0\pm\frac{\sigma}{\sqrt n}$; two successive points in this region trigger an alarm. Finally, let $r$ be the probability that a point falls in the safe region, between a warning limit and the nearby control limit (CL). \\
Then we have the following probabilities for $p,\,q$ and $r$:
\begin{eqnarray*}
p	&=& 			P_{point \ falls \ in \ out\ of\ control\ region}\\
	&=&				1-P(LCL < \overline X_i < UCL)\\
	&=&				1-P(\mu_0-\frac{3\sigma}{\sqrt n}<\overline X_i<\mu_0+\frac{3\sigma}{\sqrt n})\\
	&=&				1-P(\frac{\mu_0-\frac{3\sigma}{\sqrt n}-\mu_0}{\frac{\sigma}{\sqrt n}}<\frac{\overline X_i-\mu_0}{\frac{\sigma}{\sqrt n}}<\frac{\mu_0+\frac{3\sigma}{\sqrt n}-\mu_0}{\frac{\sigma}{\sqrt n}})\\
	&=&				1-P(-3<Z<3)\\
	&=&				1-(\Phi(3)-\Phi(-3))\\
	&\approx&	0.0027\\ 
\\
r	&=& 			P_{point \ falls \ in \ safe\ region}\\
	&\approx&	1-P(LWL < \overline X_i < UWL)\\
	&=&				1-P(\mu_0-\frac{\sigma}{\sqrt n}<\overline X_i<\mu_0+\frac{\sigma}{\sqrt n})\\
	&=&				1-P(\frac{\mu_0-\frac{\sigma}{\sqrt n}-\mu_0}{\frac{\sigma}{\sqrt n}}<\frac{\overline X_i-\mu_0}{\frac{\sigma}{\sqrt n}}<\frac{\mu_0+\frac{\sigma}{\sqrt n}-\mu_0}{\frac{\sigma}{\sqrt n}})\\
	&=&				1-P(-1<Z<1)\\
	&=&				1-(\Phi(1)-\Phi(-1))\\
	&\approx&	0.3173\\
\\
q	&=& 			P_{point \ falls \ in \ warning\ region}\\
	&=&				P(LWL < \overline X_i < UWL)\\
 	&\approx&	1-P_{point \ falls \ outside \ the\ control\ limits}-P_{point \ falls \ in \ the\ safe\ region}\\
 	&=&				1-p-r\\
&\approx&1-0.0027-0.3173\\
&\approx&0.6800
\end{eqnarray*}

	\begin{verbatim}
		# opgave 2d
> 1-(pnorm(3)-pnorm(-3))  
[1] 0.002699796
> 1-(pnorm(1)-pnorm(-1)) 
[1] 0.3173105
> 1- (1-(pnorm(3)-pnorm(-3))) - (1-(pnorm(1)-pnorm(-1)) )
[1] 0.6799897
	\end{verbatim}

\noindent Let $L$ denote the remaining ARL after a point in the safe region and $L'$ the corresponding ARL after a point in the warning region. Then we have the following two equalities (notice $p+q+r=1$):

\begin{eqnarray*}
L		&=&	r(1+L)+q(1+L')+p*1\\
							&=& 1 + r L + q L'\\
L'	&=&	r(1+L)+q*1+p*1.\\
							&=& 1 + r L
\end{eqnarray*}

\noindent Inserting the values we found for $p,\, q$ and $r$ results in:
\begin{eqnarray*}
L 	&\approx& 1+ 0.3173 L +0.6800 L'\\
L'	&\approx& 1+0.3173L.
\end{eqnarray*}
This is a set of two equations with two unknowns; substituting the second equation into the first, we can solve for $L$ and $L'$.
\begin{eqnarray*}
L 	&\approx& 1+ 0.3173 L +0.6800\, (1+0.3173L)\\
					 	&=& 1.6800 + 1.6800 \cdot 0.3173\, L\\
					 	&\approx& 1.6800 + 0.5331\, L\\	
					 	&& \\	
					 	& \Rightarrow & \\
					 	&& \\
L 	&\approx& \frac{1.6800}{1-0.5331}\\
						&\approx&  	3.598		\\
						&&\\			 				 	
L'	&\approx& 1+0.3173 \cdot 3.598\\
						&\approx&		2.142\\
\end{eqnarray*}
Now we can compute the $ARL_{in-control}$. The first point falls outside the control limits with probability $p$ (run length $1$), in the warning region with probability $q$ (expected run length $1+L'$), and in the safe region with probability $r$ (expected run length $1+L$). Hence:

\begin{eqnarray*}
ARL_{in-control} &=& 				p+q(L'+1)+r(L+1)=p+q+r+qL'+rL \\
								&\approx& 	1+0.6800 \cdot 2.142+0.3173 \cdot 3.598 \\
								&\approx&		3.598.
\end{eqnarray*}
Note that this equals $L$: before the first observation we are in the same situation as directly after a point in the safe region.
								
\noindent The old $ARL_{in-control}=\frac{1}{p}\approx 370$. So, as we expected, a false alarm is obtained a lot faster with the extra warning limits. This false alarm rate seems very high, but it becomes plausible when we look at the probabilities: the probability of a point in the warning region is $0.68$, so two successive points there have probability $0.68^2\approx 0.46$, while a point outside the control limits has probability $0.003$. The latter probability is more than 100 times smaller than the probability of two successive points in the warning region, which explains the high false alarm rate.\\

% ------------------------------------------------------------------------------------------------------------------------------------------------
\newpage
\section*{Problem 3}

\textit{Viscosity of reactants is an important intermediate quality characteristic of a certain chemical process. A monitoring procedure must be developed in order to ensure a good quality of the end-product. Since viscosity is not a characteristic of the final product, it is not essential that viscosity has a constant level. Instead, it is essential that viscosity stays below a certain threshold. In an in-control situation the viscosity follows a normal distribution with variance 1.4 and a mean not exceeding 30.}\\

\subsubsection*{(a)}
\textit{Derive a Generalized Likelihood Ratio statistic for on-line (Phase II) monitoring
this situation.}\\[.5cm]

\noindent Suppose we have observations $X_i$ with a normal distribution with known variance: $\mathcal{N}(\mu_i, 1.4)$. \\
\noindent In the in-control situation the mean $\mu_i$ is $\leq 30$.
\begin{eqnarray*}
	H_{0,i}\ : &\mu_j\leq 30 \ \ \   for \, j=1,\ldots,i\\
	H_{1,i}\ : & \left\{
				\begin{array}{ll}
					\mu_j\leq 30 	&	for \ j=1,\ldots,k\\
					\mu_j>30			&	for \ j=k+1,\ldots,i\\
				\end{array}
							\right. \quad for \ some \ 1 \leq k < i.\\
\end{eqnarray*}

\noindent The corresponding generalized likelihood ratio statistic is defined as follows:
\[
\Lambda_i=\max_{1\leq k<i}\frac{\prod_{j=1}^k \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)\prod_{j=k+1}^i \sup_{\eta_j\in\Theta_1} f_{\eta_j}(x_j)}{\prod_{j=1}^i \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)}=
	\max_{1\leq k<i}\frac{\prod_{j=k+1}^i \sup_{\eta_j\in\Theta_1}f_{\eta_j}(x_j)}{\prod_{j=k+1}^i \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)}\\
\]
It is easier to calculate the logarithm of this equation:

\begin{eqnarray*}
%\[
	\begin{array}{ll}
\log\Lambda_i 
		& = \log \left( 
						\max_{1\leq k<i} \frac{ \prod_{j=k+1}^i \sup_{\eta_j\in\Theta_1}f_{\eta_j}(x_j)}{\prod_{j=k+1}^i \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)}
							\right) 		\\
							\\
		& =	\max_{1\leq k<i} \ \log \left( 
						\frac{ \prod_{j=k+1}^i \sup_{\eta_j\in\Theta_1}f_{\eta_j}(x_j)}{\prod_{j=k+1}^i \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)}
							\right)		\\
							\\
		& =\max_{1\leq k<i} \left[\ 
						\log \left( \prod_{j=k+1}^i \sup_{\eta_j\in\Theta_1}f_{\eta_j}(x_j)\right) - \log\left(\prod_{j=k+1}^i \sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j)\right)
												\right] \\
												\\
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
						\log ( \sup_{\eta_j\in\Theta_1}f_{\eta_j}(x_j)) - \log (\sup_{\theta_j\in\Theta_0}f_{\theta_j}(x_j))
												\right] \\		
												\\
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
						\sup_{\eta_j\in\Theta_1} (\log f_{\eta_j}(x_j)) -  \sup_{\theta_j\in\Theta_0} (\log f_{\theta_j}(x_j))
												\right] \\	
												\\		
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
						\sup_{\mu_j > 30} (\log f_{\mu_j}(x_j)) -  \sup_{\mu_j \leq 30} ( \log f_{\mu_j}(x_j))
												\right] 
	\end{array}
\end{eqnarray*}
				

\noindent Inserting the density function $f_{\mu_j}(x_j)= \frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x_j-\mu_j)^2}{2\sigma^2}}$ results in:
%heb hieronder in de eerste regel een e-macht toegevoegd, die was je volgens mij vergeten.
\begin{eqnarray*}
		\begin{array}{ll}
\log\Lambda_i 
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
					\sup_{\mu_j > 30} 		\left( \log\left(\frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x_j-\mu_j)^2}{2\sigma^2}} \right) \right) 
				- \sup_{\mu_j \leq 30} 	\left( \log\left(\frac{1}{\sqrt{2\pi\sigma^2}} e^{-\frac{(x_j-\mu_j)^2}{2\sigma^2}} \right) \right)
											\	\right] \\
											\\		
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
					\sup_{\mu_j > 30} 		\left( \log(\frac{1}{\sqrt{2\pi\sigma^2}}) -\frac{(x_j-\mu_j)^2}{2\sigma^2} \right) 
				- \sup_{\mu_j \leq 30} 	\left( \log(\frac{1}{\sqrt{2\pi\sigma^2}}) -\frac{(x_j-\mu_j)^2}{2\sigma^2} \right)
											\	\right] \\
											\\		
		& =\max_{1\leq k<i} \sum_{j=k+1}^i \left[\ 
					\sup_{\mu_j > 30} 		\left(  -\frac{(x_j-\mu_j)^2}{2\sigma^2} \right) 
				- \sup_{\mu_j \leq 30} 	\left(  -\frac{(x_j-\mu_j)^2}{2\sigma^2} \right)
											\	\right] \\
											\\			
		& = \max_{1\leq k<i} \sum_{j=k+1}^i \frac{1}{2\sigma^2} \left[\ 
					\sup_{\mu_j > 30} 		\left(  -(x_j-\mu_j)^2 \right) 
				- \sup_{\mu_j \leq 30} 	\left(  -(x_j-\mu_j)^2 \right)
											\	\right] \\
											\\	
		& = \max_{1\leq k<i} \sum_{j=k+1}^i \frac{1}{2\sigma^2} \left[\ 
					- (x_j -\max(x_j ; 30))^2 
				- - (x_j -\min(x_j ; 30))^2 
											\	\right] \\
											\\	
		& = \max_{1\leq k<i} \sum_{j=k+1}^i \frac{1}{2\sigma^2} \left[\ 
					- (x_j -\max(x_j ; 30))^2 
					+ (x_j -\min(x_j ; 30))^2 
											\	\right] \\
		\end{array}
\end{eqnarray*}
\\
\noindent 
We write $T_i=\log\Lambda_i$ and $W_j=\frac{1}{2\sigma^2} \left[	- (x_j -\max(x_j ; 30))^2 + (x_j -\min(x_j ; 30))^2 \	\right] $.


\begin{eqnarray*}
T_i 		& = &\max_{1\leq k<i} \sum_{j=k+1}^i W_j \\
				& &\\
T_{i+1} & =& \max_{1\leq k<i+1} \sum_{j=k+1}^{i+1} W_j\\	
				&	=& \max \left( \max_{1\leq k<i} \sum_{j=k+1}^{i+1} W_j \ ; \ \sum_{j=i+1}^{i+1} W_{j} \right)\\
				& =& \max \left( \max_{1\leq k<i} ( \sum_{j=k+1}^{i} W_j + W_{i+1}) \ ; \ W_{i+1} \right)\\
				& =& \max \left( \max_{1\leq k<i} ( \sum_{j=k+1}^{i} W_j) + W_{i+1} \ ; \ W_{i+1} \right)\\
				& =& \max \left( T_i + W_{i+1} \ ; \ W_{i+1} \right)\\
				& =& \max(T_i ; 0) + W_{i+1} 
\end{eqnarray*}
	
\noindent We have thus obtained the following recursive form for the GLR chart:
$$T_{i+1}=  \max(T_i;0) + W_{i+1}$$
\noindent Where $T_i=\log\Lambda_i$ and $W_j=\frac{1}{2\sigma^2} \left[	- (x_j -\max(x_j ; 30))^2 + (x_j -\min(x_j ; 30))^2 \	\right]$.

\subsubsection*{(b)}
\textit{Implement the procedure from (a) in R and derive critical values such that your
procedure satisfies \\ \noindent $ARL_{in-control} = 200$.}\\[.5cm]

\noindent We want to derive the critical value $h^+$ such that in an in-control situation the run length ($ARL_{in-control}$) is on average $200$ samples; i.e.\ our procedure gives a false alarm on average after $200$ samples.

\[ \left\{
\begin{array}{rcl}
\mathrm{if}\ \Lambda_n < h^+ 			&:	& \mathrm{continue\, sampling}\\
\mathrm{if}\ \Lambda_n \geq h^+ 	&:	& \mathrm{accept} \,H_1\\
\end{array} \right.
\]

\noindent For the simulation we choose $\mu = 30$ because it is the worst case within the in-control region:\\
In an in-control situation $\mu$ is less than or equal to $30$. For $\mu < 30$ the increments $W_j$ are on average more negative than for $\mu = 30$, so for a fixed critical value $h^+$ the in-control ARL only becomes larger. Hence, if we choose the smallest $h^+$ such that the ARL equals $200$ for $\mu = 30$, then for every $\mu \leq 30$ we have $ARL_{in-control} \geq 200$.\\

\noindent We thus look for this $h^+$ by simulation:

\begin{verbatim}
# opgave 3b   

	> v <- 30
# we test different values of the critical value Hplus such that the ARL will become
 on average 200:
	> Hplus <- 7.60
#	we want 10.000 observations of the RL to calculate the ARL:
	> RL <- array(dim=c(10000,1))
	> for(i in 1:10000){
	+   j=0
	+   Tj=0
	+   while(Tj <= Hplus){
	+     j <- j + 1
	+     xj <- rnorm(1, mean=v, sd=sqrt(1.4))
	+     Wj <- -1/(2*(1.4^2))*(  (xj-max(xj,30))^2-(xj-min(xj,30))^2  )
	+     Tj <- Wj + max(0,Tj)
	+   }   
# for all i we want to post up the j:
	+   RL[i,1] <- j
	+ }
	> ARL <- mean(RL)
	> hist(RL)
	> boxplot(RL)
	> ARL
	[1] 198.1204
\end{verbatim}

\noindent We ran this program a few times, first for different values of $h^+$.
\begin{itemize}
\item for $h^+ = 5 \sigma \approx 5.92$ we get an ARL of 128
\item for $h^+ = 6 \sigma \approx 7.10$ we get an ARL of 180
\item for $h^+ = 7 \sigma \approx 8.28$ we get an ARL of 231 
\end{itemize}

\noindent Running this program a few times, we see that the ARL is not really stable. For the value $h^+ = 6.5 \sigma \approx 7.69$ we get an ARL really close to 200:

\begin{itemize}
\item for $h^+ = 6.5 \sigma \approx 7.69$ we get an ARL of 203.01
\item for $h^+ = 6.5 \sigma \approx 7.69$ we get an ARL of 202.77
\item for $h^+ = 6.5 \sigma \approx 7.69$ we get an ARL of 205.93
\end{itemize}

\noindent The value $h^+ = 6.5 \sigma \approx 7.69$ seems to be a little too high, so we run our program another few times with the critical value $h^+ = 7.65$:
\begin{itemize}
\item for $h^+ = 7.65$ we get an ARL of 200.43
\item for $h^+ = 7.65$ we get an ARL of 203.23
\item for $h^+ = 7.65$ we get an ARL of 198.12
\item for $h^+ = 7.65$ we get an ARL of 200.59
\end{itemize} 

\begin{figure*}[h]	
	\centering{
	\caption{Histogram}\label{fig:histogram}
 	\includegraphics[width=10cm]{opg3RLhistogram.jpg} 
 	}
	\end{figure*}

\noindent This seems to be an appropriate value for $h^+$, but we have to remark that the estimated ARL is not really stable. 
Therefore we decided to make a histogram of the simulated run lengths, see figure \ref{fig:histogram}. The RL's appear to be roughly geometrically (approximately exponentially) distributed, as one would expect for a run length.
The estimate could be made more precise by simulating more run lengths, but we will not do that here because we already have a quite accurate value for $h^+$. 

\newpage
\subsubsection*{(c)}
\textit{Run your GLR control chart on the data set `viscosity.txt' and report whether
your control chart detects an out-of-control situation.}\\[.5cm]

\noindent The R-code of the program is written below. Note that the code divides by $2\cdot 1.4^2$, whereas the variance equals $\sigma^2 = 1.4$, so strictly speaking the factor should be $2\sigma^2 = 2.8$; since the critical value $h^+$ in (b) was calibrated by simulation with exactly the same statistic, the monitoring procedure is still internally consistent. From observation number 94 onwards the $T$-values are above our critical value $h^+ = 7.65$: 
our control chart detects an out-of-control situation at observation number 94.\\

\noindent Program:
\begin{verbatim}
> viscosity <- read.table("viscosity.txt")
> Tvalues <- array(dim=c(170,1))
> Tj <- 0
> for(j in 1:170){   
+   xj <- viscosity[j,1]  
+   Wj <- -1/(2*(1.4^2))*(  (xj-max(xj,30))^2-(xj-min(xj,30))^2  )
+   Tj <- Wj + max(0,Tj)
+   Tvalues[j,1] <- Tj   
+ }
> Tvalues
> Tvalues < 7.65
\end{verbatim}

\noindent When we run this program we find the following results: 
\newpage
\begin{verbatim}
> Tvalues
\end{verbatim}
\begin{tabular}[t]{p{2.9cm} | p{2.9cm} | p{3.2cm} | p{3.2cm} }
  \hspace{1cm} 	[,1] \hspace{1.5cm}
  [1,] -0.1473469388
  [2,] -1.2346938776
  [3,] -1.3494897959
  [4,] -0.9900255102
  [5,] -0.7634948980
  [6,]  0.0146938776
  [7,] -0.3526530612
  [8,] -1.6071683673
  [9,] -0.4114540816
 [10,] -0.0918367347
 [11,] -0.0612500000
 [12,] -1.3849234694
 [13,] -0.8173724490
 [14,] -2.4357397959
 [15,] -0.0800000000
 [16,] -0.0800000000
 [17,]  0.3922448980
 [18,] -0.2855357143
 [19,] -1.2685969388
 [20,] -0.1359438776
 [21,] -1.6458163265
 [22,] -0.0006377551
 [23,] -0.0771683673
 [24,] -0.0185969388
 [25,] -0.2066326531
 [26,]  0.5893877551
 [27,] -0.0394132653
 [28,] -0.0450000000
 [29,]  0.0587755102
 [30,]  0.0618622449
 [31,]  0.0778061224
 [32,] -1.1568877551
 [33,] -0.0663520408
 [34,] -0.4858163265
 [35,] -0.4858163265
 [36,] -0.1285969388
 [37,]  0.0716581633
 [38,] -0.2895918367
 [39,] -0.1044897959
 [40,] -1.9573724490
 [41,] -0.9601020408
 [42,]  0.0663520408
 &
 [43,] -1.6981632653
 [44,] -0.0663520408
 [45,] -0.9800000000
 [46,] -3.5492091837
 [47,] -1.2572448980
 [48,]  0.0471683673
 [49,]  0.2492346939
 [50,] -1.6381122449
 [51,]  0.0493877551
 [52,] -1.4322704082
 [53,] -0.5000000000
 [54,] -1.0204081633
 [55,] -0.7285969388
 [56,] -0.3734948980
 [57,] -0.8082653061
 [58,] -0.3432653061
 [59,] -0.0858163265
 [60,] -1.0825510204
 [61,] -0.5971683673
 [62,] -0.0516581633
 [63,]  0.0716581633
 [64,]  0.1256377551
 [65,] -0.4181377551
 [66,] -0.0637755102
 [67,] -0.0450000000
 [68,] -0.1250000000
 [69,] -0.2812500000
 [70,] -0.3257397959
 [71,]  0.6449234694
 [72,]  1.2979846939
 [73,]  1.9267857143
 [74,]  1.9369897959
 [75,]  1.9140306122
 [76,]  2.0940306122
 [77,]  0.9475000000
 [78,]  0.6768622449
 [79,]  0.6915561224
 [80,]  0.9674744898
 [81,]  1.4393112245
 [82,]  1.2880612245
 [83,]  1.9088775510
 [84,]  3.1774744898
 [85,]  3.2019897959
 &
 [86,]  3.8069897959
 [87,]  5.3382397959
 [88,]  5.3376020408
 [89,]  5.2161479592
 [90,]  6.0335204082
 [91,]  6.0175765306
 [92,]  6.6953571429
 [93,]  6.6585204082
 [94,]  8.1525000000
 [95,]  8.0696173469
 [96,]  9.8746173469
 [97,]  9.5254081633
 [98,] 11.5111479592
 [99,] 11.4130867347
[100,] 11.6433163265
[101,] 11.0769642857
[102,] 11.2882142857
[103,] 14.5212755102
[104,] 14.8079081633
[105,] 16.0651530612
[106,] 17.3681122449
[107,] 16.3980867347
[108,] 16.4104336735
[109,] 17.2647448980
[110,] 17.5097448980
[111,] 16.6832142857
[112,] 16.6841326531
[113,] 16.6510714286
[114,] 16.7004591837
[115,] 17.2294387755
[116,] 17.2303571429
[117,] 17.5795663265
[118,] 17.5465051020
[119,] 17.9451020408
[120,] 19.2251020408
[121,] 20.5512244898
[122,] 20.6657397959
[123,] 20.8959693878
[124,] 21.5572193878
[125,] 21.9064285714
[126,] 21.9043622449
[127,] 21.6977295918
[128,] 24.0089795918
&
[129,] 24.0518622449
[130,] 24.1106377551
[131,] 25.9565816327
[132,] 25.8978061224
[133,] 25.7728061224
[134,] 27.6601530612
[135,] 28.1459693878
[136,] 27.9009693878
[137,] 27.6509438776
[138,] 27.9766836735
[139,] 28.1016836735
[140,] 27.8714540816
[141,] 28.0227040816
[142,] 28.2881122449
[143,] 27.2677040816
[144,] 27.2677040816
[145,] 28.5938265306
[146,] 28.6040306122
[147,] 28.5652295918
[148,] 29.0723979592
[149,] 31.3530357143
[150,] 31.4002040816
[151,] 31.6977551020
[152,] 31.6609183673
[153,] 31.3176530612
[154,] 30.8318367347
[155,] 30.8260969388
[156,] 30.3332142857
[157,] 30.5491326531
[158,] 30.3899234694
[159,] 28.9205357143
[160,] 28.9081887755
[161,] 30.4771683673
[162,] 31.0665561224
[163,] 31.5314795918
[164,] 32.2514795918
[165,] 32.7586479592
[166,] 33.4616071429
[167,] 34.8704081633
[168,] 34.8091581633
[169,] 35.4952806122
[170,] 35.9132397959
\end{tabular}

\newpage 
\begin{verbatim}
> Tvalues < 7.65
\end{verbatim}

\begin{tabular}[t]{p{1.9cm} | p{1.9cm} | p{2.1cm} | p{2.1cm} | p{2.1cm}}
  \hspace{1cm}    [,1]  
  [1,]  TRUE
  [2,]  TRUE
  [3,]  TRUE
  [4,]  TRUE
  [5,]  TRUE
  [6,]  TRUE
  [7,]  TRUE
  [8,]  TRUE
  [9,]  TRUE
 [10,]  TRUE
 [11,]  TRUE
 [12,]  TRUE
 [13,]  TRUE
 [14,]  TRUE
 [15,]  TRUE
 [16,]  TRUE
 [17,]  TRUE
 [18,]  TRUE
 [19,]  TRUE
 [20,]  TRUE
 [21,]  TRUE
 [22,]  TRUE
 [23,]  TRUE
 [24,]  TRUE
 [25,]  TRUE
 [26,]  TRUE
 [27,]  TRUE
 [28,]  TRUE
 [29,]  TRUE
 [30,]  TRUE
 [31,]  TRUE
 [32,]  TRUE
 [33,]  TRUE
 [34,]  TRUE
 & 
 [35,]  TRUE
 [36,]  TRUE
 [37,]  TRUE
 [38,]  TRUE
 [39,]  TRUE
 [40,]  TRUE
 [41,]  TRUE
 [42,]  TRUE
 [43,]  TRUE
 [44,]  TRUE
 [45,]  TRUE
 [46,]  TRUE
 [47,]  TRUE
 [48,]  TRUE
 [49,]  TRUE
 [50,]  TRUE
 [51,]  TRUE
 [52,]  TRUE
 [53,]  TRUE
 [54,]  TRUE
 [55,]  TRUE
 [56,]  TRUE
 [57,]  TRUE
 [58,]  TRUE
 [59,]  TRUE
 [60,]  TRUE
 [61,]  TRUE
 [62,]  TRUE
 [63,]  TRUE
 [64,]  TRUE
 [65,]  TRUE
 [66,]  TRUE
 [67,]  TRUE
 [68,]  TRUE
 [69,]  TRUE
 & 
 [70,]  TRUE
 [71,]  TRUE
 [72,]  TRUE
 [73,]  TRUE
 [74,]  TRUE
 [75,]  TRUE
 [76,]  TRUE
 [77,]  TRUE
 [78,]  TRUE
 [79,]  TRUE
 [80,]  TRUE
 [81,]  TRUE
 [82,]  TRUE
 [83,]  TRUE
 [84,]  TRUE
 [85,]  TRUE
 [86,]  TRUE
 [87,]  TRUE
 [88,]  TRUE
 [89,]  TRUE
 [90,]  TRUE
 [91,]  TRUE
 [92,]  TRUE
 [93,]  TRUE
 [94,] FALSE
 [95,] FALSE
 [96,] FALSE
 [97,] FALSE
 [98,] FALSE
 [99,] FALSE
[100,] FALSE
[101,] FALSE
[102,] FALSE
[103,] FALSE
[104,] FALSE
&
[105,] FALSE
[106,] FALSE
[107,] FALSE
[108,] FALSE
[109,] FALSE
[110,] FALSE
[111,] FALSE
[112,] FALSE
[113,] FALSE
[114,] FALSE
[115,] FALSE
[116,] FALSE
[117,] FALSE
[118,] FALSE
[119,] FALSE
[120,] FALSE
[121,] FALSE
[122,] FALSE
[123,] FALSE
[124,] FALSE
[125,] FALSE
[126,] FALSE
[127,] FALSE
[128,] FALSE
[129,] FALSE
[130,] FALSE
[131,] FALSE
[132,] FALSE
[133,] FALSE
[134,] FALSE
[135,] FALSE
[136,] FALSE
[137,] FALSE
[138,] FALSE
[139,] FALSE
&
[140,] FALSE
[141,] FALSE
[142,] FALSE
[143,] FALSE
[144,] FALSE
[145,] FALSE
[146,] FALSE
[147,] FALSE
[148,] FALSE
[149,] FALSE
[150,] FALSE
[151,] FALSE
[152,] FALSE
[153,] FALSE
[154,] FALSE
[155,] FALSE
[156,] FALSE
[157,] FALSE
[158,] FALSE
[159,] FALSE
[160,] FALSE
[161,] FALSE
[162,] FALSE
[163,] FALSE
[164,] FALSE
[165,] FALSE
[166,] FALSE
[167,] FALSE
[168,] FALSE
[169,] FALSE
[170,] FALSE
\end{tabular}

\end{document}
