% PATH=/usr/local/texlive/2011/bin/x86_64-linux:$PATH 

%\documentclass[a4paper, 12pt] {article}
%\usepackage{amssymb,amsmath}
%\numberwithin{equation}{section}
%\begin{document}
\section{Log Laplace review and MLE derivation}
Log-Laplace models appear in economics and are a favorable modeling distribution because of their power tails. Log-Laplace captures certain financial data well, and the process of fitting data to the Log-Laplace is relatively straightforward. For an in-depth look at Log-Laplace, see ``Log-Laplace Distributions,'' Kozubowski and Podg\'{o}rski, 2003. This section will start with some results presented in Kozubowski and Podg\'{o}rski, 2003, and perform a step-by-step derivation of the necessary mathematics.

\subsection{Log Laplace PDF} 
The probability density function of Log Laplace:
\begin{equation} \label{eq:logLaplacePDF}
f(x) = \left\{
\begin{array}{c l}
  \frac{\alpha\beta}{\delta(\alpha+\beta)} (\frac{x}{\delta})^{\beta-1} & 0 \leq x < \delta \\
  \frac{\alpha\beta}{\delta(\alpha+\beta)} (\frac{\delta}{x})^{\alpha+1} & x \geq \delta
\end{array}
\right.
\end{equation}

The density function is described by 3 parameters: $\alpha, \beta$ are the shape parameters for the two tails, while $\delta$ is the location parameter. The rest of this section will show how to find the MLE estimators for these parameters.

\subsection{Estimators for Log-Laplace Parameters}
The task of finding MLE estimators is broken down into separate cases. We start with the basic case where the location parameter $\delta$ is known, deriving estimators for $\alpha$ and $\beta$, then describe a procedure for finding the estimators when all three parameters are unknown.

\textbf{The case of known $\delta$}. The approach here is to find estimators for $\alpha$ and $\beta$. 
Suppose there is a sample $x_1, x_2, \cdots, x_n$ of n iid observations, coming from the Log-Laplace distribution described in \eqref{eq:logLaplacePDF}.
Let $x_{(1)}, x_{(2)}, \cdots , x_{(n)}$ be the sorted version of $x_1, x_2, \cdots, x_n$ such that 
$x_{(1)} \leq x_{(2)} \leq \cdots \leq x_{(n)}$. Let $r$ be the index such that $x_{(r)} \leq \delta \leq x_{(r+1)}$.  
\newline 
The likelihood function for the Log-Laplace is:
\begin{equation} \label{eq:LLlikelihood}
\begin{split}
\emph{L}(\theta | x_1, x_2,\cdots, x_n)  & =  \prod_{i=1}^{n} f(x_i | \theta) = \prod_{i=1}^{n} f(x_{(i)} | \theta) \\ 
  & =\prod_{i=1}^{r} f(x_{(i)} | \theta) \prod_{i=r+1}^{n} f(x_{(i)} | \theta)   \\
  & = \prod_{i=1}^{r} \frac{\alpha\beta}{\delta(\alpha+\beta)} (\frac{x_{(i)}}{\delta})^{\beta-1} \prod_{i=r+1}^{n} \frac{\alpha\beta}{\delta(\alpha+\beta)} (\frac{\delta}{x_{(i)}})^{\alpha+1} \\
  & = (\frac{\alpha\beta}{\delta(\alpha+\beta)})^n \prod_{i=1}^{r} (\frac{x_{(i)}}{\delta})^{\beta-1} \prod_{i=r+1}^{n} (\frac{\delta}{x_{(i)}})^{\alpha+1}
\end{split}
\end{equation}
\newline
The log-likelihood function is:
\begin{equation} \label{eq:LLlogLikelihood}
\begin{split}
\ln (\emph{L}(\theta | x_1, x_2,\cdots, x_n)) = & n\ln(\frac{\alpha\beta}{\delta(\alpha+\beta)})+ (\beta-1)\sum_{i=1}^{r} \ln(\frac{x_{(i)}}{\delta}) \\ 
                                                & + (\alpha + 1)\sum_{i=r+1}^{n}\ln(\frac{\delta}{x_{(i)}})
\end{split}
\end{equation}

The maximum likelihood estimators of $\alpha$ and $\beta$ are the values of $\alpha$ and $\beta$ that maximize equation \eqref{eq:LLlogLikelihood}.
Taking the partial derivative of \eqref{eq:LLlogLikelihood} with respect to $\alpha$ yields:
\begin{equation} \label{eq:partialB1}
\begin{split}
\frac{\partial(\ln \frac{\alpha\beta}{\delta(\alpha+\beta)})}{\partial\alpha} & = \frac{\delta(\alpha+\beta)}{\alpha\beta} \cdot  \frac{\partial( \frac{\alpha\beta}{\delta(\alpha+\beta)})}{\partial\alpha} \\
& =  \frac{\delta(\alpha+\beta)}{\alpha\beta} \cdot \frac{\delta\beta(\alpha+\beta)-\alpha\beta\delta}{(\delta(\alpha+\beta))^2}\\
& = \frac{1}{\alpha\beta} \cdot \frac{\delta\alpha\beta+\delta\beta^2-\delta\alpha\beta}{\delta(\alpha+\beta)} = \frac{\beta}{\alpha(\alpha+\beta)}
\end{split}
\end{equation}
Combining \eqref{eq:LLlogLikelihood} and \eqref{eq:partialB1}:
\begin{equation} \label{eq:derivativeA}
\frac{\partial \ln(\emph{L}(\theta| x_1, x_2,\cdots, x_n))}{\partial\alpha} = \frac{n\beta}{\alpha(\alpha+\beta)} + \sum_{i=r+1}^{n}\ln(\frac{\delta}{x_{(i)}})
\end{equation}
\newline
Similarly, taking the partial derivative of \eqref{eq:LLlogLikelihood} with respect to $\beta$ yields:
\begin{equation} \label{eq:partialA1}
\begin{split}
\frac{\partial(\ln \frac{\alpha\beta}{\delta(\alpha+\beta)})}{\partial\beta} & = \frac{\delta(\alpha+\beta)}{\alpha\beta} \cdot  \frac{\partial( \frac{\alpha\beta}{\delta(\alpha+\beta)})}{\partial\beta} \\
& =  \frac{\delta(\alpha+\beta)}{\alpha\beta} \cdot \frac{\delta\alpha(\alpha+\beta)-\alpha\beta\delta}{(\delta(\alpha+\beta))^2}\\
& = \frac{1}{\alpha\beta} \cdot \frac{\delta\alpha\beta+\delta\alpha^2-\delta\alpha\beta}{\delta(\alpha+\beta)} = \frac{\alpha}{\beta(\alpha+\beta)}
\end{split}
\end{equation}
Combining \eqref{eq:LLlogLikelihood} and \eqref{eq:partialA1}:
\begin{equation} \label{eq:derivativeB}
\frac{\partial \ln(\emph{L}(\theta| x_1, x_2,\cdots, x_n))}{\partial\beta} = \frac{n\alpha}{\beta(\alpha+\beta)} + \sum_{i=1}^{r}\ln(\frac{x_{(i)}}{\delta})
\end{equation}
\newline
Setting the partial derivatives to 0, \eqref{eq:derivativeA} and \eqref{eq:derivativeB} form a system of two equations in the two unknowns $\alpha$ and $\beta$. Let $D_x = \sum_{i=r+1}^{n}\ln(\frac{\delta}{x_{(i)}})$ and
$X_d = \sum_{i=1}^{r}\ln(\frac{x_{(i)}}{\delta})$; the equations are:
\begin{equation} \label{eq:system1}
\frac{n\beta}{\alpha(\alpha+\beta)} + D_x = 0 \hspace{1 in} \frac{n\alpha}{\beta(\alpha+\beta)} + X_d = 0
\end{equation}
\newline
Solving for $\beta$ in \eqref{eq:system1}:
\begin{equation} \label{eq:solveB}
\beta = -\frac{D_x\alpha^2}{n+D_x\alpha}
\end{equation}
\newline
Combining \eqref{eq:solveB} and \eqref{eq:system1} and solving for $\alpha$:
\begin{equation} \label{eq:solveA}
\begin{split}
\frac{n\alpha}{\beta(\alpha+\beta)}+X_d  &= \frac{n\alpha}{-\frac{D_x\alpha^2}{n+D_x\alpha} \cdot (\alpha - \frac{D_x\alpha^2}{n+D_x\alpha})}+X_d \\
& = \frac{n\alpha}{-\frac{D_x\alpha^2}{n+D_x\alpha} \cdot \frac{\alpha(n+D_x\alpha)-D_x\alpha^2}{n+D_x\alpha}}+X_d \\
& = \frac{n\alpha}{\frac{-\alpha n D_x \alpha^2}{(n+D_x\alpha)^2}}+X_d = -\frac{(n+D_x\alpha)^2}{D_x\alpha^2}+X_d = 0
\end{split}
\end{equation}
Rewriting \eqref{eq:solveA} into quadratic form:
\begin{equation} \label{eq:solveA2}
\alpha^2(D_x^2-X_d D_x) + \alpha(2nD_x) + n^2 = 0
\end{equation}
Solving \eqref{eq:solveA2} for the MLE estimator of $\alpha$ : $\hat{\alpha}$:
\begin{equation} \label{eq:solveA3}
\begin{split}
\hat{\alpha} & = \frac{-2nD_x \pm \sqrt{4n^2D_x^2-4(D_x^2-X_dD_x)n^2}}{2(D_x^2-X_dD_x)} \\
& = \frac{-2nD_x \pm 2n\sqrt{X_dD_x}}{2(D_x^2-X_dD_x)} = \frac{n(D_x \mp \sqrt{X_dD_x})}{(\sqrt{X_dD_x}-D_x)(\sqrt{X_dD_x}+D_x)}
\end{split}
\end{equation}
Solving for $\alpha$ in \eqref{eq:system1}:
\begin{equation} \label{eq:alphaFromBeta}
\alpha = -\frac{X_d\beta^2}{n+X_d\beta}
\end{equation}
Combining \eqref{eq:alphaFromBeta} and \eqref{eq:system1} and solving for $\beta$:
\begin{equation} \label{eq:solveB1}
\begin{split}
\frac{n\beta}{\alpha(\alpha+\beta)} + D_x &= \frac{n\beta}{-\frac{X_d\beta^2}{n+X_d\beta} \cdot (\beta- \frac{X_d\beta^2}{n+X_d\beta})} + D_x \\
& = \frac{(n+X_d\beta)^2}{-X_d\beta^2}+D_x = 0
\end{split}
\end{equation}
Rewriting \eqref{eq:solveB1} into quadratic form:
\begin{equation} \label{eq:solveB2}
\beta^2(X_d^2-D_xX_d) + 2nX_d\beta+n^2 = 0
\end{equation}
$\hat{\beta}$, the MLE estimator of $\beta$ is:
\begin{equation} \label{eq:solveB3}
\hat{\beta} = \frac{-2nX_d \pm \sqrt{4n^2X_d^2-4(X_d^2-D_xX_d)n^2}}{2(X_d^2-D_xX_d)}= \frac{n(X_d \mp \sqrt{D_xX_d})}{(\sqrt{D_xX_d}-X_d)(\sqrt{D_xX_d}+X_d)}
\end{equation}
\newline
The conditions on the Log-Laplace parameters are $\alpha > 0$ and $\beta > 0$.  The final forms of $\hat{\alpha}$ and $\hat{\beta}$ are
found by selecting the roots of \eqref{eq:solveA3} and \eqref{eq:solveB3} that satisfy these conditions:
\begin{equation} \label{eq:solveA4}
\hat{\alpha} = \frac{n}{\sqrt{X_dD_x}-D_x} = \frac{n}{\sum_{i=r+1}^{n}\ln(\frac{x_{(i)}}{\delta}) + 
\sqrt{
	\sum_{i=1}^{r}\ln(\frac{\delta}{x_{(i)}})
	\cdot 
	\sum_{i=r+1}^{n}
		\ln(\frac{x_{(i)}}
			 {\delta})
} %sqrt
} %frac
\end{equation}
\begin{equation} \label{eq:solveB4}
\hat{\beta} = \frac{n}{\sqrt{X_dD_x}-X_d} = \frac{n}
{
	\sum_{i=1}^{r}\ln(\frac{\delta}{x_{(i)}})
	+ \sqrt 
	{
		\sum_{i=1}^{r}\ln(\frac{\delta}{x_{(i)}}) \cdot  \sum_{i=r+1}^{n}\ln(\frac{x_{(i)}}{\delta})
	}
}
\end{equation}
\newline
\textbf{The case of unknown $\delta$:} The ``concentrated'' likelihood $L_{conc}(\theta)$ is obtained by substituting the estimators $\hat{\alpha}(\delta)$ and $\hat{\beta}(\delta)$ from \eqref{eq:solveA4} and \eqref{eq:solveB4} into the likelihood function:
\begin{equation} \label{eq:concentratedLikelihood}
L_{conc}(\theta) = constant - \ln(\alpha(\delta)+\beta(\delta)) - \frac{\alpha(\delta)\beta(\delta)}{\alpha(\delta)+\beta(\delta)}
\end{equation}
Note that maximizing \eqref{eq:concentratedLikelihood} is the same as minimizing 
\begin{equation} \label{eq:H}
h(\delta) = \ln(\alpha(\delta)+\beta(\delta)) + \frac{\alpha(\delta)\beta(\delta)}{\alpha(\delta)+\beta(\delta)}
\end{equation}
Now the procedure for finding the unknown parameters $\alpha, \beta, \delta$ is defined: find the value of $\delta$ that minimizes \eqref{eq:H}; note that the candidate values of $\delta$ are the observed values $x_1, x_2, \cdots, x_n$.
%\end {document}


