#LyX 2.3 created this file. For more info see http://www.lyx.org/
\lyxformat 544
\begin_document
\begin_header
\save_transient_properties true
\origin unavailable
\textclass article
\begin_preamble
\makeatletter
%% reset the section counter at the start of every new part
\@addtoreset{section}{part}
\makeatother 

\usepackage{amsmath}


%%%%%%%%%%%%%%%%%%%%%%%%
%% TABLE OF EQUATIONS %%
%%%%%%%%%%%%%%%%%%%%%%%%

%% use tocloft for easily creating new lists of things
\usepackage{tocloft}
%% use xstring for string modification functionality
\usepackage{xstring}

%% make the @ symbol a letter, changing its category code
\makeatletter

%% number equations per section (e.g. 2.1, 2.2, ...)
\numberwithin{equation}{section}

%% NOTE(review): the commented-out block below would redefine
%% \ref so that all references go through \autoref; it is kept
%% here disabled.

%\AtBeginDocument{%
%  \renewcommand{\ref}[1]{%
%    \mbox{\autoref{#1}}%
%  }%
%}

%% define heading for list of equations
\newcommand{\listequationsname}{List of Equations}
%% tell tocloft what shall be numbered (myequations) and
%% under what list (equ)
\newlistof{myequations}{equ}{\listequationsname}
%% define the custom command for adding equations to the
%% list of equations
\newcommand{\myequations}[1]{%
% add an item in the equ list
\addcontentsline{equ}{myequations}{%
  \protect\numberline{\theequation}#1}%
}

%% set the width of numbers in the table of equations
%% (optional)

%\setlength{\cftmyequationsnumwidth}{3em}

%%% Redefine equations to add an entry in a list of
%%% equations to be displayed in the document somewhere.

%% store the current equation environment
\let\oldequation=\equation%
\let\endoldequation=\endequation%
\AtBeginDocument{\let\oldlabel=\label}% \AtBeginDocument because hyperref redefines \label

%% define a new label command, which removes the "eq:" prefix
%% from the label and adds the remaining string as the entry
%% text in the list of equations, instead of the string with
%% "eq:"
\newcommand{\mynewlabel}[1]{%
  \StrBehind{#1}{eq:}[\Str]% remove "eq:" from labels
  %% make \mynewlabel use \myequations to add an entry in the
  %% list of equations.
  \myequations{\Str}\oldlabel{#1}%
}

%% redefine the equation environment
\renewenvironment{equation}{%
  %% use the old beginning
  \oldequation%
  %% add an entry in the list of equations using a custom
  %% command (only inside equation environments, so \label
  %% used elsewhere keeps its normal meaning)
  \let\label\mynewlabel%
}{%
  %% use the old ending
  \endoldequation%
}

%% make the @ symbol an "other", changing its category code
%% back
\makeatother
\end_preamble
\use_default_options true
\maintain_unincluded_children false
\language american
\language_package default
\inputencoding auto
\fontencoding global
\font_roman "default" "default"
\font_sans "default" "default"
\font_typewriter "default" "default"
\font_math "auto" "auto"
\font_default_family default
\use_non_tex_fonts false
\font_sc false
\font_osf false
\font_sf_scale 100 100
\font_tt_scale 100 100
\use_microtype false
\use_dash_ligatures true
\graphics default
\default_output_format default
\output_sync 0
\bibtex_command default
\index_command default
\float_placement h
\paperfontsize default
\spacing other 1.2
\use_hyperref true
\pdf_title "Linear and Logistic Regression"
\pdf_author "Zelphir Kaltstahl"
\pdf_subject "machine learning"
\pdf_keywords "machine learning, data science,"
\pdf_bookmarks true
\pdf_bookmarksnumbered false
\pdf_bookmarksopen true
\pdf_bookmarksopenlevel 2
\pdf_breaklinks true
\pdf_pdfborder true
\pdf_colorlinks true
\pdf_backref section
\pdf_pdfusetitle true
\papersize a4paper
\use_geometry true
\use_package amsmath 2
\use_package amssymb 2
\use_package cancel 1
\use_package esint 1
\use_package mathdots 1
\use_package mathtools 1
\use_package mhchem 1
\use_package stackrel 1
\use_package stmaryrd 1
\use_package undertilde 1
\cite_engine basic
\cite_engine_type default
\biblio_style plain
\use_bibtopic false
\use_indices false
\paperorientation portrait
\suppress_date false
\justification true
\use_refstyle 1
\use_minted 0
\index Index
\shortcut idx
\color #008000
\end_index
\leftmargin 3cm
\topmargin 2cm
\rightmargin 3cm
\bottommargin 2cm
\headheight 1cm
\headsep 0.5cm
\footskip 1cm
\secnumdepth 1
\tocdepth 1
\paragraph_separation skip
\defskip medskip
\is_math_indent 0
\math_numbering_side default
\quotes_style danish
\dynamic_quotes 0
\papercolumns 1
\papersides 1
\paperpagestyle default
\bullet 1 0 9 -1
\bullet 2 0 0 -1
\bullet 3 0 8 -1
\tracking_changes false
\output_changes false
\html_math_output 0
\html_css_as_file 0
\html_be_strict false
\html_math_img_scale 1.2
\end_header

\begin_body

\begin_layout Standard
\begin_inset ERT
status open

\begin_layout Plain Layout


\backslash
begin{titlepage}
\end_layout

\end_inset


\end_layout

\begin_layout Title
Linear Regression and Logistic Regression
\end_layout

\begin_layout Standard
\begin_inset ERT
status open

\begin_layout Plain Layout


\backslash
end{titlepage}
\end_layout

\end_inset


\end_layout

\begin_layout Standard
\begin_inset CommandInset toc
LatexCommand tableofcontents

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Newpage newpage
\end_inset


\end_layout

\begin_layout Standard
\begin_inset ERT
status open

\begin_layout Plain Layout


\backslash
listofmyequations
\end_layout

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Newpage newpage
\end_inset


\end_layout

\begin_layout Part
Definitions
\end_layout

\begin_layout Section
Definitions of symbols and notation
\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\var}[1]{\textrm{#1}}
{\textrm{#1}}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\setr}{\mathbb{R}}
{\mathbb{R}}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\equa}[2]{#1=#2}
{#1=#2}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\given}[2]{#1\mid#2}
{#1\mid#2}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\condprob}[2]{P(\given{#1}{#2})}
{P(\given{#1}{#2})}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\paren}[1]{\left(#1\right)}
{\left(#1\right)}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\gaussian}[2]{\mathcal{N}(#1,#2)}
{\mathcal{N}(#1,#2)}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\deriv}[2]{\frac{\delta}{\delta#1}#2}
{\frac{\delta}{\delta#1}#2}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\distrib}{\textrm{distribution}}
{\textrm{distribution}}
\end_inset


\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\score}{\operatorname{score}}
{score}
\end_inset


\end_layout

\begin_layout Description
\begin_inset Formula $s$
\end_inset

: selection function, selects an element of a vector
\end_layout

\begin_layout Description
\begin_inset Formula $s(vec,i)$
\end_inset

: select the 
\begin_inset Formula $i$
\end_inset

-th element of the vector 
\begin_inset Formula $vec$
\end_inset


\end_layout

\begin_layout Description
\begin_inset Formula $\theta$
\end_inset

: (Greek letter lowercase 
\begin_inset Quotes ald
\end_inset

theta
\begin_inset Quotes ard
\end_inset

) vector of weights (or 
\begin_inset Quotes ald
\end_inset

parameters
\begin_inset Quotes ard
\end_inset

)
\end_layout

\begin_layout Description
\begin_inset Formula $\theta_{n}$
\end_inset

: the n-th vector of weights
\end_layout

\begin_layout Description
\begin_inset Formula $s(\theta_{n},i)$
\end_inset

: i-th element of the vector 
\begin_inset Formula $\theta_{n}$
\end_inset


\end_layout

\begin_layout Description
\begin_inset Formula $\hat{\theta}$
\end_inset

: optimal vector of weights for the model
\end_layout

\begin_layout Description
\begin_inset Formula $h_{\theta}(s(x,i))$
\end_inset

: 
\end_layout

\begin_deeper
\begin_layout Itemize
hypothesis function for weights 
\begin_inset Formula $\theta$
\end_inset


\end_layout

\begin_layout Itemize
function, which makes predictions given a data point 
\begin_inset Formula $s(x,i)$
\end_inset


\end_layout

\begin_layout Itemize
calculated by performing the matrix multiplication 
\begin_inset Formula $\theta^{T}s(x,i)$
\end_inset


\end_layout

\end_deeper
\begin_layout Description
\begin_inset Formula $x$
\end_inset

: (vector of) data points (or 
\begin_inset Quotes ald
\end_inset

samples
\begin_inset Quotes ard
\end_inset

) without labels
\end_layout

\begin_layout Description
\begin_inset Formula $s(x,i)$
\end_inset

: the i-th data point, but with 
\begin_inset Formula $s(s(x,i),0)=1$
\end_inset

, to enable having an intercept or bias, a constant addend, which determines,
 where the fit line intersects with the y-axis
\end_layout

\begin_layout Description
\begin_inset Formula $s(x,k)$
\end_inset

: k-th feature of a data point
\end_layout

\begin_layout Description
\begin_inset Formula $y$
\end_inset

: (vector of) correct labels (or 
\begin_inset Quotes ald
\end_inset

targets
\begin_inset Quotes ard
\end_inset

, or 
\begin_inset Quotes ald
\end_inset

output variable
\begin_inset Quotes ard
\end_inset

)
\end_layout

\begin_layout Description
\begin_inset Formula $s(y,i)$
\end_inset

: the i-th data point's label
\end_layout

\begin_layout Description
\begin_inset Formula $m$
\end_inset

: number of training data points (or 
\begin_inset Quotes ald
\end_inset

training examples
\begin_inset Quotes ard
\end_inset

)
\end_layout

\begin_layout Description
\begin_inset Formula $(x,y)$
\end_inset

: training data points (or 
\begin_inset Quotes ald
\end_inset

training examples
\begin_inset Quotes ard
\end_inset

)
\end_layout

\begin_layout Description
\begin_inset Formula $(s(x,i),s(y,i))$
\end_inset

: i-th training data point
\end_layout

\begin_layout Description
\begin_inset Formula $\deriv xf$
\end_inset

: (Greek letters lowercase 
\begin_inset Quotes ald
\end_inset

delta
\begin_inset Quotes ard
\end_inset

) derivative of 
\begin_inset Formula $f$
\end_inset

 with regard to 
\begin_inset Formula $x$
\end_inset

, the notation relates to the 
\begin_inset Formula $h$
\end_inset

-method 
\begin_inset Formula $\deriv xf=\lim_{h\to0}\paren{\frac{f(x+h)-f(x)}{h}}$
\end_inset

, where 
\begin_inset Formula $h$
\end_inset

 is the difference of 2 values of 
\begin_inset Formula $x$
\end_inset

, 
\begin_inset Formula $x_{a}$
\end_inset

 and 
\begin_inset Formula $x_{b}=x_{a}+h$
\end_inset

 and that difference goes towards 
\begin_inset Formula $0$
\end_inset

, to get towards ever more precise values of the slope at 
\begin_inset Formula $x_{a}$
\end_inset

.
\end_layout

\begin_layout Description
\begin_inset Formula $L(\theta)$
\end_inset

: Likelihood, a conditional probability, see 
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Probability-and-Likelihood"
plural "false"
caps "false"
noprefix "false"

\end_inset

.
\end_layout

\begin_layout Description
\begin_inset Formula $\ell(\theta)$
\end_inset

: log-likelihood, the logarithm of the likelihood: 
\begin_inset Formula $\log(L(\theta))$
\end_inset


\end_layout

\begin_layout Description
\begin_inset Formula $\condprob A{B;v}$
\end_inset

: The probability of 
\begin_inset Formula $A$
\end_inset

 given 
\begin_inset Formula $B$
\end_inset

 is parameterized by 
\begin_inset Formula $v$
\end_inset

.
\end_layout

\begin_layout Description
\begin_inset Formula $\condprob A{B;v}\sim\distrib$
\end_inset

: The probability of 
\begin_inset Formula $A$
\end_inset

 given 
\begin_inset Formula $B$
\end_inset

 parameterized by 
\begin_inset Formula $v$
\end_inset

 is distributed as expressed by the distribution 
\begin_inset Quotes ald
\end_inset

distribution
\begin_inset Quotes ard
\end_inset

.
\end_layout

\begin_layout Section
Probability and Likelihood
\begin_inset CommandInset label
LatexCommand label
name "sec:Probability-and-Likelihood"

\end_inset


\end_layout

\begin_layout Standard
A likelihood is also a probability, but it is a conditional probability.
 With regard to machine learning, it is often used in cases, where one
 wants to ask the following question: 
\begin_inset Quotes ald
\end_inset

How likely are we to see this data, assuming these parameters?
\begin_inset Quotes ard
\end_inset

 This is the same question as: 
\begin_inset Quotes ald
\end_inset

What is the probability to observe this data, if the generating process
 behind it follows the model, which is parameterized using these parameters?
\begin_inset Quotes ard
\end_inset

 For linear regression for example:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
L(\theta)=\condprob y{x;\theta}
\]

\end_inset


\end_layout

\begin_layout Standard
In natural language: The likelihood of our parameters 
\begin_inset Formula $\theta$
\end_inset

 is the probability of seeing the labels 
\begin_inset Formula $y$
\end_inset

, given the feature values 
\begin_inset Formula $x$
\end_inset

, parameterized with 
\begin_inset Formula $\theta$
\end_inset

.
\end_layout

\begin_layout Standard
So the labels and feature values are fixed, and the probability is a function
 of the parameters 
\begin_inset Formula $\theta$
\end_inset

.
\end_layout

\begin_layout Section
Principle of Maximum Likelihood
\end_layout

\begin_layout Standard
Choose 
\begin_inset Formula $\theta$
\end_inset

 to maximize 
\begin_inset Formula $\equa{L(\theta)}{\condprob y{x;\theta}}$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align}
\max_{\theta}\paren{L(\theta)} & = & \max_{\theta}\paren{\condprob y{x;\theta}}\nonumber \\
 & = & \max_{\theta}\paren{\prod_{i=1}^{m}\paren{\condprob{s(y,i)}{s(x,i);\theta}}}\label{eq:Principle of Maximum Likelihood}
\end{align}

\end_inset


\end_layout

\begin_layout Part
Linear Regression
\end_layout

\begin_layout Section
Introduction
\end_layout

\begin_layout Standard
Linear regression is a 
\emph on
parametric learning algorithm
\emph default
.
 Parametric learning algorithms are algorithms, which have a fixed set of
 parameters, which are changed during a learning phase, to fit a model to
 some data.
 In contrast to 
\emph on
non-parametric learning algorithms
\emph default
 the number of parameters does not grow with the number of training data
 points.
\end_layout

\begin_layout Section
Cost Function
\end_layout

\begin_layout Standard
To get a measure of error we would make, if we used a vector of weights
 
\begin_inset Formula $\theta_{n}$
\end_inset

 to predict the values of 
\begin_inset Formula $y$
\end_inset

 given the 
\begin_inset Formula $x$
\end_inset

, we calculate the difference between the predicted values of 
\begin_inset Formula $y$
\end_inset

 and the correct values in the given data set.
 This is done for all data points and the differences are added up, so that
 an error in prediction influences the measure, no matter for which training
 data point the error in prediction is made.
\end_layout

\begin_layout Standard
However, not a simple sum is used, but the sum of the squared errors.
 This has the effect, that all summands are positive, only ever increasing
 the sum of errors and that bigger errors weigh more than small errors.
 Then the mean is taken of that sum of squared errors.
 This is often called 
\emph on
MSE
\emph default
, which is the abbreviation for 
\emph on
mean squared error
\emph default
.
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{equation}
\frac{1}{m}\sum_{i=1}^{m}\paren{(h_{\theta}(s(x,i))-s(y,i)){}^{2}}\label{eq:Mean Squared Error (MSE)}
\end{equation}

\end_inset


\end_layout

\begin_layout Standard
This is not yet the final form of the cost function.
 A few modifications should be made to the measure, before defining it as
 the cost function.
\end_layout

\begin_layout Standard
The factor 
\begin_inset Formula $\frac{1}{m}$
\end_inset

 in front of the sum always divides by a constant 
\begin_inset Formula $m$
\end_inset

, which depends on the training data set.
\end_layout

\begin_layout Subsection
Why use squared error?
\end_layout

\begin_layout Standard
If the sum of the plain differences was used errors could be positive or
 negative, depending on whether the predicted value is above or below the
 value in the actual data set.
 Such positive and negative errors could make up for each other, resulting
 in a sum, which is close to zero, misleading to think, that the chosen
 weights are good, while they actually cause big errors in prediction.
 It is therefore important for minimization of error, that all summed values
 are positive numbers.
\end_layout

\begin_layout Standard
One could think, that one could use the absolute errors instead of the squared
 errors.
 That is sometimes done as well.
 It is not affected as much by outliers, because error terms are not squared
\begin_inset Foot
status open

\begin_layout Plain Layout
Big errors become even bigger in comparison to small errors, when squared.
\end_layout

\end_inset

.
 When errors are not squared, it is easier for multiple small errors to
 add up to make the measure of quality look just as bad as few big errors,
 as they will be averaged later.
 However, using the absolute error makes the mathematics required later
 more difficult
\begin_inset Foot
status open

\begin_layout Plain Layout
There are more reasons for using squared errors instead of absolute errors,
 which I did not personally understand in detail and will not go into in
 this document, unless I understand them well one day.
\end_layout

\end_inset

.
\end_layout

\begin_layout Standard
A more detailed reason is, that the mean squared error, given a few specific
 assumptions, follows naturally from maximizing the likelihood of 
\begin_inset Formula $\theta$
\end_inset

.
 For more information see 
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Maximum-Likelihood"
plural "false"
caps "false"
noprefix "false"

\end_inset

.
\end_layout

\begin_layout Subsection
Cost Function Definition and Minimization
\end_layout

\begin_layout Standard
To find the weights 
\begin_inset Formula $\hat{\theta}$
\end_inset

, which minimize the cost (mean squared error), giving the best predictions,
 making the best model for the training data from which the model learns,
 we try to minimize the costs by changing the weights 
\begin_inset Formula $\theta$
\end_inset

.
 This is mathematically written as follows:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}J(\theta)
\]

\end_inset


\end_layout

\begin_layout Standard
In natural language: 
\emph on
Minimize the value of 
\begin_inset Formula $J$
\end_inset

 by trying values for the weights 
\begin_inset Formula $\theta$
\end_inset

.
\end_layout

\begin_layout Standard
Using mean squared errors as cost function 
\begin_inset Formula $J(\theta)$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}\paren{\frac{1}{m}\sum_{i=1}^{m}\left((h_{\theta}(s(x,i))-s(y,i))^{2}\right)}
\]

\end_inset


\end_layout

\begin_layout Standard
For minimization, it does not matter, if we add a constant positive factor
 in front of the term one minimizes, because the factor will be applied
 to all resulting terms equally and will not change the sign: 
\begin_inset Formula $\min_{\theta}(f(\theta,x))=\min_{\theta}(k\cdot f(\theta,x))$
\end_inset

, where 
\begin_inset Formula $k\in\setr\wedge k>0$
\end_inset

.
 This allows us to simplify the math later on, by adding a constant factor
 
\begin_inset Formula $\frac{1}{2}$
\end_inset

 in front of the minimized term:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}\paren{\frac{1}{2}\cdot\frac{1}{m}\cdot\sum_{i=1}^{m}\left((h_{\theta}(s(x,i))-s(y,i))^{2}\right)}
\]

\end_inset


\end_layout

\begin_layout Standard
Simplified:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}\paren{\frac{1}{2m}\cdot\sum_{i=1}^{m}\left((h_{\theta}(s(x,i))-s(y,i))^{2}\right)}
\]

\end_inset


\end_layout

\begin_layout Standard
Now we are ready to define the cost function 
\begin_inset Formula $J(\theta)$
\end_inset

 in its final form:
\begin_inset Foot
status open

\begin_layout Plain Layout
For convenience reasons, we will not write 
\begin_inset Formula $J$
\end_inset

 as dependent on 
\begin_inset Formula $x$
\end_inset

 and 
\begin_inset Formula $y$
\end_inset

.
 We will write only 
\begin_inset Formula $J(\theta)$
\end_inset

 instead of 
\begin_inset Formula $J(\theta,x,y)$
\end_inset

.
 
\begin_inset Formula $J$
\end_inset

 will be derived with regard to 
\begin_inset Formula $\theta$
\end_inset

 and not with regard to 
\begin_inset Formula $x$
\end_inset

 or 
\begin_inset Formula $y$
\end_inset

 anyway.
\end_layout

\end_inset

 as follows (equal to the above):
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{equation}
J(\theta)=\frac{1}{2m}\sum_{i=1}^{m}\paren{(h_{\theta}(s(x,i))-s(y,i))^{2}}\label{eq:Cost Function J}
\end{equation}

\end_inset


\end_layout

\begin_layout Section
(Batch) Gradient Descent
\end_layout

\begin_layout Standard
Now, that we defined the cost function 
\begin_inset Formula $J$
\end_inset

, we need a way to systematically change the values of the weights vector
 
\begin_inset Formula $\theta$
\end_inset

.
 We could select random values repeatedly, but we would have no guarantee
 to make progress that way, so we need something better than random values.
\end_layout

\begin_layout Standard
Gradient descent is an iterative algorithm, which will be used for minimizing
 the cost function.
 This version is often called 
\begin_inset Quotes ald
\end_inset

batch gradient descent
\begin_inset Quotes ard
\end_inset

, because it looks at all training data points in each iteration and not
 only a subset of the training data points.
\end_layout

\begin_layout Subsection
Algorithm
\end_layout

\begin_layout Enumerate
Choose an 
\begin_inset Formula $\epsilon$
\end_inset

 as tolerance.
\end_layout

\begin_layout Enumerate
Choose an initial weights vector 
\begin_inset Formula $\theta_{0}$
\end_inset

.
\end_layout

\begin_layout Enumerate
Repeat until the following stop criterion is met: 
\begin_inset Formula 
\begin{equation}
\left|J(\theta_{n+1})-J(\theta_{n})\right|<\epsilon\label{eq:Gradient Descent Stop Criteria}
\end{equation}

\end_inset


\end_layout

\begin_deeper
\begin_layout Enumerate
Simultaneously
\begin_inset Foot
status open

\begin_layout Plain Layout
\begin_inset Quotes ald
\end_inset

Simultaneously
\begin_inset Quotes ard
\end_inset

 means, that all elements in 
\begin_inset Formula $\theta_{n}$
\end_inset

 are updated at the same moment, after calculating each respective derivative.
 None of the updated weights take part in calculating the partial derivative
 of another updated weight.
 All updated weights are based on non-updated weights exclusively.
\end_layout

\end_inset

 update all elements in 
\begin_inset Formula $\theta_{n}$
\end_inset

 by updating each weight with the respective partial derivative of the cost
 function as follows: 
\begin_inset Formula 
\begin{equation}
\theta_{n+1}=\theta_{n}-\alpha\frac{\delta}{\delta\theta_{n}}J\label{eq:Gradient Descent Weights Update Formula}
\end{equation}

\end_inset


\end_layout

\end_deeper
\begin_layout Subsubsection
Explanation of the weights vector update formula
\end_layout

\begin_layout Standard
To calculate the new weight, we want to take advantage of the progress made
 towards the minimum costs.
 So we change the old values of 
\begin_inset Formula $\theta_{n}$
\end_inset

 instead of choosing independent new values for 
\begin_inset Formula $\theta_{n+1}$
\end_inset

.
 In the update formula this is represented by subtracting from 
\begin_inset Formula $\theta_{n}$
\end_inset

 on the right hand side of the update formula.
\end_layout

\begin_layout Standard
There is also the derivative of the cost function 
\begin_inset Formula $\frac{\delta}{\delta\theta_{n}}J$
\end_inset

 in the update formula.
 The first derivative is the function, which gives the slope of the cost
 function.
 To get to a (local, potentially global) minimum of the cost function, we
 want to follow the slope towards lower cost values, until the values are
 not getting much lower any longer.
 The slope depends on all weights in the weights vector 
\begin_inset Formula $\theta_{n}$
\end_inset

.
 They tell us how much the slope depends on the feature values of a data
 point.
 They are the numbers for how much each feature weighs in.
 As such, an 
\begin_inset Formula $s(\theta_{n},k)$
\end_inset

 determines, how much the value of the function 
\begin_inset Formula $J$
\end_inset

 changes, when changing the corresponding 
\begin_inset Formula $s(s(x,i),k)$
\end_inset

 (i-th data point, k-th feature).
 This means, that the weight 
\begin_inset Formula $s(\theta_{n},k)$
\end_inset

 determines, how much the cost function 
\begin_inset Formula $J$
\end_inset

 is sloped in the direction of feature 
\begin_inset Formula $k$
\end_inset

.
\end_layout

\begin_layout Standard
Then there is the 
\begin_inset Formula $\alpha$
\end_inset

 as a factor for the derivative in the update formula.
 It is also known as the 
\emph on
step width
\emph default
, or 
\emph on
learning rate
\emph default
.
 It serves as a changing factor, for adjusting how much we move in the direction
 of the slope.
 This will help to prevent overshooting the minimum while moving along the
 slope.
 Initially we might want to make big steps towards the minimum, to make
 quick progress.
 When we almost arrived at the minimum, we want to make only small steps,
 to make sure, we are not overshooting the minimum, moving further away
 from the minimum, than we were before.
 If one wanted to work without an 
\begin_inset Formula $\alpha$
\end_inset

 one could set 
\begin_inset Formula $\alpha$
\end_inset

 to the neutral element of multiplication, which is 1
\begin_inset Foot
status open

\begin_layout Plain Layout
I am not sure whether this would even work.
 Probably one would almost always overshoot the cost minimum, while trying
 to optimize the weights.
\end_layout

\end_inset

 and it would have no effect.
 It only adds flexibility to the algorithm, by allowing a learning rate
 not equal to 1.
\end_layout

\begin_layout Subsection
Derivation of the cost function
\end_layout

\begin_layout Standard
Having an intuition for the cost function, we still face the task of determining
, what that derivative of the cost function is.
 Once this is expressed in operations easily performed by a computer, one
 can go about implementing the optimization.
\end_layout

\begin_layout Standard
The math involved with finding the derivative of the cost function is the
 reason, why we previously prepended an unexplained factor of 
\begin_inset Formula $\frac{1}{2}$
\end_inset

 to the cost function.
 The cost function contains a squaring, which will complement the factor
 in the derivative.
 To find the derivative, we will look at the case of only having one training
 data point and then generalize.
\end_layout

\begin_layout Standard
The derivative then looks as follows:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\deriv{\theta_{n}}{J(\theta)}=\deriv{\theta_{n}}{\frac{1}{2m}(h_{\theta}(x)-y)^{2}}
\]

\end_inset


\end_layout

\begin_layout Standard
The partial derivative with respect to the one available data point.
 There is no need for indices for 
\begin_inset Formula $x$
\end_inset

 and 
\begin_inset Formula $y$
\end_inset

 because there is only one data point.
 Also 
\begin_inset Formula $m$
\end_inset

 is 1, but let's not simplify that, as we want to generalize to many data
 points later on and will need the 
\begin_inset Formula $m$
\end_inset

.
\end_layout

\begin_layout Standard
The structure of the term to derive calls for the so called 
\emph on
chain rule
\emph default
: 
\end_layout

\begin_layout Standard
If 
\begin_inset Formula $f(x)=p(q(x))$
\end_inset

, where 
\begin_inset Formula $q$
\end_inset

 is the inner function and 
\begin_inset Formula $p$
\end_inset

 is the outer function, then 
\begin_inset Formula $\frac{\delta}{\delta x}f=\frac{\delta}{\delta x}p(q(x))\cdot\frac{\delta}{\delta x}q(x)$
\end_inset

.
\end_layout

\begin_layout Standard
For example: 
\begin_inset Formula $f(x)=(x^{a}-b)^{n}$
\end_inset

 then 
\begin_inset Formula $u(x)=v^{n}$
\end_inset

, where 
\begin_inset Formula $v$
\end_inset

 is seen as a kind of blackbox, which depends on 
\begin_inset Formula $x$
\end_inset

, and 
\begin_inset Formula $v(x)=x^{a}-b$
\end_inset

 and the derivative is 
\begin_inset Formula $\deriv xf=\deriv x{u(v(x))}\cdot\deriv x{v(x)}$
\end_inset

.
 
\end_layout

\begin_layout Standard
\begin_inset Formula $u$
\end_inset

 is the 
\emph on
outer function
\emph default
 and 
\begin_inset Formula $v$
\end_inset

 is the 
\emph on
inner function
\emph default
.
 We need 
\begin_inset Formula $\deriv xu$
\end_inset

 and 
\begin_inset Formula $\deriv xv$
\end_inset

 in addition to 
\begin_inset Formula $u$
\end_inset

 and 
\begin_inset Formula $v$
\end_inset

 to write down the derivative.
 In our example we have the term:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\frac{1}{2m}(h_{\theta}(x)-y)^{2}
\]

\end_inset


\end_layout

\begin_layout Standard
The outer function is 
\begin_inset Formula $u(v)=\frac{1}{2m}(v)^{2}$
\end_inset

 and the inner function is 
\begin_inset Formula $v(\theta,x)=h_{\theta}(x)-y$
\end_inset

.
\end_layout

\begin_layout Standard
Since 
\begin_inset Formula $\theta$
\end_inset

 is not only one variable, but actually a vector, a partial derivative needs
 to be calculated with respect to each of the elements of 
\begin_inset Formula $\theta$
\end_inset

.
 This is also what we need to perform the update of the single elements
 of the weights vector.
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\frac{\delta}{\delta s(\theta,i)}u(v)=\frac{\delta}{\delta s(\theta,i)}\frac{1}{2m}(v)^{2}
\]

\end_inset


\end_layout

\begin_layout Standard
According to the 
\emph on
power rule
\emph default
 for derivation: 
\begin_inset Formula $f(x)=ax^{n}$
\end_inset

 then 
\begin_inset Formula $\deriv xf=nax^{n-1}$
\end_inset

.
 In other words: Multiply by the exponent and subtract 1 from the exponent:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\begin{aligned}\deriv{s(\theta,i)}{u(v)} & = & \deriv{s(\theta,i)}{\frac{1}{2m}(v)^{2}}\\
 & = & 2\cdot\frac{1}{2m}v\\
 & = & \frac{2}{2m}v\\
 & = & \frac{1}{m}v
\end{aligned}
\]

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula $v$
\end_inset

 is again seen as a blackbox, dependent on 
\begin_inset Formula $s(\theta,i)$
\end_inset

 in the above transformations.
 
\end_layout

\begin_layout Standard
Now the derivative of 
\begin_inset Formula $v(\theta,x)=h_{\theta}(x)-y$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\frac{\delta}{\delta s(\theta,i)}v(x,\theta)=\frac{\delta}{\delta s(\theta,i)}(h_{\theta}(x)-y)
\]

\end_inset


\end_layout

\begin_layout Standard
Substituting what 
\begin_inset Formula $h_{\theta}(x)$
\end_inset

 actually looks like:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\frac{\delta}{\delta s(\theta,i)}(h_{\theta}(x)-y)=\frac{\delta}{\delta s(\theta,i)}((s(\theta,0)\cdot s(x,0)+s(\theta,1)\cdot s(x,1)\ldots+s(\theta,n)\cdot s(x,n))-y)
\]

\end_inset


\end_layout

\begin_layout Standard
Taking partial derivative with respect to 
\begin_inset Formula $s(\theta,i)$
\end_inset

 will cause all terms of 
\begin_inset Formula $h_{\theta}(x)$
\end_inset

 without 
\begin_inset Formula $s(\theta,i)$
\end_inset

 part to disappear, because they do not depend on 
\begin_inset Formula $s(\theta,i)$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\begin{aligned}\deriv{s(\theta,i)}v & = & \frac{\delta}{\delta s(\theta,i)}((s(\theta,0)\cdot s(x,0)+s(\theta,1)\cdot s(x,1)\ldots+s(\theta,n)\cdot s(x,n))-y)\\
 & = & \frac{\delta}{\delta s(\theta,i)}s(\theta,i)\cdot s(x,i)\\
 & = & s(x,i)
\end{aligned}
\]

\end_inset


\end_layout

\begin_layout Standard
So now we have 
\begin_inset Formula $u$
\end_inset

, 
\begin_inset Formula $v$
\end_inset

, 
\begin_inset Formula $\deriv{s(\theta,i)}u$
\end_inset

 and 
\begin_inset Formula $\deriv{s(\theta,i)}v$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\begin{aligned}u & = & \frac{1}{2m}(v)^{2}\\
\deriv{s(\theta,i)}u & = & \frac{1}{m}v\\
v & = & h_{\theta}(x)-y\\
\deriv{s(\theta,i)}v & = & x_{i}
\end{aligned}
\]

\end_inset


\end_layout

\begin_layout Standard
Now we need to put the pieces together according to the chain rule.
 In case of the cost function 
\begin_inset Formula $J$
\end_inset

 this means that the derivative 
\begin_inset Formula $\deriv{s(\theta,i)}J$
\end_inset

 is 
\begin_inset Formula $\deriv{s(\theta,i)}{u(v(\theta))}\cdot\deriv{s(\theta,i)}{v(\theta)}$
\end_inset

 :
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\begin{aligned}\deriv{s(\theta,i)}{J(\theta)} & = & \deriv{s(\theta,i)}{\frac{1}{2m}(h_{\theta}(x)-y)^{2}}\\
 & = & \deriv{s(\theta,i)}{u(v(x))}\cdot\deriv{s(\theta,i)}{v(x)}\\
 & = & \frac{1}{m}(h_{\theta}(x)-y)\cdot x_{i}
\end{aligned}
\]

\end_inset


\end_layout

\begin_layout Standard
So the 
\begin_inset Formula $\theta_{n+1}$
\end_inset

 according to the update formula of gradient descent is then:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\begin{aligned}\theta_{n+1} & = & \theta_{n}-\alpha\deriv{\theta_{n}}{J(\theta)}\\
 & = & \theta_{n}-\alpha\left(\frac{1}{m}(h_{\theta}(x)-y)\cdot x_{i}\right)
\end{aligned}
\]

\end_inset


\end_layout

\begin_layout Section
Stochastic Gradient Descent
\end_layout

\begin_layout Standard
Stochastic gradient descent (SGD) is also called incremental gradient descent
 and is a slightly modified version of batch gradient descent.
 Its main idea as opposed to batch gradient descent is, that one only uses
 a subset, a so called 
\emph on
mini-batch
\emph default
, of training data points to update the weights.
 The rest of the algorithm stays the same.
\end_layout

\begin_layout Enumerate
Choose a size of the mini-batches 
\begin_inset Formula $b$
\end_inset

, the number of data points contained in the mini-batch at each iteration.
\end_layout

\begin_layout Enumerate
Choose an 
\begin_inset Formula $\epsilon$
\end_inset

 as tolerance.
\end_layout

\begin_layout Enumerate
Choose an initial weights vector 
\begin_inset Formula $\theta_{0}$
\end_inset

.
\end_layout

\begin_layout Enumerate
Repeat until the following stop criterion is met: 
\begin_inset Formula 
\begin{equation}
\left|J_{B}(\theta_{n+1})-J_{B}(\theta_{n})\right|<\epsilon\label{eq:Stochastic Gradient Descent Stop Criteria}
\end{equation}

\end_inset


\end_layout

\begin_deeper
\begin_layout Enumerate
Select 
\begin_inset Formula $b$
\end_inset

 random unique indices into the training data set.
 All data points at these indices will make the next mini-batch.
\end_layout

\begin_layout Enumerate
Simultaneously
\begin_inset Foot
status open

\begin_layout Plain Layout
\begin_inset Quotes ald
\end_inset

Simultaneously
\begin_inset Quotes ard
\end_inset

 means, that all elements in 
\begin_inset Formula $\theta_{n}$
\end_inset

 are updated at the same moment, after calculating each respective derivative.
 None of the updated weights take part in calculating the partial derivative
 of another updated weight.
 All updated weights are based on non-updated weights exclusively.
\end_layout

\end_inset

 update all elements in 
\begin_inset Formula $\theta_{n}$
\end_inset

 by updating each weight with the respective partial derivative of the cost
 function as follows: 
\begin_inset Formula 
\begin{equation}
\theta_{n+1}=\theta_{n}-\alpha\frac{\delta}{\delta\theta_{n}}J_{B}\label{eq:Stochastic Gradient Descent Weights Update Formula}
\end{equation}

\end_inset

where 
\begin_inset Formula $J_{B}$
\end_inset

 is the cost function for the current mini-batch:
\begin_inset Formula 
\[
J_{B}(\theta)=\frac{1}{2b}\sum_{i=1}^{b}\paren{(h_{\theta}(s(x_{b},i))-s(y_{b},i))^{2}}
\]

\end_inset

with 
\begin_inset Formula $b$
\end_inset

 being the mini-batch size, 
\begin_inset Formula $x_{b}$
\end_inset

 training data points of the mini-batch and 
\begin_inset Formula $y_{b}$
\end_inset

 the labels of the mini-batch.
\end_layout

\end_deeper
\begin_layout Standard
This algorithm is especially useful for training data sets with many data
 points, for which batch gradient descent might be too slow.
\end_layout

\begin_layout Section
Predictions
\end_layout

\begin_layout Standard
After optimizing the weights for minimized costs, meaning minimized error
 in prediction for values in the training data set, we can make predictions
 for completely new data points.
 This is simply done by using the optimized weights in the hypothesis function:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
h_{\hat{\theta}}(x_{\var{new}})=\theta^{T}x_{\var{new}}=y_{\var{pred}}
\]

\end_inset


\end_layout

\begin_layout Section
Maximum likelihood and MSE
\begin_inset CommandInset label
LatexCommand label
name "sec:Maximum-Likelihood"

\end_inset


\end_layout

\begin_layout Standard
In this section, we will explore maximum likelihood and how it is connected
 to mean squared error.
\end_layout

\begin_layout Standard
Maximum likelihood is a simple idea, but at the same time a bit of a brain
 twister.
 The whole point of linear regression is to find weights 
\begin_inset Formula $\hat{\theta}$
\end_inset

, which give optimal predictions.
 The weights used in the linear formula for making predictions should simulate
 the real generating process behind the observed data as precisely as possible,
 to give the predictions, which are close to what will be the real outcome
 for values of the features in the training data set.
\end_layout

\begin_layout Standard
This means finding the weights, for which the observations, the training
 data set, seem most likely to happen, because the weights get closest to
 the actual process generating the training data.
 As a mathematical formula:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{equation}
\max_{\theta}\paren{L(\theta)}=\max_{\theta}\paren{\prod_{i=1}^{m}\paren{\condprob{s(y,i)}{s(x,i);\theta}}}\label{eq:Likelihood}
\end{equation}

\end_inset


\end_layout

\begin_layout Standard
In natural language: 
\emph on
The likelihood of the weights 
\begin_inset Formula $\theta$
\end_inset

 is the product of all probabilities of labels 
\begin_inset Formula $s(y,i)$
\end_inset

 in the training data set, given the feature values 
\begin_inset Formula $s(x,i)$
\end_inset

, parameterized by 
\begin_inset Formula $\theta$
\end_inset

, where 
\begin_inset Formula $P$
\end_inset

 is calculated according to the error distribution assumed by the model
 and cost function.
 This likelihood is maximized by trying values for the weights 
\begin_inset Formula $\theta$
\end_inset

.
\end_layout

\begin_layout Standard
Let's make some assumptions.
\end_layout

\begin_layout Enumerate
The label 
\begin_inset Formula $s(y,i)$
\end_inset

 can be represented by a linear function of the features 
\begin_inset Formula $s(x,i)$
\end_inset

, plus some kind of epsilon 
\begin_inset Formula $s(\epsilon,i)$
\end_inset

, called 
\emph on
error term
\emph default
: 
\begin_inset Formula $s(y,i)=\theta^{T}s(x,i)+s(\epsilon,i)$
\end_inset

.
 Such error term 
\begin_inset Formula $s(\epsilon,i)$
\end_inset

 can model deviations (errors in prediction) from that linear function.
 Reasons for such deviations could be for example:
\end_layout

\begin_deeper
\begin_layout Enumerate
There are more features of the observed objects, which were not captured
 in the data set, which cause slight errors to occur, when the predictions
 are made without them in the features of the data set.
 
\end_layout

\begin_layout Enumerate
The function, which actually produced the observations captured in the
 data set, is not linear, so using a linear function for prediction will
 most likely result in errors.
\end_layout

\end_deeper
\begin_layout Enumerate
The errors made up for by the error term 
\begin_inset Formula $s(\epsilon,i)$
\end_inset

 are distributed in a Gaußian distribution
\begin_inset Foot
status open

\begin_layout Plain Layout
Why assume Gaußian? Apparently when looking at many linear regression problems,
 the error is in fact distributed like this and that makes it a good assumption
 and the other reason is, that it seems to make the involved math easier
 (source: Andrew Ng Stanford Machine Learning lecture).
 So really no other reason than counting cases and math.
\end_layout

\end_inset

, meaning, that bigger errors are less likely than smaller errors: 
\begin_inset Formula $s(\epsilon,i)\sim\mathcal{N}(\mu,\sigma²)$
\end_inset

 with 
\begin_inset Formula $\mu=0$
\end_inset

.
 Then the probability for an 
\begin_inset Formula $s(\epsilon,i)$
\end_inset

 is given by 
\begin_inset Formula $P(s(\epsilon,i))=\frac{1}{\sqrt{2\pi}\sigma}\exp\paren{-\frac{s(\epsilon,i)\text{²}}{2\sigma\text{²}}}$
\end_inset

.
 Then 
\begin_inset Formula $\equa{P(s(y,i)\mid s(x,i);\theta)}{\frac{1}{\sqrt{2\pi}\sigma}\exp\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))\text{²}}{2\sigma\text{²}}}}$
\end_inset

 (inserting 
\begin_inset Formula $s(y,i)$
\end_inset

 into the probability function, 
\begin_inset Formula $\theta$
\end_inset

 is not seen as a random variable, only as an unknown, spoken as 
\begin_inset Quotes ald
\end_inset

parameterized by 
\begin_inset Formula $\theta$
\end_inset


\begin_inset Quotes ard
\end_inset

), which is the same statement as 
\begin_inset Formula $\given{s(y,i)}{s(x,i);\theta}\sim\gaussian{\theta^{T}s(x,i)}{\sigma²}$
\end_inset

.
\end_layout

\begin_layout Enumerate
The 
\begin_inset Formula $s(\epsilon,i)$
\end_inset

 are independently but identically distributed (they are IID).
\begin_inset Foot
status open

\begin_layout Plain Layout
TODO: I am not sure, why this assumption is important.
 It was only mentioned in the lecture I extracted this knowledge from, so
 I guess it is important.
 I mean, the assumption of identically distributed makes sense, because
 otherwise one cannot easily write down a formula for the probability distributi
on, but why does it need to be independently distributed?
\end_layout

\end_inset


\end_layout

\begin_layout Standard
With these assumptions in place, we can proceed.
\end_layout

\begin_layout Standard
Then the likelihood of 
\begin_inset Formula $\theta$
\end_inset

 is written as 
\begin_inset Formula $L(\theta)$
\end_inset

, where 
\begin_inset Formula $\equa{L(\theta)}{\condprob y{x;\theta}}$
\end_inset

, the probability of seeing the labels 
\begin_inset Formula $y$
\end_inset

, given the features of the data points 
\begin_inset Formula $x$
\end_inset

, parameterized by the weights 
\begin_inset Formula $\theta$
\end_inset

.
 
\end_layout

\begin_layout Standard
If one calculates the probabilities for all training data points, those
 independent probabilities need to be multiplied together
\begin_inset Foot
status open

\begin_layout Plain Layout
Adding them all up makes no sense and could result in probabilities 
\begin_inset Formula $>1$
\end_inset

.
\end_layout

\end_inset

.
 So the likelihood of 
\begin_inset Formula $\theta$
\end_inset

, which is the probability of the data parameterized by 
\begin_inset Formula $\theta$
\end_inset

, is given as follows:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
L(\theta) & = & \prod_{i=1}^{m}\paren{\condprob{s(y,i)}{s(x,i);\theta}}\\
 & = & \prod_{i=1}^{m}\paren{\frac{1}{\sqrt{2\pi}\sigma}\exp\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
For mathematical convenience we take the logarithm of the likelihood.
 This will later extinguish the exponentiation of the Gaußian and simplify
 the formula.
 We can do this, because we will later try to maximize the likelihood and
 taking the logarithm does not change the result of maximization, because logarithm
 is a monotonically increasing function.
 This logarithm of the likelihood is called 
\emph on
log-likelihood
\emph default
 and is written 
\begin_inset Formula $\ell(\theta)$
\end_inset

.
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align}
\ell(\theta) & = & \log\paren{L(\theta)}\nonumber \\
 & = & \log\paren{\prod_{i=1}^{m}\paren{\condprob{s(y,i)}{s(x,i);\theta}}}\label{eq:log-likelihood}\\
 & = & \log\prod_{i=1}^{m}\paren{\frac{1}{\sqrt{2\pi}\sigma}\exp\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}\nonumber 
\end{align}

\end_inset


\end_layout

\begin_layout Standard
Logarithm of a product is the same as the sum of logarithms of the factors:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
\log\paren{L(\theta)} & = & \log\prod_{i=1}^{m}\paren{\frac{1}{\sqrt{2\pi}\sigma}\exp\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}\\
 & = & \sum_{i=1}^{m}\paren{\log\paren{\frac{1}{\sqrt{2\pi}\sigma}\exp(\cdots)}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
According to the product rule for logarithms: 
\begin_inset Formula $\log_{b}(xy)=\log_{b}(x)+\log_{b}(y)$
\end_inset

.
 So we can split up the logarithm inside the sum:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
\sum_{i=1}^{m}\paren{\log\paren{\frac{1}{\sqrt{2\pi}\sigma}\exp(\cdots)}} & = & \sum_{i=1}^{m}\paren{\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\log\paren{\exp(\cdots)}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
This means, that we are adding the constant 
\begin_inset Formula $\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}$
\end_inset

 in total 
\begin_inset Formula $m$
\end_inset

 times and since it is a constant, we can move it out of the sum:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
\sum_{i=1}^{m}\paren{\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\log\paren{\exp(\cdots)}} & = & m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\sum_{i=1}^{m}\paren{\log\paren{\exp(\cdots)}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula $\log$
\end_inset

 and 
\begin_inset Formula $\exp$
\end_inset

 are each other's inverted operations, so they negate each other:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\sum_{i=1}^{m}\paren{\log\paren{\exp(\cdots)}} & = & m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\sum_{i=1}^{m}\paren{\cdots}\\
 & = & m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\sum_{i=1}^{m}\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
Since we are choosing 
\begin_inset Formula $\theta$
\end_inset

 to maximize the likelihood, the other symbols are seen as constants.
 This means, that for maximizing the likelihood, the part 
\begin_inset Formula $m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}$
\end_inset

 is actually a constant and does not change, when we change 
\begin_inset Formula $\theta$
\end_inset

.
 That means it does not matter for the whole maximization and we can simply
 remove it:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
 & \max_{\theta}\paren{m\cdot\log\paren{\frac{1}{\sqrt{2\pi}\sigma}}+\sum_{i=1}^{m}\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}\\
= & \max_{\theta}\paren{\sum_{i=1}^{m}\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
Here we are maximizing a sum of something with a negative sign in front.
 This is equivalent to minimizing the sum with the negative sign removed:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\begin{align*}
\max_{\theta}\paren{\sum_{i=1}^{m}\paren{-\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}} & = & \min_{\theta}\paren{\sum_{i=1}^{m}\paren{\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}
\end{align*}

\end_inset


\end_layout

\begin_layout Standard
Furthermore, we can simplify the denominator, as it only divides each addend
 by a constant 
\begin_inset Formula $2\sigma^{2}$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}\paren{\sum_{i=1}^{m}\paren{\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2\sigma\text{²}}}}=\min_{\theta}\paren{\sum_{i=1}^{m}\paren{\frac{(s(y,i)-\theta^{T}s(x,i))^{2}}{2}}}
\]

\end_inset


\end_layout

\begin_layout Standard
Since every addend is divided by the same denominator, we can also divide
 the whole sum instead of its parts (distributive property):
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\min_{\theta}\paren{\frac{\sum_{i=1}^{m}\paren{(s(y,i)-\theta^{T}s(x,i))^{2}}}{2}}
\]

\end_inset


\end_layout

\begin_layout Standard
Note, that the value of 
\begin_inset Formula $\sigma$
\end_inset

 does not matter for the minimization or maximization at all.
 We will arrive at the same values for 
\begin_inset Formula $\theta$
\end_inset

, no matter what the variance of the Gaußian is.
\end_layout

\begin_layout Standard
The minimized term is the same as the not averaged cost function 
\begin_inset Formula $J(\theta)$
\end_inset

:
\begin_inset Foot
status open

\begin_layout Plain Layout
We defined the cost function 
\begin_inset Formula $J(\theta)$
\end_inset

 earlier as averaged by dividing by 
\begin_inset Formula $m$
\end_inset

, however, this averaging is not required.
 So with a slightly different definition of 
\begin_inset Formula $J(\theta)$
\end_inset

, one could say that it is even the same as the cost function 
\begin_inset Formula $J(\theta)$
\end_inset

.
\end_layout

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\frac{\sum_{i=1}^{m}\paren{(s(y,i)-\theta^{T}s(x,i))^{2}}}{2}=\frac{1}{2}\cdot\sum_{i=1}^{m}\paren{(s(y,i)-\theta^{T}s(x,i))^{2}}
\]

\end_inset


\end_layout

\begin_layout Standard
What does it mean, that the log-likelihood with the assumption of (IID)
 Gaußian distribution of the error terms is the same as the cost function
 
\begin_inset Formula $J(\theta)$
\end_inset

? It means, that using the mean squared error, on which the cost function
 is based, comes naturally under the given assumptions.
 With these assumptions using mean squared error is mathematically justified.
\end_layout

\begin_layout Part
Logistic Regression
\end_layout

\begin_layout Standard
While linear regression is about predicting continuous values for data points,
 logistic regression is about predicting classes for data points, which
 means assigning one of a set of labels, often only 1 or 0, to data points.
 As such the output of the prediction is not a continuous value, but a class'
 label.
\end_layout

\begin_layout Section
On using linear regression for classification
\end_layout

\begin_layout Standard
One could have the idea, to simply use linear regression for classification,
 by defining a threshold value for the continuous output of the prediction
 function 
\begin_inset Formula $h_{\hat{\theta}}$
\end_inset

 and defining a decision function, which outputs 0 for all predictions below
 the threshold and 1 for all predictions above the threshold.
\end_layout

\begin_layout Standard
However, this idea quickly fails, when simply giving more data points above
 the threshold than below the threshold, or vice versa.
 In that case, the fit line of linear regression will be influenced more
 by those above or below the threshold and the predicted values for values
 previously closer to the threshold will change, resulting in different
 assigned labels, although the actual data does not indicate such a change
 as plausible.
\end_layout

\begin_layout Standard
Here is an example for this effect:
\end_layout

\begin_layout Standard
TODO: insert plots here
\end_layout

\begin_layout Standard
Instead of using linear regression for predicting classes, we need something
 different.
 This is where logistic regression comes in.
\end_layout

\begin_layout Section
Predicting classes
\end_layout

\begin_layout Subsection
Binary classification and multiclass classification
\end_layout

\begin_layout Standard
Binary classification is classification using only 2 values for labels, often
 called 0 and 1.
\end_layout

\begin_layout Standard
Multiclass classification assigns one of a set of more than 2 values for
 the label to data points.
\end_layout

\begin_layout Standard
Let's imagine we have some data set, which has a label, which can take on
 multiple values.
 Using a single binary classification, it would not be possible to assign
 all different label values to a new data point, because we learned only
 how to predict 2 label values.
\end_layout

\begin_layout Standard
In such situation there may be more efficient ways to learn, how to predict
 all different label values, but one way that is generally applicable is
 the following:
\end_layout

\begin_layout Standard
Say the different classes are named 0, 1, 2, …, until 
\begin_inset Formula $n$
\end_inset

, so that we have 
\begin_inset Formula $n$
\end_inset

 classes.
\end_layout

\begin_layout Enumerate
Repeat until there is a model for every class:
\end_layout

\begin_deeper
\begin_layout Enumerate
Pick a class for which there is no model yet.
\end_layout

\begin_layout Enumerate
Learn a model, which predicts, whether a data point belongs to that class
 or not.
\end_layout

\end_deeper
\begin_layout Standard
We will have multiple models, each making predictions for data points belonging
 to one specific class or not.
 Although each model only learned a binary classification of its specific
 class, taken all together, we have a model for multiple classes.
 The case of 2 or more binary classification models all predicting a data
 point to belong to their class can be resolved by looking at how confident
 each of those models is in its prediction or we could make a random choice
 about which model we trust more.
\end_layout

\begin_layout Standard
Taken this into consideration, we need not to worry about multiple classes.
 It is only necessary to think about binary classification for exploring
 logistic regression.
 So from here on, assume binary classification.
\end_layout

\begin_layout Subsection
Calculating classes
\end_layout

\begin_layout Standard
The approach to predicting classes in logistic regression is still to calculate
 a value based on a linear function of the features, optimizing some weights
 
\begin_inset Formula $\theta$
\end_inset

, just like in linear regression.
 However, instead of simply introducing an additional threshold value for
 making the decision of what label to assign to a data point, the hypothesis
 function of logistic regression makes use of the so called logistic function
 or sigmoid function, to calculate a probability of a data point belonging
 to one class or the other.
\end_layout

\begin_layout Standard
TODO: explain the hypothesis function of logistic regression
\end_layout

\begin_layout Section
Scoring Function
\end_layout

\begin_layout Standard
To determine a class for a data point we need a way to calculate some kind
 of value from the feature values of the data point.
 A function doing this is called a 
\emph on
scoring function
\emph default
:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\score(x_{i})
\]

\end_inset


\end_layout

\begin_layout Section
Weights Vector
\end_layout

\begin_layout Standard
The scoring function needs to operate on the feature values or a subset of
 them.
 In linear regression we use one weight per feature, which gives us a weight
 vector 
\begin_inset Formula $w_{k}$
\end_inset

.
 This weight vector decides, how much the value of a feature influences
 the overall score.
 The weights vector will need to be learned in the learning phase of the
 model.
 As such the 
\begin_inset Formula $w_{k}$
\end_inset

 will be updated, possibly many times, before arriving at the optimal weights
 vector 
\begin_inset Formula $\hat{w}$
\end_inset

.
 The index 
\begin_inset Formula $k$
\end_inset

 stands for the current weight vector.
 Single weights of a 
\begin_inset Formula $w_{k}$
\end_inset

 will be expressed by using square brackets: 
\begin_inset Formula $w_{1}[j]$
\end_inset

, where 
\begin_inset Formula $j$
\end_inset

 is the feature index for the 
\begin_inset Formula $j$
\end_inset

-th feature.
\end_layout

\begin_layout Section
Feature Function
\end_layout

\begin_layout Standard
Furthermore the scoring function can operate on the actual feature values
 or apply a transformation on them before we use them.
 Such a transformation for one feature is expressed by a 
\emph on
feature function
\emph default
 
\begin_inset Formula $h_{j}$
\end_inset

 for the 
\begin_inset Formula $j$
\end_inset

-th feature:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
h_{j}(x_{i})
\]

\end_inset


\end_layout

\begin_layout Standard
The feature function for all features of a data point 
\begin_inset Formula $x_{i}$
\end_inset

 is then:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
h(x_{i})
\]

\end_inset


\end_layout

\begin_layout Section
Scoring Function in Detail
\end_layout

\begin_layout Standard
To apply the weighting of features given by the weights vector to the features
 and apply any chosen feature function, we multiply the transposed (indicated
 by 
\begin_inset Formula $\square^{T}$
\end_inset

) weight vector with the feature function of the features:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\score(w_{k}^{T}\cdot h(x_{i}))\in\setr\cup\lbrace-\infty,+\infty\rbrace
\]

\end_inset


\end_layout

\begin_layout Standard
This is nothing other but the following sum:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\sum_{j=0}^{n}(w_{k}[j]\cdot h_{j}(x_{i}))
\]

\end_inset


\end_layout

\begin_layout Standard
For linear regression we would be done at this step.
 The score is the value, which the model predicts.
 For logistic regression, we need to do more, because we need to make a
 decision about a class assignment for data points based on their score.
 The actual classification part still needs to be done.
\end_layout

\begin_layout Section
Estimate of the Probability of a Class
\end_layout

\begin_layout Standard
To classify a data point we need to calculate from the score of the data
 point an estimate of how likely the data point is in a class.
 This will only be an estimate, because the real process behind the creation
 of the data might not be a linear process.
 Probabilities lie between 0 and 1.
 However, the scoring function gives real numbers between 
\begin_inset Formula $-\infty$
\end_inset

 and 
\begin_inset Formula $+\infty$
\end_inset

.
 This is why we need yet another function to map from the target set of
 values of the scoring function to 
\begin_inset Formula $(0;1)$
\end_inset

.
 The so called 
\emph on
link function
\emph default
:
\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\link}{\operatorname{link}}
{\mathrm{link}}
\end_inset


\begin_inset Formula 
\[
\hat{P}(y=1|x_{i})=\link(\score(x_{i}))=g(\score(x_{i}))
\]

\end_inset


\end_layout

\begin_layout Standard
(
\begin_inset Formula $\hat{P}$
\end_inset

 stands for 
\begin_inset Quotes ald
\end_inset

the best possible estimate from this model
\begin_inset Quotes ard
\end_inset

, where 
\begin_inset Formula $P$
\end_inset

 is only one estimate of probability.)
\end_layout

\begin_layout Standard
The link function 
\begin_inset Formula $g$
\end_inset

 for logistic regression is the 
\emph on
sigmoid function
\emph default
:
\end_layout

\begin_layout Standard
\begin_inset FormulaMacro
\newcommand{\sigmoid}{\operatorname{sigmoid}}
{\mathrm{sigmoid}}
\end_inset


\begin_inset Formula 
\[
\sigmoid(v)=\frac{1}{1+e^{(-v)}}
\]

\end_inset


\end_layout

\begin_layout Standard
Applied to the score of a data point:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\sigmoid(\score(x_{i}))=\frac{1}{1+e^{(-\score(x_{i}))}}
\]

\end_inset


\end_layout

\begin_layout Standard
If the value of the link function is 
\begin_inset Formula $>0.5$
\end_inset

 it means, that the probability the model estimates, that the data point
 is in class 1, is higher than 50%, which means the model classifies the
 data point as part of class 1.
\end_layout

\begin_layout Part
Optimization of Weights
\end_layout

\begin_layout Standard
Now that we know, how the probability of a data point belonging to a class
 is estimated by the model, we still need a way to get the optimal weights,
 which the score is based on.
 We need a way to optimize them based on the data points presented to the
 model during the learning phase.
 Optimizing the weights is what actually makes the learning of the model.
 The result will be 
\begin_inset Formula $\hat{w}$
\end_inset

 the optimal weights.
\end_layout

\begin_layout Standard
What we need are three things:
\end_layout

\begin_layout Enumerate
an initial vector of weights 
\begin_inset Formula $w_{0}$
\end_inset


\end_layout

\begin_layout Enumerate
\begin_inset FormulaMacro
\newcommand{\q}{\operatorname{q}}
{\mathrm{q}}
\end_inset

a function for calculating how good a vector of weights is (quality measure
 
\begin_inset Formula $\q$
\end_inset

) or a function for calculating how much error we make when using the weights
 vector to make predictions (error measure, cost function)
\end_layout

\begin_layout Enumerate
a way of updating the weights, so that there is a chance of getting better
 weights
\end_layout

\begin_layout Section
Initial vector of weights
\end_layout

\begin_layout Standard
The initial vector of weights can be chosen at random or set to some predefined
 value.
\end_layout

\begin_layout Section
Quality Measure
\end_layout

\begin_layout Standard
Lets enumerate things we can deduce about a quality measure 
\begin_inset Formula $\q$
\end_inset

 for the weights vector:
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 should measure the effect the weights vector has on the score for all data
 points, which are use to learn the model.
 This is the effect on predictions made by the model using the weights vector.
 If 
\begin_inset Formula $\q$
\end_inset

 did not consider all data points, we would risk missing errors in prediction
 for some data points.
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 should return values within known bounds, otherwise we do not know
 how to interpret the values.
 We would not know, whether a value expresses a good quality or a bad quality
 of the weights vector.
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 should somehow relate to the results of 
\begin_inset Formula $w^{T}\cdot h(x_{i})$
\end_inset

 (the score) for all 
\begin_inset Formula $x_{i}\in X$
\end_inset

.
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 should give a value meaning good quality, if both of the following are
 true for most data points:
\end_layout

\begin_deeper
\begin_layout Itemize
\begin_inset Formula $w_{k}$
\end_inset

 causes the result of the link function of the score to be closer to 1,
 if the data point really is of class 1.
\end_layout

\begin_layout Itemize
\begin_inset Formula $w_{k}$
\end_inset

 causes the result of the link function of the score to be closer to 0,
 if the data point really is of
 class 0.
\end_layout

\end_deeper
\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 should give a value meaning bad quality, if both of the following are true
 for most data points:
\end_layout

\begin_deeper
\begin_layout Itemize
\begin_inset Formula $w_{k}$
\end_inset

 causes the result of the link function of the score to be closer to 0,
 if the data point really is of class 1.
\end_layout

\begin_layout Itemize
\begin_inset Formula $w_{k}$
\end_inset

 causes the result of the link function of the score to be closer to 1,
 if the data point really is of class 0.
\end_layout

\end_deeper
\begin_layout Standard
So what could 
\begin_inset Formula $\q$
\end_inset

 be?
\begin_inset FormulaMacro
\renewcommand{\c}{\operatorname{c}}
{\mathrm{c}}
\end_inset


\end_layout

\begin_layout Standard
We can calculate the probabilities for all data points, which the model
 estimates using the current weights vector 
\begin_inset Formula $w_{k}$
\end_inset

, where 
\begin_inset Formula $y_{i}$
\end_inset

 is the function giving the correct class of a data point according to
 the labels in the data set, that is, the class which the data point really
 has according to observation:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
P(y=y_{i}\mid x_{0},w_{k})
\]

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
P(y=y_{i}\mid x_{1},w_{k})
\]

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
P(y=y_{i}\mid x_{2},w_{k})
\]

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\vdots
\]

\end_inset


\end_layout

\begin_layout Standard
All those probabilities are between 0 and 1.
 To be more precise, they cannot be exactly 0 or 1, because the link function,
 
\begin_inset Formula $\sigmoid$
\end_inset

, never really becomes 0 or 1 for any value 
\begin_inset Formula $\in\setr$
\end_inset

.
 This means, that their product will also be a value in the interval 
\begin_inset Formula $(0;1)$
\end_inset

.
 We could use the product of all estimated probabilities as a quality measure:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\q(X,w_{k})=\prod_{i=0}^{N}(P(y=y_{i}\mid x_{i},w_{k}))
\]

\end_inset


\end_layout

\begin_layout Standard
In natural language: 
\emph on

\begin_inset Quotes ald
\end_inset

The quality 
\begin_inset Formula $\q$
\end_inset

 of a given weight vector 
\begin_inset Formula $w_{k}$
\end_inset

 for a data set 
\begin_inset Formula $X$
\end_inset

 is the product of estimates of probability for all data points 
\begin_inset Formula $x_{i}$
\end_inset

 in 
\begin_inset Formula $X$
\end_inset

.
\begin_inset Quotes ard
\end_inset


\end_layout

\begin_layout Standard
(This is also called the 
\begin_inset Quotes ald
\end_inset

data likelihood
\begin_inset Quotes ard
\end_inset

.) Things to note about this product:
\end_layout

\begin_layout Itemize
If all the probabilities were perfectly estimated and close to 1 for the
 correct classes, the result of this product would also be close to 1.
\end_layout

\begin_layout Itemize
If the probabilities are badly estimated by the weights and close to 0 for
 the correct classes, always predicting the wrong class, the result of the
 product would also be close to 0.
\end_layout

\begin_layout Itemize
Furthermore this considers all data points as required.
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 relates to the 
\begin_inset Formula $\score$
\end_inset

 of a data point, because the scores are used to calculate the estimate
 for the probabilities, which 
\begin_inset Formula $\q$
\end_inset

 makes a product of.
\end_layout

\begin_layout Itemize
\begin_inset Formula $\q$
\end_inset

 outputs values in the interval 
\begin_inset Formula $(0;1)$
\end_inset

, if we only ever allow weights 
\begin_inset Formula $w_{k}[j]>0$
\end_inset

.
\end_layout

\begin_layout Standard
It seems, that this product of estimates of probabilities satisfies all
 of our requirements.
\end_layout

\begin_layout Standard
We would want this 
\begin_inset Formula $\q(X,w_{k})$
\end_inset

 to be as close to 1 as possible, as far away from 0 as possible.
 This means we need to maximize it.
 What we are looking at is an optimization problem, more specifically a
 maximization problem:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
\max_{w}(\q(X,w_{k}))=\max_{w}\left(\prod_{i=0}^{N}\left(\frac{1}{1+e^{(-\score(x_{i}))}}\right)\right)
\]

\end_inset


\end_layout

\begin_layout Standard
The result will be 
\begin_inset Formula $\hat{w}$
\end_inset

.
 To get 
\begin_inset Formula $\hat{w}$
\end_inset

 will be the main goal of the linear regression or logistic regression algorithm.
\end_layout

\begin_layout Standard
To maximize the quality of the weights, we can use the so called 
\emph on
gradient ascent
\emph default
 algorithm.
\end_layout

\begin_layout Subsection
Weights Vector Update Function
\end_layout

\begin_layout Standard
To maximize the quality of the predictions of the model, the weights need
 to be optimized.
 We need a formula for iteratively updating the weights.
 The formula we need is:
\end_layout

\begin_layout Standard
\begin_inset Formula 
\[
w_{k+1}[j]=w_{k}[j]+\alpha\cdot\frac{\partial\q(X,w_{k})}{\partial w_{k}[j]}
\]

\end_inset


\end_layout

\begin_layout Standard
In natural language: 
\emph on

\begin_inset Quotes ald
\end_inset

The weights vector 
\begin_inset Formula $w_{k}$
\end_inset

 is updated by the derivative of the quality measure (slope in the quality
 measure plot landscape) with regard to the current weights.
\begin_inset Quotes ard
\end_inset


\end_layout

\begin_layout Standard
However, let us get there step by step, so that the formula is explained.
\end_layout

\begin_layout Enumerate
We move stepwise from the current values of 
\begin_inset Formula $w_{k}$
\end_inset

 to a hopefully better 
\begin_inset Formula $w_{k+1}$
\end_inset

, 
\begin_inset Formula $w_{k+2}$
\end_inset

, 
\begin_inset Formula $w_{k+3}$
\end_inset

 and so on, where the next value is always based on the previous value.
 Each vector of weights stores some progress on our way to the top of the
 hill.
 The next values of the weights vector are based on the current values,
 to make use of the already made progress.
 That is why the current weights are part of the updated value in the formula.
 In theory one could take completely arbitrary updated weight values, but
 that would not guarantee progress towards the top of the hill.
\end_layout

\begin_layout Enumerate
In general the way to find the maximum or minimum of a function is to take
 its derivative and find the point, where the slope is 0, because that will
 be in a valley (local minimum) or on top of a hill (local maximum) visually.
\end_layout

\begin_layout Enumerate
If we take the derivative of the quality measure and walk in its direction,
 we will walk towards the (local) optimum.
 We can imagine this visually by imagining standing on a slope of a hill.
 The mathematical slope of the hill at that point will point downwards and
 upwards the hill.
 If we move upwards in the direction it is pointing, we are bound to move
 a bit higher.
 If we repeat this process multiple times, we are bound to arrive at higher
 and higher places.
\end_layout

\begin_layout Enumerate
Wanting to find an optimum and walking in the direction of the slope together
 mean, that we need to take the derivative of the quality measure, because
 the derivative gives us the slope at all points.
\end_layout

\begin_layout Enumerate
The slope and thereby the direction of the slope is determined by the derivative
 of the quality measure.
 However, there are multiple derivatives one can derive from the quality
 measure function, because there are multiple independent weights in the
 weights vector.
 For each of the weights one can derive a partial derivative.
 To get closer to the local (potentially global) maximum of the quality
 measure function, one needs to consider all the weights, which means deriving
 all the derivatives and updating all the weights.
 This is called a 
\emph on
total derivative
\emph default
.
 This will result in a step in the direction of the slope of the quality
 measure function.
 So this is why we take partial derivatives and change each weight according
 to its partial derivative.
\end_layout

\begin_layout Enumerate
TODO: Why do we need a step width 
\begin_inset Formula $\alpha$
\end_inset

?
\end_layout

\begin_layout Enumerate
For each weight in 
\begin_inset Formula $w_{k}$
\end_inset

 we need to figure out, what the update for that weight should be.
 We need to update each of the weights in 
\begin_inset Formula $w_{k}$
\end_inset

 (so all 
\begin_inset Formula $w_{k}[j]$
\end_inset

) based on the derivative with regard to weight itself.
 This is called partial derivative.
 
\end_layout

\begin_layout Standard
TODO
\end_layout

\begin_layout Section
Cost Function
\end_layout

\begin_layout Standard
An alternative to the data likelihood is making use of a cost function,
 whose interpretation is not, how good the weights vector 
\begin_inset Formula $w_{k}$
\end_inset

 is, but instead how bad it is, by calculating the error in prediction,
 which is made by the model using the weight vector 
\begin_inset Formula $w_{k}$
\end_inset

.
 One would want to minimize error in prediction to get the best weights.
 This means, that the optimization problem using a cost function becomes
 a minimization problem, instead of a maximization problem.
\end_layout

\begin_layout Subsection
Weights Vector Update Function
\end_layout

\begin_layout Standard
TODO
\end_layout

\begin_layout Standard
TODO: pipes in math formulas -> correct tex math character
\end_layout

\begin_layout Part
Gradient Ascent Algorithm
\end_layout

\begin_layout Part
Miscellaneous
\end_layout

\begin_layout Section
Model Persistence
\end_layout

\begin_layout Standard
If one stores the learned weights vector 
\begin_inset Formula $w_{k}$
\end_inset

, one basically persists the linear regression or logistic regression model,
 because the usage of the weights vector is defined in the algorithm.
\end_layout

\end_body
\end_document
