\chapter{Statistical Learning Theory}
\label{app:statistical_learning}

% Set section numbering for Appendix D
\renewcommand{\thesection}{D.\arabic{section}}
\renewcommand{\thesubsection}{D.\arabic{section}.\arabic{subsection}}
\renewcommand{\thesubsubsection}{D.\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}

This appendix presents the core concepts from statistical learning theory that underpin the theoretical analysis of \ClaudeCode{} systems. We cover the PAC learning framework, generalization bounds, concentration inequalities, online learning, and multi-armed bandit theory, with particular emphasis on their applications to AI-powered code intelligence systems.

\section{PAC Learning Framework}

\subsection{Basic Setup}

\begin{definition}[Learning Problem]
\label{def:learning_problem}
A learning problem consists of:
\begin{itemize}
    \item Instance space $\mathcal{X}$
    \item Label space $\mathcal{Y}$ (e.g., $\{0, 1\}$ for binary classification)
    \item Unknown distribution $\mathcal{D}$ over $\mathcal{X} \times \mathcal{Y}$
    \item Hypothesis class $\mathcal{H}$ of functions $h: \mathcal{X} \rightarrow \mathcal{Y}$
    \item Loss function $\ell: \mathcal{Y} \times \mathcal{Y} \rightarrow \R^+$
\end{itemize}
\end{definition}

\begin{definition}[Risk and Empirical Risk]
\label{def:risk}
For hypothesis $h \in \mathcal{H}$:
\begin{align}
\text{Population risk: } \quad R(h) &= \E_{(x,y) \sim \mathcal{D}}[\ell(h(x), y)] \\
\text{Empirical risk: } \quad \hat{R}_S(h) &= \frac{1}{m} \sum_{i=1}^m \ell(h(x_i), y_i)
\end{align}
where $S = \{(x_1, y_1), \ldots, (x_m, y_m)\}$ is the training sample.
\end{definition}

\begin{definition}[PAC Learnability]
\label{def:pac_learning}
A hypothesis class $\mathcal{H}$ is PAC learnable (in the agnostic sense) if there exist an algorithm $A$ and a polynomial function $p(\cdot, \cdot, \cdot)$ such that for every $\epsilon, \delta \in (0, 1)$ and every distribution $\mathcal{D}$ over $\mathcal{X} \times \mathcal{Y}$, whenever $m \geq p(1/\epsilon, 1/\delta, n)$, where $n$ is the representation size, then with probability at least $1 - \delta$ over the draw of the sample $S$:
\begin{equation}
R(A(S)) - \min_{h \in \mathcal{H}} R(h) \leq \epsilon
\end{equation}
The realizable (standard) PAC setting additionally assumes that some $h \in \mathcal{H}$ attains $R(h) = 0$, in which case the guarantee reads $R(A(S)) \leq \epsilon$.
\end{definition}

\subsection{Fundamental Results}

\begin{theorem}[Fundamental Theorem of PAC Learning]
\label{thm:fundamental_pac}
For binary classification with 0-1 loss, the following are equivalent:
\begin{enumerate}
    \item $\mathcal{H}$ is PAC learnable
    \item $\mathcal{H}$ is agnostically PAC learnable  
    \item The VC dimension $\text{VCdim}(\mathcal{H})$ is finite
    \item $\mathcal{H}$ has the uniform convergence property
\end{enumerate}
\end{theorem}

\begin{definition}[Empirical Risk Minimization (ERM)]
\label{def:erm}
The ERM algorithm outputs:
\begin{equation}
\hat{h}_S \in \arg\min_{h \in \mathcal{H}} \hat{R}_S(h)
\end{equation}
\end{definition}

\begin{theorem}[ERM Sample Complexity]
\label{thm:erm_sample_complexity}
For ERM with hypothesis class of VC dimension $d$ and 0-1 loss, the sample complexity is:
\begin{equation}
m(\epsilon, \delta) = O\left(\frac{d + \log(1/\delta)}{\epsilon^2}\right)
\end{equation}
\end{theorem}

\section{VC Dimension and Sample Complexity}

\subsection{VC Dimension}

\begin{definition}[Shattering]
\label{def:shattering}
A hypothesis class $\mathcal{H}$ shatters a finite set $C = \{x_1, \ldots, x_m\} \subseteq \mathcal{X}$ if for every subset $S \subseteq C$, there exists $h \in \mathcal{H}$ such that:
\begin{equation}
S = \{x_i \in C : h(x_i) = 1\}
\end{equation}
\end{definition}

\begin{definition}[VC Dimension]
\label{def:vc_dimension}
The VC dimension of hypothesis class $\mathcal{H}$ is:
\begin{equation}
\text{VCdim}(\mathcal{H}) = \max\{m : \exists C \subseteq \mathcal{X}, |C| = m \text{ such that } \mathcal{H} \text{ shatters } C\}
\end{equation}
\end{definition}

\begin{example}[VC Dimension Examples]
\label{ex:vc_examples}
\begin{itemize}
    \item Linear classifiers in $\R^d$: $\text{VCdim} = d + 1$
    \item Axis-aligned rectangles in $\R^2$: $\text{VCdim} = 4$
    \item Neural networks with $W$ parameters: $\text{VCdim} = O(W \log W)$
    \item Decision trees of depth $k$: $\text{VCdim} = O(2^k)$
\end{itemize}
\end{example}

\begin{theorem}[Sauer-Shelah Lemma]
\label{thm:sauer_shelah}
For hypothesis class $\mathcal{H}$ with $\text{VCdim}(\mathcal{H}) = d$ and any set $C$ of size $m$:
\begin{equation}
|\mathcal{H}_C| \leq \sum_{i=0}^d \binom{m}{i} \leq \left(\frac{em}{d}\right)^d
\end{equation}
where $\mathcal{H}_C = \{(h(x_1), \ldots, h(x_m)) : h \in \mathcal{H}\}$ for $C = \{x_1, \ldots, x_m\}$.
\end{theorem}

\subsection{Generalization Bounds}

\begin{theorem}[VC Generalization Bound]
\label{thm:vc_bound}
For any $\delta > 0$ and hypothesis class with VC dimension $d$, with probability at least $1 - \delta$ over the choice of training set $S$ of size $m$:
\begin{equation}
\sup_{h \in \mathcal{H}} |R(h) - \hat{R}_S(h)| \leq \sqrt{\frac{8d \log(2em/d) + 8\log(4/\delta)}{m}}
\end{equation}
\end{theorem}

\begin{theorem}[Rademacher Complexity Bound]
\label{thm:rademacher_bound}
For any $\delta > 0$, with probability at least $1 - \delta$:
\begin{equation}
\sup_{h \in \mathcal{H}} |R(h) - \hat{R}_S(h)| \leq 2\mathcal{R}_S(\mathcal{H}) + \sqrt{\frac{2\log(2/\delta)}{m}}
\end{equation}
where $\mathcal{R}_S(\mathcal{H}) = \E_{\sigma}\left[\sup_{h \in \mathcal{H}} \frac{1}{m}\sum_{i=1}^m \sigma_i h(x_i)\right]$ is the empirical Rademacher complexity.
\end{theorem}

\section{Concentration Inequalities}

\subsection{Basic Concentration Results}

\begin{theorem}[Hoeffding's Inequality (Extended)]
\label{thm:hoeffding_extended}
Let $X_1, \ldots, X_n$ be independent random variables with $X_i \in [a_i, b_i]$ almost surely. For any $t > 0$:
\begin{equation}
\Prob\left(\sum_{i=1}^n X_i - \E\left[\sum_{i=1}^n X_i\right] \geq t\right) \leq \exp\left(-\frac{2t^2}{\sum_{i=1}^n (b_i - a_i)^2}\right)
\end{equation}
\end{theorem}

\begin{theorem}[Bennett's Inequality]
\label{thm:bennett}
Let $X_1, \ldots, X_n$ be independent random variables with $\E[X_i] = 0$ and $|X_i| \leq M$ almost surely. Let $\sigma^2 = \frac{1}{n}\sum_{i=1}^n \Var(X_i)$. Then for any $t > 0$:
\begin{equation}
\Prob\left(\frac{1}{n}\sum_{i=1}^n X_i \geq t\right) \leq \exp\left(-\frac{n\sigma^2}{M^2}h\left(\frac{Mt}{\sigma^2}\right)\right)
\end{equation}
where $h(u) = (1+u)\log(1+u) - u$.
\end{theorem}

\subsection{Martingale-Based Inequalities}

\begin{definition}[Martingale]
\label{def:martingale}
A sequence $\{X_t\}$ is a martingale with respect to filtration $\{\mathcal{F}_t\}$ if:
\begin{itemize}
    \item $X_t$ is $\mathcal{F}_t$-measurable
    \item $\E[|X_t|] < \infty$ for all $t$
    \item $\E[X_{t+1} | \mathcal{F}_t] = X_t$ for all $t$
\end{itemize}
\end{definition}

\begin{theorem}[Azuma's Inequality]
\label{thm:azuma}
Let $\{X_t\}$ be a martingale with bounded differences: $|X_{t+1} - X_t| \leq c_t$ almost surely. Then for any $\lambda > 0$:
\begin{equation}
\Prob(X_n - X_0 \geq \lambda) \leq \exp\left(-\frac{\lambda^2}{2\sum_{i=0}^{n-1} c_i^2}\right)
\end{equation}
\end{theorem}

\begin{theorem}[McDiarmid's Inequality]
\label{thm:mcdiarmid}
Let $X_1, \ldots, X_n$ be independent random variables and let $f: \mathcal{X}^n \rightarrow \R$ satisfy the bounded differences property:
\begin{equation}
\sup_{x_1, \ldots, x_n, x_i'} |f(x_1, \ldots, x_i, \ldots, x_n) - f(x_1, \ldots, x_i', \ldots, x_n)| \leq c_i
\end{equation}
Then for any $t > 0$:
\begin{equation}
\Prob(f(X_1, \ldots, X_n) - \E[f(X_1, \ldots, X_n)] \geq t) \leq \exp\left(-\frac{2t^2}{\sum_{i=1}^n c_i^2}\right)
\end{equation}
\end{theorem}

\section{Online Learning and Regret Bounds}

\subsection{Online Learning Framework}

\begin{definition}[Online Learning Protocol]
\label{def:online_protocol}
At each round $t = 1, 2, \ldots, T$:
\begin{enumerate}
    \item Algorithm chooses prediction $\hat{y}_t \in \mathcal{Y}$
    \item Environment reveals true outcome $y_t \in \mathcal{Y}$
    \item Algorithm suffers loss $\ell(\hat{y}_t, y_t)$
\end{enumerate}
\end{definition}

\begin{definition}[Regret]
\label{def:regret}
The regret against competitor class $\mathcal{H}$ is:
\begin{equation}
\text{Regret}_T = \sum_{t=1}^T \ell(\hat{y}_t, y_t) - \min_{h \in \mathcal{H}} \sum_{t=1}^T \ell(h(x_t), y_t)
\end{equation}
\end{definition}

\subsection{Gradient Descent in Online Setting}

\begin{theorem}[Online Gradient Descent Regret]
\label{thm:ogd_regret}
For convex losses bounded by $G$ and domain diameter $D$, Online Gradient Descent with learning rate $\eta = \frac{D}{G\sqrt{T}}$ achieves:
\begin{equation}
\text{Regret}_T \leq DG\sqrt{T}
\end{equation}
\end{theorem}

\begin{proof}[Proof Sketch]
The key insight is to use the update rule $\mathbf{w}_{t+1} = \Pi_{\mathcal{W}}(\mathbf{w}_t - \eta \nabla \ell_t(\mathbf{w}_t))$ where $\Pi_{\mathcal{W}}$ is projection onto the constraint set. The regret analysis follows from:
\begin{align}
\|\mathbf{w}_{t+1} - \mathbf{u}\|^2 &\leq \|\mathbf{w}_t - \mathbf{u}\|^2 - 2\eta(\ell_t(\mathbf{w}_t) - \ell_t(\mathbf{u})) + \eta^2 G^2
\end{align}
Summing over $t$ and optimizing $\eta$ yields the bound.
\end{proof}

\subsection{Follow-the-Regularized-Leader}

\begin{definition}[FTRL Algorithm]
\label{def:ftrl}
The Follow-the-Regularized-Leader algorithm chooses:
\begin{equation}
\mathbf{w}_t = \arg\min_{\mathbf{w}} \left\{\sum_{s=1}^{t-1} \ell_s(\mathbf{w}) + R(\mathbf{w})\right\}
\end{equation}
where $R(\mathbf{w})$ is a regularization function.
\end{definition}

\begin{theorem}[FTRL Regret Bound]
\label{thm:ftrl_regret}
For an $\alpha$-strongly convex regularizer $R$ and $G$-Lipschitz losses, FTRL achieves:
\begin{equation}
\text{Regret}_T \leq R(\mathbf{u}) - \min_{\mathbf{w}} R(\mathbf{w}) + \frac{G^2 T}{2\alpha}
\end{equation}
for any competitor $\mathbf{u}$.
\end{theorem}

\section{Multi-Armed Bandits Fundamentals}

\subsection{Stochastic Multi-Armed Bandits}

\begin{definition}[Multi-Armed Bandit]
\label{def:mab}
A $K$-armed bandit consists of:
\begin{itemize}
    \item $K$ arms (actions) with unknown reward distributions $\nu_1, \ldots, \nu_K$
    \item Mean rewards $\mu_1, \ldots, \mu_K$ where $\mu_i = \E[X_{i,t}]$ for reward $X_{i,t}$ from arm $i$ at time $t$
    \item Gap parameters $\Delta_i = \mu^* - \mu_i$ where $\mu^* = \max_j \mu_j$
\end{itemize}
\end{definition}

\begin{definition}[Regret for Bandits]
\label{def:bandit_regret}
The pseudo-regret after $T$ rounds is:
\begin{equation}
R_T = T\mu^* - \E\left[\sum_{t=1}^T \mu_{A_t}\right]
\end{equation}
where $A_t$ is the arm played at time $t$.
\end{definition}

\subsection{Upper Confidence Bounds}

\begin{definition}[UCB Algorithm]
\label{def:ucb}
The UCB algorithm selects arm:
\begin{equation}
A_t = \arg\max_{i \in [K]} \left\{\hat{\mu}_{i,t} + \sqrt{\frac{2\log t}{T_{i,t}}}\right\}
\end{equation}
where $\hat{\mu}_{i,t}$ is the empirical mean and $T_{i,t}$ is the number of times arm $i$ was played up to time $t$.
\end{definition}

\begin{theorem}[UCB Regret Bound]
\label{thm:ucb_regret}
For $K$-armed bandit with rewards in $[0,1]$, UCB achieves:
\begin{equation}
\E[R_T] \leq \sum_{i: \Delta_i > 0} \frac{8\log T}{\Delta_i} + \left(1 + \frac{\pi^2}{3}\right)\sum_{i=1}^K \Delta_i
\end{equation}
\end{theorem}

\subsection{Lower Bounds}

\begin{theorem}[Lai-Robbins Lower Bound]
\label{thm:lai_robbins}
For any consistent algorithm (one whose regret is $o(T^\alpha)$ for every $\alpha > 0$ on every instance) and any bandit instance with reward distributions $\nu_1, \ldots, \nu_K$:
\begin{equation}
\liminf_{T \rightarrow \infty} \frac{\E[R_T]}{\log T} \geq \sum_{i: \Delta_i > 0} \frac{\Delta_i}{\text{KL}(\nu_i, \nu^*)}
\end{equation}
where $\text{KL}(\nu_i, \nu^*)$ is the KL divergence between the reward distribution of arm $i$ and that of an optimal arm.
\end{theorem}

\subsection{Contextual Bandits}

\begin{definition}[Linear Contextual Bandit]
\label{def:linear_contextual}
At time $t$:
\begin{enumerate}
    \item Observe context $\mathbf{x}_t \in \R^d$
    \item Choose arm $A_t \in [K]$  
    \item Receive reward $Y_t = \mathbf{x}_t^T \boldsymbol{\beta}_{A_t} + \eta_t$
\end{enumerate}
where $\boldsymbol{\beta}_i \in \R^d$ are unknown parameters and $\eta_t$ is noise.
\end{definition}

\begin{definition}[LinUCB Algorithm]
\label{def:linucb}
LinUCB maintains estimates $\hat{\boldsymbol{\beta}}_{i,t}$ and confidence matrices $\mathbf{A}_{i,t}$, then selects:
\begin{equation}
A_t = \arg\max_{i \in [K]} \left\{\mathbf{x}_t^T \hat{\boldsymbol{\beta}}_{i,t} + \alpha \sqrt{\mathbf{x}_t^T \mathbf{A}_{i,t}^{-1} \mathbf{x}_t}\right\}
\end{equation}
\end{definition}

\begin{theorem}[LinUCB Regret Bound]
\label{thm:linucb_regret_bound}
For bounded contexts $\|\mathbf{x}_t\| \leq 1$, parameters $\|\boldsymbol{\beta}_i\| \leq S$, and appropriate choice of $\alpha$, LinUCB achieves regret:
\begin{equation}
R_T = O\left(d\sqrt{T \log T}\right)
\end{equation}
\end{theorem}

\section{Learning in Code Intelligence Systems}

\subsection{Tool Selection as Bandit Problem}

In \ClaudeCode{} systems, tool selection naturally fits the contextual bandit framework:

\begin{definition}[Code Intelligence Contextual Bandit]
\label{def:code_bandit}
\begin{itemize}
    \item Context $\mathbf{x}_t$: task description, codebase features, system state
    \item Arms: available tools $\{$Read, Write, Bash, Grep, etc.$\}$
    \item Reward: task completion quality/efficiency metrics
    \item Goal: maximize cumulative code intelligence performance
\end{itemize}
\end{definition}

\subsection{Sample Complexity for Code Understanding}

\begin{theorem}[Code Understanding Sample Complexity]
\label{thm:code_sample_complexity}
For hypothesis class $\mathcal{H}$ representing code understanding functions with VC dimension $d$, achieving $(\epsilon, \delta)$-PAC learning in the agnostic setting requires:
\begin{equation}
m = O\left(\frac{d + \log(1/\delta)}{\epsilon^2}\right)
\end{equation}
training examples from the code distribution, consistent with Theorem~\ref{thm:erm_sample_complexity}; in the realizable setting this improves to $O\bigl((d \log(1/\epsilon) + \log(1/\delta))/\epsilon\bigr)$.
\end{theorem}

\begin{corollary}[Implications for \ClaudeCode{}]
\label{cor:claude_implications}
The sample complexity scales polynomially with the complexity of the code understanding task, suggesting that efficient learning is possible with sufficient training data on diverse codebases.
\end{corollary}

\subsection{Online Learning for Dynamic Environments}

Code intelligence systems operate in dynamic environments where:
\begin{itemize}
    \item New programming languages and frameworks emerge
    \item Coding patterns evolve over time
    \item User preferences and task distributions shift
\end{itemize}

\begin{theorem}[Adaptivity to Distribution Shift]
\label{thm:distribution_shift_adaptivity}
For gradually changing distributions with total variation distance $\epsilon_T$ from the initial distribution, an online learning algorithm with adaptive regularization achieves excess regret:
\begin{equation}
\text{ExcessRegret}_T = O(\sqrt{T \epsilon_T} + \sqrt{T \log K})
\end{equation}
compared to the best fixed policy.
\end{theorem}

This theoretical framework provides the foundation for understanding learning and adaptation in AI-powered code intelligence systems, with direct applications to the algorithms and performance guarantees developed in the main chapters.