\chapter{Mathematical Proofs}
\label{app:proofs}

% Set section numbering for Appendix A
\renewcommand{\thesection}{A.\arabic{section}}
\renewcommand{\thesubsection}{A.\arabic{section}.\arabic{subsection}}
\renewcommand{\thesubsubsection}{A.\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}

This appendix contains detailed proofs of the main theorems presented in the book.

\section{Chapter 2 Proofs}

\subsection{Proof of Theorem~\ref{thm:hierarchical_improvement}}

\begin{proof}[Proof of Hierarchical Policy Improvement]
We prove that hierarchical policy improvement maintains monotonic improvement in expected return.

Let $\pi_{H,i}$ denote the hierarchical policy using options $\{\mathcal{O}_1, \ldots, \mathcal{O}_i\}$ where each option $\mathcal{O}_j$ has been improved relative to baseline policy $\pi_0$.

\textbf{Step 1:} For any individual option $\mathcal{O}_i$, policy improvement ensures:
\begin{equation}
V^{\mathcal{O}_i}(s) \geq V^{\pi_0}(s) \quad \forall s \in I_{\mathcal{O}_i}
\end{equation}

\textbf{Step 2:} The meta-policy selects options greedily:
\begin{equation}
\pi_H(s) = \arg\max_{\mathcal{O}_i} V^{\mathcal{O}_i}(s)
\end{equation}

\textbf{Step 3:} By construction:
\begin{align}
V^{\pi_H}(s) &= \max_{\mathcal{O}_i} V^{\mathcal{O}_i}(s) \\
&\geq V^{\mathcal{O}_j}(s) \quad \forall j \\
&\geq V^{\pi_0}(s)
\end{align}

Therefore, $V^{\pi_H}(s) \geq V^{\pi_0}(s)$ for all states, completing the proof.
\end{proof}

\subsection{Proof of Theorem~\ref{thm:pareto_set}}

\begin{proof}[Proof of Pareto Set Convexity]
We show that the Pareto optimal set forms a convex hull in objective space.

Let $\pi_1, \pi_2$ be two Pareto optimal policies with objective vectors $\mathbf{u}_1, \mathbf{u}_2$ respectively.

Consider the mixture policy $\pi_\lambda$ that, at the start of each episode, executes $\pi_1$ with probability $\lambda$ and $\pi_2$ with probability $1-\lambda$, for $\lambda \in [0,1]$. (Note that this episode-level randomization, written $\pi_\lambda = \lambda \pi_1 + (1-\lambda) \pi_2$, is what makes the expected return linear in $\lambda$; a per-state convex combination of action probabilities would not have this property.)

The expected objectives under $\pi_\lambda$ are:
\begin{equation}
\E[\mathbf{U}^{\pi_\lambda}] = \lambda \E[\mathbf{U}^{\pi_1}] + (1-\lambda) \E[\mathbf{U}^{\pi_2}] = \lambda \mathbf{u}_1 + (1-\lambda) \mathbf{u}_2
\end{equation}

This follows from the linearity of expectation and shows that the objective vector lies on the line segment connecting $\mathbf{u}_1$ and $\mathbf{u}_2$.

Since every point on the segment between two achievable objective vectors is itself achievable by such a mixture, the set of achievable objective vectors is convex; the Pareto frontier therefore lies on the boundary of this convex set.
\end{proof}

\section{Chapter 3 Proofs}

\subsection{Proof of Theorem~\ref{thm:linucb_regret}}

\begin{proof}[Proof of LinUCB Regret Bound]
The proof follows the standard LinUCB analysis with modifications for the multi-tool setting.

\textbf{Step 1: Confidence Bounds}
With probability at least $1-\delta$, for all tools $t$ and times $\tau$:
\begin{equation}
|x_\tau^T(\hat{\beta}_\tau(t) - \beta^*(t))| \leq \alpha \sqrt{x_\tau^T A_\tau(t)^{-1} x_\tau}
\end{equation}

\textbf{Step 2: Regret Decomposition}
The instantaneous regret at time $t$ is:
\begin{align}
r_t &= x_t^T \beta^*(t^*) - x_t^T \beta^*(t_t) \\
&\leq x_t^T \hat{\beta}_t(t^*) + \alpha\sqrt{x_t^T A_t(t^*)^{-1} x_t} - x_t^T \hat{\beta}_t(t_t) + \alpha\sqrt{x_t^T A_t(t_t)^{-1} x_t} \\
&\leq 2\alpha\sqrt{x_t^T A_t(t_t)^{-1} x_t}
\end{align}

where the last inequality uses the optimality of LinUCB's selection.

\textbf{Step 3: Summation}
The cumulative regret is:
\begin{align}
\sum_{t=1}^T r_t &\leq 2\alpha \sum_{t=1}^T \sqrt{x_t^T A_t(t_t)^{-1} x_t}
\end{align}

\textbf{Step 4: Elliptic Potential Lemma}
Applying the elliptic potential lemma:
\begin{equation}
\sum_{t=1}^T \sqrt{x_t^T A_t(t_t)^{-1} x_t} \leq \sqrt{2 T d \log\left(1 + \frac{T}{d\lambda}\right)}
\end{equation}

This completes the proof.
\end{proof}

\section{Chapter 4 Proofs}

\subsection{Proof of Theorem~\ref{thm:greedy_optimal}}

\begin{proof}[Proof of Greedy Context Selection Optimality]
We prove both the approximation ratio and the hardness result.

\textbf{Part 1: Approximation Ratio}
Let $S^*$ denote the optimal solution and $S_k$ the greedy solution after $k$ steps.

At step $i$, the greedy algorithm selects:
\begin{equation}
v_i = \arg\max_{v \notin S_{i-1}} [f(S_{i-1} \cup \{v\}) - f(S_{i-1})]
\end{equation}

By submodularity, this marginal gain is at least:
\begin{equation}
f(S_{i-1} \cup \{v_i\}) - f(S_{i-1}) \geq \frac{f(S^*) - f(S_{i-1})}{|S^*|} \geq \frac{f(S^*) - f(S_{i-1})}{k}
\end{equation}

This gives us the recurrence:
\begin{equation}
f(S_i) - f(S_{i-1}) \geq \frac{f(S^*) - f(S_{i-1})}{k}
\end{equation}

Rearranging:
\begin{equation}
f(S^*) - f(S_i) \leq \left(1 - \frac{1}{k}\right)(f(S^*) - f(S_{i-1}))
\end{equation}

Unrolling this recurrence:
\begin{equation}
f(S^*) - f(S_k) \leq \left(1 - \frac{1}{k}\right)^k f(S^*) \leq \frac{1}{e} f(S^*)
\end{equation}

Therefore: $f(S_k) \geq (1 - 1/e) f(S^*)$.

\textbf{Part 2: Hardness}
The hardness follows from the maximum coverage problem. Given a maximum coverage instance, we can construct a submodular function such that solving it optimally would solve maximum coverage optimally. Since maximum coverage cannot be approximated better than $(1-1/e)$ unless P = NP, the same holds for submodular maximization.
\end{proof}

\section{Information-Theoretic Proofs}

\subsection{Mutual Information Properties}

\begin{lemma}[Submodularity of Mutual Information]
\label{lem:mi_submodular}
For random variables $X, Y, Z$ with $Z = \{Z_1, \ldots, Z_n\}$, where the $Z_i$ are conditionally independent given $(X, Y)$, the function $f(S) = I(X; Z_S | Y)$ where $Z_S = \{Z_i : i \in S\}$ is submodular in $S$.
\end{lemma}

\begin{proof}
For $S \subseteq T$ and $j \notin T$:
\begin{align}
&f(S \cup \{j\}) - f(S) - [f(T \cup \{j\}) - f(T)] \\
&= I(X; Z_j | Y, Z_S) - I(X; Z_j | Y, Z_T) \\
&= H(Z_j | Y, Z_S) - H(Z_j | X, Y, Z_S) - [H(Z_j | Y, Z_T) - H(Z_j | X, Y, Z_T)]
\end{align}

Since $Z_S \subseteq Z_T$, and conditioning on additional variables cannot increase entropy:
\begin{equation}
H(Z_j | Y, Z_S) \geq H(Z_j | Y, Z_T)
\end{equation}

And by the assumed conditional independence of the $Z_i$ given $(X, Y)$:
\begin{equation}
H(Z_j | X, Y, Z_S) = H(Z_j | X, Y, Z_T)
\end{equation}

Therefore, $f(S \cup \{j\}) - f(S) \geq f(T \cup \{j\}) - f(T)$, proving submodularity.
\end{proof}

\section{Queueing Theory Proofs}

\subsection{Proof of Stability Conditions}

\begin{proof}[Proof of System Stability]
Consider the queueing network with arrival rate $\lambda$ and service rates $\mu_i$ for $i = 1, \ldots, k$.

\textbf{Necessary Condition:}
If $\rho = \lambda/\mu_{\text{eff}} \geq 1$, then the arrival rate exceeds the service capacity. By Little's Law, the expected queue length would be infinite, contradicting stability.

\textbf{Sufficient Condition:}
If $\rho < 1$, we construct a Lyapunov function $L(X) = \sum_i X_i$ where $X_i$ is the queue length at station $i$.

The drift of this function is:
\begin{equation}
\E[\Delta L | X] = \lambda - \sum_i \mu_i \mathbf{1}_{X_i > 0}
\end{equation}

When the system is non-empty:
\begin{equation}
\E[\Delta L | X] \leq \lambda - \mu_{\text{eff}} < 0
\end{equation}

This negative drift ensures stability by Foster's criterion.
\end{proof}

\section{Concentration Inequality Proofs}

\subsection{Proof of Performance Concentration}

\begin{proof}[Proof of Theorem \ref{thm:performance_concentration}]
Let $U_1, \ldots, U_T$ be the individual utility realizations, assumed to be independent and bounded in $[U_{\min}, U_{\max}]$.

The empirical average is $\hat{U}_T = \frac{1}{T}\sum_{t=1}^T U_t$.

By Hoeffding's inequality for bounded random variables:
\begin{equation}
P(|\hat{U}_T - \E[U]| \geq \epsilon) \leq 2\exp\left(-\frac{2T\epsilon^2}{(U_{\max} - U_{\min})^2}\right)
\end{equation}

This follows directly from the standard Hoeffding bound applied to the sum of bounded random variables.
\end{proof}

\section{Robustness Proofs}

\subsection{Proof of Distribution Robustness}

\begin{proof}[Proof of Theorem \ref{thm:distribution_robustness}]
Let $P_{\text{old}}$ and $P_{\text{new}}$ be the old and new context distributions with $\|P_{\text{new}} - P_{\text{old}}\|_{TV} \leq \epsilon$.

The performance difference is:
\begin{align}
|\E[U_{\text{new}}] - \E[U_{\text{old}}]| &= \left|\int U(x) (dP_{\text{new}} - dP_{\text{old}})\right| \\
&\leq \int |U(x)| |dP_{\text{new}} - dP_{\text{old}}| \\
&\leq \sup_x |U(x)| \cdot \|P_{\text{new}} - P_{\text{old}}\|_{TV}
\end{align}

If, in addition, $U$ is $L$-Lipschitz and vanishes at some reference point of $\mathcal{X}$, then $\sup_x |U(x)| \leq L \cdot \text{diam}(\mathcal{X})$, giving the bound:
\begin{equation}
|\E[U_{\text{new}}] - \E[U_{\text{old}}]| \leq L \cdot \text{diam}(\mathcal{X}) \cdot \epsilon
\end{equation}
\end{proof}

\section{Lower Bound Proofs}

\subsection{Proof of Information-Theoretic Lower Bounds}

\begin{proof}[Proof of Context Selection Lower Bound]
We use Yao's minimax principle. Consider a hard distribution over submodular functions where:
\begin{itemize}
  \item each function $f$ is determined by a hidden subset $H \subseteq [n]$;
  \item $f(S) = |S \cap H|$ (a coverage function);
  \item $H$ is uniformly random with $|H| = k$.
\end{itemize}

To achieve $(1-1/e-\epsilon)$ approximation, an algorithm must distinguish between sets $S$ with $|S \cap H| \geq (1-1/e-\epsilon)k$ and those with $|S \cap H| < (1-1/e)k$.

By information theory, this requires $\Omega(k/\epsilon)$ evaluations. Since $k = \Theta(n)$ for hard instances, we get the $\Omega(n/\epsilon)$ lower bound.
\end{proof}

\section{Complexity Theory Proofs}

\subsection{Proof of PSPACE-Hardness}

\begin{proof}[Proof of Theorem \ref{thm:policy_hardness}]
We reduce from the PSPACE-complete problem QSAT (Quantified Boolean Satisfiability).

Given a QSAT instance $\exists x_1 \forall x_2 \exists x_3 \ldots Q x_n \, \phi(x_1, \ldots, x_n)$, we construct a POMDP where:
\begin{itemize}
  \item states encode partial assignments to the variables;
  \item actions correspond to setting variable values;
  \item observations reveal whether constraints are satisfied;
  \item the optimal policy attains the maximum achievable reward iff the QSAT formula is true.
\end{itemize}

The construction ensures that finding the optimal policy requires solving the QSAT instance, proving PSPACE-hardness.
\end{proof}

\section{Statistical Learning Theory Proofs}

\subsection{PAC Learning Bounds}

\begin{proof}[Proof of PAC Learning Bounds for Tool Effectiveness]
Consider the hypothesis class $\mathcal{H}$ of linear functions with bounded parameters.

By the generalization bound for linear functions:
\begin{equation}
P\left(\sup_{h \in \mathcal{H}} |R(h) - \hat{R}(h)| \geq \epsilon\right) \leq 2|\mathcal{H}|_m \exp(-2m\epsilon^2)
\end{equation}

where $|\mathcal{H}|_m$ is the $m$-th shatter coefficient.

For $d$-dimensional linear functions, $|\mathcal{H}|_m \leq (em/d)^d$.

Setting the right-hand side equal to $\delta$ and solving for $m$ gives:
\begin{equation}
m \geq \frac{d \log(em/d) + \log(2/\delta)}{2\epsilon^2}
\end{equation}

For large $m$, this simplifies to the stated bound.
\end{proof}