\chapter{Optimization Theory}
\label{app:optimization}

% Set section numbering for Appendix E
\renewcommand{\thesection}{E.\arabic{section}}
\renewcommand{\thesubsection}{E.\arabic{section}.\arabic{subsection}}
\renewcommand{\thesubsubsection}{E.\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}

This appendix presents fundamental concepts from optimization theory that are essential for understanding the algorithmic designs and performance analysis in \ClaudeCode{} systems. We cover convex optimization, duality theory, gradient-based methods, submodular optimization, and multi-objective optimization, with applications to code intelligence and tool selection problems.

\section{Convex Optimization Basics}

\subsection{Convex Optimization Problems}

\begin{definition}[Convex Optimization Problem]
\label{def:convex_optimization}
A convex optimization problem has the form:
\begin{align}
\text{minimize} \quad & f(\mathbf{x}) \\
\text{subject to} \quad & g_i(\mathbf{x}) \leq 0, \quad i = 1, \ldots, m \\
& h_j(\mathbf{x}) = 0, \quad j = 1, \ldots, p
\end{align}
where $f, g_1, \ldots, g_m$ are convex functions and $h_1, \ldots, h_p$ are affine functions.
\end{definition}

\begin{theorem}[Global Optimality for Convex Problems]
\label{thm:convex_global_optimal}
For convex optimization problems, any local minimum is a global minimum. If the objective function is strictly convex, the global minimum is unique.
\end{theorem}

\begin{definition}[Feasible Set and Optimal Value]
\label{def:feasible_optimal}
\begin{align}
\text{Feasible set: } \quad \mathcal{F} &= \{\mathbf{x} : g_i(\mathbf{x}) \leq 0, h_j(\mathbf{x}) = 0\} \\
\text{Optimal value: } \quad f^* &= \inf_{\mathbf{x} \in \mathcal{F}} f(\mathbf{x})
\end{align}
\end{definition}

\subsection{First-Order Conditions}

\begin{theorem}[First-Order Necessary Conditions]
\label{thm:first_order_necessary}
If $\mathbf{x}^*$ is optimal for a differentiable convex problem satisfying a constraint qualification (e.g., Slater's condition), then there exist $\boldsymbol{\lambda} \geq 0$ and $\boldsymbol{\nu}$ such that:
\begin{align}
\nabla f(\mathbf{x}^*) + \sum_{i=1}^m \lambda_i \nabla g_i(\mathbf{x}^*) + \sum_{j=1}^p \nu_j \nabla h_j(\mathbf{x}^*) &= 0 \\
g_i(\mathbf{x}^*) &\leq 0, \quad i = 1, \ldots, m \\
h_j(\mathbf{x}^*) &= 0, \quad j = 1, \ldots, p \\
\lambda_i &\geq 0, \quad i = 1, \ldots, m \\
\lambda_i g_i(\mathbf{x}^*) &= 0, \quad i = 1, \ldots, m \quad \text{(complementary slackness)}
\end{align}
\end{theorem}

\section{KKT Conditions and Duality}

\subsection{Karush-Kuhn-Tucker Conditions}

\begin{definition}[Regular Point]
\label{def:regular_point}
Point $\mathbf{x}$ is regular if the gradients of the active inequality constraints and of the equality constraints are linearly independent:
\begin{equation}
\operatorname{rank}\begin{bmatrix}
\nabla g_i(\mathbf{x})^T, \; i \in \mathcal{A}(\mathbf{x}) \\
\nabla h_j(\mathbf{x})^T, \; j = 1, \ldots, p
\end{bmatrix} = |\mathcal{A}(\mathbf{x})| + p
\end{equation}
where $\mathcal{A}(\mathbf{x}) = \{i : g_i(\mathbf{x}) = 0\}$ denotes the set of active inequality constraints at $\mathbf{x}$.
\end{definition}

\begin{theorem}[KKT Conditions]
\label{thm:kkt_conditions}
For optimization problem with differentiable functions, if $\mathbf{x}^*$ is optimal and regular, then there exist KKT multipliers $\boldsymbol{\lambda}^*, \boldsymbol{\nu}^*$ satisfying:
\begin{align}
\nabla_{\mathbf{x}} \mathcal{L}(\mathbf{x}^*, \boldsymbol{\lambda}^*, \boldsymbol{\nu}^*) &= 0 \quad \text{(stationarity)} \\
g_i(\mathbf{x}^*) &\leq 0 \quad \text{(primal feasibility)} \\
h_j(\mathbf{x}^*) &= 0 \quad \text{(primal feasibility)} \\
\lambda_i^* &\geq 0 \quad \text{(dual feasibility)} \\
\lambda_i^* g_i(\mathbf{x}^*) &= 0 \quad \text{(complementary slackness)}
\end{align}
where $\mathcal{L}(\mathbf{x}, \boldsymbol{\lambda}, \boldsymbol{\nu}) = f(\mathbf{x}) + \sum_i \lambda_i g_i(\mathbf{x}) + \sum_j \nu_j h_j(\mathbf{x})$.
\end{theorem}

\subsection{Lagrangian Duality}

\begin{definition}[Lagrangian and Dual Function]
\label{def:lagrangian_dual}
\begin{align}
\text{Lagrangian: } \quad \mathcal{L}(\mathbf{x}, \boldsymbol{\lambda}, \boldsymbol{\nu}) &= f(\mathbf{x}) + \sum_{i=1}^m \lambda_i g_i(\mathbf{x}) + \sum_{j=1}^p \nu_j h_j(\mathbf{x}) \\
\text{Dual function: } \quad \mathcal{G}(\boldsymbol{\lambda}, \boldsymbol{\nu}) &= \inf_{\mathbf{x}} \mathcal{L}(\mathbf{x}, \boldsymbol{\lambda}, \boldsymbol{\nu})
\end{align}
\end{definition}

\begin{definition}[Dual Problem]
\label{def:dual_problem}
\begin{align}
\text{maximize} \quad & \mathcal{G}(\boldsymbol{\lambda}, \boldsymbol{\nu}) \\
\text{subject to} \quad & \boldsymbol{\lambda} \geq 0
\end{align}
\end{definition}

\begin{theorem}[Weak Duality]
\label{thm:weak_duality}
For any $\boldsymbol{\lambda} \geq 0$ and $\boldsymbol{\nu}$:
\begin{equation}
\mathcal{G}(\boldsymbol{\lambda}, \boldsymbol{\nu}) \leq f^*
\end{equation}
\end{theorem}

\begin{theorem}[Strong Duality]
\label{thm:strong_duality}
If the primal problem is convex and satisfies Slater's constraint qualification, then:
\begin{equation}
f^* = \max_{\boldsymbol{\lambda} \geq 0, \boldsymbol{\nu}} \mathcal{G}(\boldsymbol{\lambda}, \boldsymbol{\nu})
\end{equation}
\end{theorem}

\begin{definition}[Slater's Condition]
\label{def:slater_condition}
There exists $\mathbf{x} \in \text{relint}(\text{dom}(f))$ such that:
\begin{align}
g_i(\mathbf{x}) &< 0 \quad \text{for } i = 1, \ldots, m \\
h_j(\mathbf{x}) &= 0 \quad \text{for } j = 1, \ldots, p
\end{align}
\end{definition}

\section{Gradient Descent Methods}

\subsection{Basic Gradient Descent}

\begin{definition}[Gradient Descent Algorithm]
\label{def:gradient_descent}
\begin{equation}
\mathbf{x}_{k+1} = \mathbf{x}_k - \alpha_k \nabla f(\mathbf{x}_k)
\end{equation}
where $\alpha_k > 0$ is the step size at iteration $k$.
\end{definition}

\begin{theorem}[Convergence of Gradient Descent]
\label{thm:gd_convergence}
For $L$-smooth convex function $f$, gradient descent with constant step size $\alpha \leq 1/L$ satisfies:
\begin{equation}
f(\mathbf{x}_k) - f^* \leq \frac{\|\mathbf{x}_0 - \mathbf{x}^*\|^2}{2\alpha k}
\end{equation}
\end{theorem}

\begin{theorem}[Convergence for Strongly Convex Functions]
\label{thm:gd_strong_convex}
For a $\mu$-strongly convex, $L$-smooth function, gradient descent with step size $\alpha \leq \frac{2}{\mu + L}$ achieves linear convergence:
\begin{equation}
\|\mathbf{x}_k - \mathbf{x}^*\|^2 \leq \left(1 - \alpha \frac{2\mu L}{\mu + L}\right)^k \|\mathbf{x}_0 - \mathbf{x}^*\|^2
\end{equation}
\end{theorem}

\subsection{Accelerated Gradient Methods}

\begin{definition}[Nesterov's Accelerated Gradient]
\label{def:nesterov}
\begin{align}
\mathbf{y}_k &= \mathbf{x}_k + \beta_k(\mathbf{x}_k - \mathbf{x}_{k-1}) \\
\mathbf{x}_{k+1} &= \mathbf{y}_k - \alpha_k \nabla f(\mathbf{y}_k)
\end{align}
where $\beta_k = \frac{t_k - 1}{t_{k+1}}$, with $t_1 = 1$ and $t_{k+1} = \frac{1 + \sqrt{1 + 4t_k^2}}{2}$.
\end{definition}

\begin{theorem}[Accelerated Gradient Convergence]
\label{thm:accelerated_convergence}
For convex $L$-smooth functions, Nesterov's method achieves:
\begin{equation}
f(\mathbf{x}_k) - f^* \leq \frac{2L\|\mathbf{x}_0 - \mathbf{x}^*\|^2}{(k+1)^2}
\end{equation}
\end{theorem}

\subsection{Stochastic Gradient Descent}

\begin{definition}[Stochastic Gradient Descent]
\label{def:sgd}
For objective $f(\mathbf{x}) = \frac{1}{n}\sum_{i=1}^n f_i(\mathbf{x})$:
\begin{equation}
\mathbf{x}_{k+1} = \mathbf{x}_k - \alpha_k \nabla f_{i_k}(\mathbf{x}_k)
\end{equation}
where $i_k$ is chosen uniformly at random from $\{1, \ldots, n\}$.
\end{definition}

\begin{theorem}[SGD Convergence]
\label{thm:sgd_convergence}
For convex functions with bounded stochastic gradients $\E[\|\nabla f_{i}(\mathbf{x})\|^2] \leq G^2$, SGD with the horizon-tuned constant step size $\alpha = \frac{\|\mathbf{x}_0 - \mathbf{x}^*\|}{G\sqrt{k}}$ achieves:
\begin{equation}
\E[f(\bar{\mathbf{x}}_k) - f^*] \leq \frac{G\|\mathbf{x}_0 - \mathbf{x}^*\|}{\sqrt{k}}
\end{equation}
where $\bar{\mathbf{x}}_k = \frac{1}{k}\sum_{j=1}^k \mathbf{x}_j$.
\end{theorem}

\section{Submodular Function Theory}

\subsection{Submodular Functions}

\begin{definition}[Submodular Function]
\label{def:submodular_function}
Function $f: 2^V \rightarrow \R$ is submodular if for all $A \subseteq B \subseteq V$ and $v \in V \setminus B$:
\begin{equation}
f(A \cup \{v\}) - f(A) \geq f(B \cup \{v\}) - f(B)
\end{equation}
(diminishing marginal returns property)
\end{definition}

\begin{definition}[Marginal Gain]
\label{def:marginal_gain}
The marginal gain of element $v$ with respect to set $S$ is:
\begin{equation}
f_S(v) = f(S \cup \{v\}) - f(S)
\end{equation}
\end{definition}

\begin{theorem}[Equivalent Characterizations of Submodularity]
\label{thm:submodular_characterizations}
For function $f: 2^V \rightarrow \R$, the following are equivalent:
\begin{enumerate}
    \item $f$ is submodular (diminishing returns)
    \item For all $A, B \subseteq V$: $f(A) + f(B) \geq f(A \cup B) + f(A \cap B)$
    \item The discrete derivative is decreasing along all chains
\end{enumerate}
\end{theorem}

\subsection{Submodular Maximization}

\begin{theorem}[Greedy Algorithm for Monotone Submodular Maximization]
\label{thm:greedy_submodular}
For monotone submodular function $f$ and cardinality constraint $|S| \leq k$, the greedy algorithm achieves:
\begin{equation}
f(S_{\text{greedy}}) \geq \left(1 - \frac{1}{e}\right) f(S^*)
\end{equation}
where $S^*$ is the optimal solution.
\end{theorem}

\begin{proof}[Proof Sketch]
Let $S_i$ be the greedy solution after $i$ steps and $O = \{o_1, \ldots, o_k\}$ be optimal. At step $i$, the greedy choice $v_i$ satisfies:
\begin{equation}
f(S_{i-1} \cup \{v_i\}) - f(S_{i-1}) \geq \frac{1}{k} \sum_{j=1}^k [f(S_{i-1} \cup \{o_j\}) - f(S_{i-1})]
\end{equation}
By submodularity and telescoping, this leads to the $(1-1/e)$ approximation.
\end{proof}

\begin{theorem}[Hardness of Submodular Maximization]
\label{thm:submodular_hardness}
For monotone submodular maximization under cardinality constraints, no polynomial-time algorithm can achieve an approximation ratio better than $(1-1/e)$: in the value-oracle model, any such improvement requires exponentially many function evaluations (Nemhauser--Wolsey), and for the special case of maximum coverage, improving on $(1-1/e)$ is NP-hard (Feige).
\end{theorem}

\subsection{Continuous Relaxations and Matroid Constraints}

\begin{definition}[Continuous Greedy Algorithm]
\label{def:continuous_greedy}
Solve the continuous relaxation:
\begin{align}
\text{maximize} \quad & F(\mathbf{x}) = \E[f(R(\mathbf{x}))] \\
\text{subject to} \quad & \mathbf{x} \in P
\end{align}
where $R(\mathbf{x})$ includes element $i$ independently with probability $x_i$, and $P$ is a polytope.
\end{definition}

\begin{theorem}[Continuous Greedy Performance]
\label{thm:continuous_greedy}
The continuous greedy algorithm followed by pipage rounding achieves $(1-1/e)$-approximation for monotone submodular maximization over any matroid constraint.
\end{theorem}

\section{Multi-Objective Optimization}

\subsection{Pareto Optimality}

\begin{definition}[Pareto Dominance]
\label{def:pareto_dominance}
For multi-objective problem $\min \mathbf{f}(\mathbf{x}) = (f_1(\mathbf{x}), \ldots, f_m(\mathbf{x}))$, point $\mathbf{x}^1$ dominates $\mathbf{x}^2$ (denoted $\mathbf{x}^1 \prec \mathbf{x}^2$) if:
\begin{align}
f_i(\mathbf{x}^1) &\leq f_i(\mathbf{x}^2) \quad \text{for all } i \\
f_j(\mathbf{x}^1) &< f_j(\mathbf{x}^2) \quad \text{for some } j
\end{align}
\end{definition}

\begin{definition}[Pareto Optimal Set]
\label{def:pareto_optimal}
The Pareto optimal set is:
\begin{equation}
\mathcal{P} = \{\mathbf{x} \in \mathcal{X} : \nexists \mathbf{x}' \in \mathcal{X} \text{ such that } \mathbf{x}' \prec \mathbf{x}\}
\end{equation}
\end{definition}

\begin{definition}[Pareto Front]
\label{def:pareto_front}
The Pareto front is the image of the Pareto set:
\begin{equation}
\mathcal{F} = \{\mathbf{f}(\mathbf{x}) : \mathbf{x} \in \mathcal{P}\}
\end{equation}
\end{definition}

\subsection{Scalarization Methods}

\begin{definition}[Weighted Sum Method]
\label{def:weighted_sum}
\begin{equation}
\min_{\mathbf{x}} \sum_{i=1}^m w_i f_i(\mathbf{x})
\end{equation}
where $w_i > 0$ and $\sum_i w_i = 1$.
\end{definition}

\begin{theorem}[Weighted Sum Pareto Optimality]
\label{thm:weighted_sum_pareto}
If $\mathbf{x}^*$ is optimal for the weighted sum problem with $w_i > 0$, then $\mathbf{x}^*$ is Pareto optimal. Conversely, if the problem is convex, every Pareto optimal point minimizes the weighted sum for some weights $w_i \geq 0$ (not all zero).
\end{theorem}

\begin{definition}[$\epsilon$-Constraint Method]
\label{def:epsilon_constraint}
\begin{align}
\text{minimize} \quad & f_j(\mathbf{x}) \\
\text{subject to} \quad & f_i(\mathbf{x}) \leq \epsilon_i, \quad i \neq j \\
& \mathbf{x} \in \mathcal{X}
\end{align}
\end{definition}

\begin{theorem}[Achievement Scalarization]
\label{thm:achievement_scalarization}
The achievement scalarization:
\begin{equation}
\min_{\mathbf{x}} \max_{i} w_i (f_i(\mathbf{x}) - z_i^*)
\end{equation}
where $\mathbf{z}^*$ is a utopia point, always finds weakly Pareto optimal solutions; adding an augmentation term $\rho \sum_i (f_i(\mathbf{x}) - z_i^*)$ with small $\rho > 0$ guarantees Pareto optimality.
\end{theorem}

\section{Applications to Code Intelligence}

\subsection{Tool Selection as Multi-Objective Optimization}

In \ClaudeCode{} systems, tool selection involves multiple competing objectives:

\begin{definition}[Code Intelligence Multi-Objective Problem]
\label{def:code_multi_objective}
\begin{align}
\text{maximize} \quad & (f_{\text{accuracy}}(\mathbf{x}), f_{\text{efficiency}}(\mathbf{x}), f_{\text{safety}}(\mathbf{x})) \\
\text{subject to} \quad & \mathbf{x} \in \mathcal{T} \times \Theta
\end{align}
where $\mathcal{T}$ is the tool set and $\Theta$ is the parameter space.
\end{definition}

\subsection{Context Selection as Submodular Optimization}

Context selection exhibits submodular properties due to diminishing returns in information gain:

\begin{theorem}[Context Selection Submodularity]
\label{thm:context_submodularity}
The conditional mutual information function $f(S) = I(Y; X_S \mid Z)$ is monotone submodular in $S$ provided the context variables $X_i$ are conditionally independent given $(Y, Z)$ (Krause--Guestrin); here $Y$ is the target variable, $X_S$ are selected context variables, and $Z$ are fixed variables. Without such a conditional-independence assumption, mutual information need not be submodular.
\end{theorem}

\begin{corollary}[Greedy Context Selection]
\label{cor:greedy_context}
The greedy algorithm achieves $(1-1/e)$-approximation for maximizing mutual information subject to cardinality constraints.
\end{corollary}

\subsection{Online Optimization for Dynamic Tool Selection}

\begin{definition}[Online Convex Optimization for Tool Selection]
\label{def:online_tool_selection}
At each time $t$:
\begin{enumerate}
    \item Choose tool parameter vector $\mathbf{x}_t$
    \item Observe convex loss function $\ell_t$
    \item Suffer loss $\ell_t(\mathbf{x}_t)$
\end{enumerate}
\end{definition}

\begin{theorem}[Online Tool Selection Regret]
\label{thm:online_tool_regret}
Using the Hedge (multiplicative weights) algorithm over the tool set with appropriately tuned learning rate, the regret for tool selection satisfies:
\begin{equation}
\text{Regret}_T = O(\sqrt{T \log |\mathcal{T}|})
\end{equation}
where $|\mathcal{T}|$ is the number of available tools.
\end{theorem}

\section{Advanced Topics}

\subsection{Non-Convex Optimization}

For neural network-based components in code intelligence:

\begin{definition}[Escape from Saddle Points]
\label{def:saddle_escape}
A point $\mathbf{x}$ is an $(\epsilon, \gamma)$-approximate local minimum if:
\begin{equation}
\|\nabla f(\mathbf{x})\| \leq \epsilon \quad \text{and} \quad \lambda_{\min}(\nabla^2 f(\mathbf{x})) \geq -\gamma
\end{equation}
\end{definition}

\begin{theorem}[Noisy Gradient Descent]
\label{thm:noisy_gd}
Gradient descent with appropriate noise injection finds an $(\epsilon, O(\sqrt{\epsilon}))$-approximate local minimum in $O(\epsilon^{-4})$ iterations for smooth functions.
\end{theorem}

\subsection{Distributionally Robust Optimization}

For handling uncertainty in code distributions:

\begin{definition}[Distributionally Robust Problem]
\label{def:dro}
\begin{equation}
\min_{\mathbf{x}} \max_{P \in \mathcal{U}} \E_{P}[\ell(\mathbf{x}, \xi)]
\end{equation}
where $\mathcal{U}$ is an uncertainty set of distributions.
\end{definition}

\begin{theorem}[Wasserstein DRO Tractability]
\label{thm:wasserstein_dro}
When $\mathcal{U} = \{P : W_p(P, P_0) \leq \rho\}$ is a Wasserstein ball, the DRO problem can be reformulated as a finite-dimensional convex optimization problem under certain conditions.
\end{theorem}

This optimization theory foundation provides the mathematical tools necessary for analyzing and designing efficient algorithms for code intelligence systems, particularly in the context of tool selection, context management, and performance optimization.