\chapter{Algorithm Specifications}
\label{app:algorithms}

% Set section numbering for Appendix B
\renewcommand{\thesection}{B.\arabic{section}}
\renewcommand{\thesubsection}{B.\arabic{section}.\arabic{subsection}}
\renewcommand{\thesubsubsection}{B.\arabic{section}.\arabic{subsection}.\arabic{subsubsection}}

This appendix provides detailed algorithmic specifications and pseudocode for the main algorithms discussed in the book.

\section{Online Learning Algorithms}

\subsection{LinUCB for Tool Selection}

\begin{algorithm}[H]
\caption{LinUCB for Multi-Tool Selection}
\label{alg:detailed_linucb}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\SetKwInOut{Parameters}{Parameters}

\Input{Context sequence $\{x_t\}_{t=1}^T$, tool set $\mathcal{T}$, parameter spaces $\{\Theta_i\}$}
\Parameters{Confidence parameter $\alpha > 0$, regularization $\lambda > 0$}
\Output{Tool selections and parameter estimates}

\textbf{Initialize:}
\For{each tool $i \in \mathcal{T}$ and parameter $\theta \in \Theta_i$}{
    $A_{i,\theta} \leftarrow \lambda I_d$ (regularized identity matrix)\;
    $b_{i,\theta} \leftarrow \mathbf{0}_d$ (zero vector)\;
}

\For{$t = 1, 2, \ldots, T$}{
    Observe context $x_t$\;
    
    \For{each tool-parameter pair $(i, \theta)$}{
        Compute parameter estimate: $\hat{\beta}_{i,\theta} \leftarrow A_{i,\theta}^{-1} b_{i,\theta}$\;
        Compute confidence radius: $\text{rad}_{i,\theta} \leftarrow \alpha \sqrt{x_t^T A_{i,\theta}^{-1} x_t}$\;
        Compute UCB: $\text{UCB}_{i,\theta} \leftarrow x_t^T \hat{\beta}_{i,\theta} + \text{rad}_{i,\theta}$\;
    }
    
    Select: $(i_t, \theta_t) \leftarrow \arg\max_{(i,\theta)} \text{UCB}_{i,\theta}$\;
    
    Execute tool $i_t$ with parameters $\theta_t$\;
    Observe reward $r_t$\;
    
    Update statistics:
    $A_{i_t,\theta_t} \leftarrow A_{i_t,\theta_t} + x_t x_t^T$\;
    $b_{i_t,\theta_t} \leftarrow b_{i_t,\theta_t} + x_t r_t$\;
}
\end{algorithm}

\subsection{Thompson Sampling with Gaussian Priors}

\begin{algorithm}[H]
\caption{Thompson Sampling for Tool Selection}
\label{alg:detailed_thompson}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\SetKwInOut{Parameters}{Parameters}

\Input{Context sequence $\{x_t\}_{t=1}^T$, tool set $\mathcal{T}$, parameter spaces $\{\Theta_i\}$}
\Parameters{Prior precision $\lambda_0$, noise variance $\sigma^2$}
\Output{Tool selections and posterior distributions}

\textbf{Initialize:}
\For{each tool $i \in \mathcal{T}$ and parameter $\theta \in \Theta_i$}{
    $\Sigma_{i,\theta} \leftarrow \lambda_0^{-1} I_d$\;
    $\mu_{i,\theta} \leftarrow \mathbf{0}_d$\;
}

\For{$t = 1, 2, \ldots, T$}{
    Observe context $x_t$\;
    
    \For{each tool-parameter pair $(i, \theta)$}{
        Sample: $\tilde{\beta}_{i,\theta} \sim \mathcal{N}(\mu_{i,\theta}, \Sigma_{i,\theta})$\;
        Compute expected reward: $\hat{r}_{i,\theta} \leftarrow x_t^T \tilde{\beta}_{i,\theta}$\;
    }
    
    Select: $(i_t, \theta_t) \leftarrow \arg\max_{(i,\theta)} \hat{r}_{i,\theta}$\;
    
    Execute tool and observe reward $r_t$\;
    
    Update posterior (compute new precision first, then the mean using the \emph{old} precision and mean):
    $\Sigma_{\text{new}}^{-1} \leftarrow \Sigma_{i_t,\theta_t}^{-1} + \sigma^{-2} x_t x_t^T$\;
    $\mu_{i_t,\theta_t} \leftarrow \Sigma_{\text{new}} \left( \Sigma_{i_t,\theta_t}^{-1} \mu_{i_t,\theta_t} + \sigma^{-2} x_t r_t \right)$\;
    $\Sigma_{i_t,\theta_t} \leftarrow \Sigma_{\text{new}}$\;
}
\end{algorithm}

\section{Context Selection Algorithms}

\subsection{Greedy Submodular Maximization}

\begin{algorithm}[H]
\caption{Greedy Algorithm for Context Selection}
\label{alg:detailed_greedy}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Candidate set $V = \{v_1, \ldots, v_n\}$, budget $B$, utility function $f$}
\Output{Selected context set $S \subseteq V$}

Initialize: $S \leftarrow \emptyset$, remaining budget $b \leftarrow B$\;

\While{$b > 0$ and $V \setminus S \neq \emptyset$}{
    $\text{best\_efficiency} \leftarrow 0$\;
    $\text{best\_element} \leftarrow \text{null}$\;
    
    \For{each $v \in V \setminus S$}{
        \If{$\text{size}(v) \leq b$}{
            $\text{gain} \leftarrow f(S \cup \{v\}) - f(S)$\;
            $\text{efficiency} \leftarrow \text{gain} / \text{size}(v)$\;
            
            \If{$\text{efficiency} > \text{best\_efficiency}$}{
                $\text{best\_efficiency} \leftarrow \text{efficiency}$\;
                $\text{best\_element} \leftarrow v$\;
            }
        }
    }
    
    \uIf{$\text{best\_element} \neq \text{null}$}{
        $S \leftarrow S \cup \{\text{best\_element}\}$\;
        $b \leftarrow b - \text{size}(\text{best\_element})$\;
    }
    \Else{
        break\;
    }
}

\Return{$S$}\;
\end{algorithm}

\subsection{Dynamic Context Update}

\begin{algorithm}[H]
\caption{Online Context Selection with Updates}
\label{alg:dynamic_context}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Current context $S_t$, new candidates $V_{\text{new}}$, budget $B$, utility $f$}
\Output{Updated context $S_{t+1}$}

$S \leftarrow S_t$\;
$\text{candidates} \leftarrow V_{\text{new}}$\;

\textbf{Phase 1: Add valuable new elements}
\For{each $v \in \text{candidates}$}{
    $\text{marginal\_value} \leftarrow f(S \cup \{v\}) - f(S)$\;
    \If{$\text{marginal\_value} > \text{threshold}$ and $\text{size}(S) + \text{size}(v) \leq B$}{
        $S \leftarrow S \cup \{v\}$\;
    }
}

\textbf{Phase 2: Remove least valuable existing elements}
\While{$\text{size}(S) > B$}{
    $\text{min\_loss} \leftarrow \infty$\;
    $\text{worst\_element} \leftarrow \text{null}$\;
    
    \For{each $u \in S$}{
        $\text{loss} \leftarrow f(S) - f(S \setminus \{u\})$\;
        \If{$\text{loss} < \text{min\_loss}$}{
            $\text{min\_loss} \leftarrow \text{loss}$\;
            $\text{worst\_element} \leftarrow u$\;
        }
    }
    
    $S \leftarrow S \setminus \{\text{worst\_element}\}$\;
}

\textbf{Phase 3: Local optimization}
$\text{improved} \leftarrow \text{true}$\;
\While{$\text{improved}$}{
    $\text{improved} \leftarrow \text{false}$\;
    
\For{each $u \in S$ and $v \in V_{\text{new}} \setminus S$}{
        \If{$\text{size}(S) - \text{size}(u) + \text{size}(v) \leq B$}{
            $\text{current\_value} \leftarrow f(S)$\;
            $\text{new\_value} \leftarrow f((S \setminus \{u\}) \cup \{v\})$\;
            
            \If{$\text{new\_value} > \text{current\_value}$}{
                $S \leftarrow (S \setminus \{u\}) \cup \{v\}$\;
                $\text{improved} \leftarrow \text{true}$\;
                break\;
            }
        }
    }
}

\Return{$S$}\;
\end{algorithm}

\section{Code Exploration Algorithms}

\subsection{Priority-Based Graph Exploration}

\begin{algorithm}[H]
\caption{Priority-Based Code Exploration}
\label{alg:detailed_exploration}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Code dependency graph $G = (V, E)$, query $q$, exploration budget $B$}
\Output{Exploration sequence and discovered information}

Initialize priority queue $\mathcal{Q}$ with custom comparator\;
Initialize $\text{visited} \leftarrow \emptyset$, $\text{information} \leftarrow \emptyset$\;
Set $\text{remaining\_budget} \leftarrow B$\;

\textbf{Seed Selection:}
\For{each node $v \in V$}{
    $\text{relevance\_score}[v] \leftarrow \text{ComputeRelevance}(v, q)$\;
    \If{$\text{relevance\_score}[v] > \text{threshold}$}{
        $\mathcal{Q}.\text{push}(v, \text{relevance\_score}[v])$\;
    }
}

\textbf{Main Exploration Loop:}
\While{$\mathcal{Q}.\text{size}() > 0$ and $\text{remaining\_budget} > 0$}{
    $v \leftarrow \mathcal{Q}.\text{pop()}$\;
    
    \If{$v \in \text{visited}$ or $\text{ExplorationCost}(v) > \text{remaining\_budget}$}{
        continue\;
    }
    
    $\text{info} \leftarrow \text{ExploreNode}(v)$\;
    $\text{information} \leftarrow \text{information} \cup \text{info}$\;
    $\text{visited} \leftarrow \text{visited} \cup \{v\}$\;
    $\text{remaining\_budget} \leftarrow \text{remaining\_budget} - \text{ExplorationCost}(v)$\;
    
    \textbf{Add neighbors to queue:}
    \For{each neighbor $u$ of $v$}{
        \If{$u \notin \text{visited}$}{
            $\text{priority} \leftarrow \text{ComputePriority}(u, q, \text{information})$\;
            $\mathcal{Q}.\text{push}(u, \text{priority})$\;
        }
    }
    
    \textbf{Update priorities based on new information:}
    $\mathcal{Q}.\text{UpdatePriorities}(\text{info})$\;
}

\Return{$(\text{visited}, \text{information})$}\;

\vspace{0.5cm}

\textbf{Helper Functions:}

\Fn{ComputeRelevance(node $v$, query $q$)}{
    $\text{text\_similarity} \leftarrow \text{TextSimilarity}(v.\text{content}, q)$\;
    $\text{structural\_importance} \leftarrow \text{PageRank}(v) + \text{Centrality}(v)$\;
    $\text{type\_bonus} \leftarrow \text{GetTypeBonus}(v.\text{type}, q.\text{intent})$\;
    \Return{$\text{text\_similarity} + 0.3 \cdot \text{structural\_importance} + 0.2 \cdot \text{type\_bonus}$}\;
}

\Fn{ComputePriority(node $u$, query $q$, current\_info)}{
    $\text{base\_relevance} \leftarrow \text{ComputeRelevance}(u, q)$\;
    $\text{novelty} \leftarrow \text{ComputeNovelty}(u, \text{current\_info})$\;
    $\text{connectivity} \leftarrow \text{ConnectivityBonus}(u, \text{current\_info})$\;
    \Return{$\text{base\_relevance} + 0.4 \cdot \text{novelty} + 0.3 \cdot \text{connectivity}$}\;
}

\end{algorithm}

\section{Risk Assessment Algorithms}

\subsection{Risk-Aware Action Selection}

\begin{algorithm}[H]
\caption{Risk-Aware Multi-Objective Action Selection}
\label{alg:risk_aware}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{State $s$, action set $\mathcal{A}$, risk models $\{R_i\}$, constraints}
\Output{Selected action with risk guarantees}

\For{each action $a \in \mathcal{A}$}{
    \textbf{Estimate individual risks:}
    \For{$i = 1$ to $|\text{risk\_types}|$}{
        $\hat{R}_i(s, a) \leftarrow \text{RiskModel}_i.\text{predict}(s, a)$\;
        $\text{confidence}_i(s, a) \leftarrow \text{RiskModel}_i.\text{confidence}(s, a)$\;
    }
    
    \textbf{Compute composite risk:}
    $\hat{R}(s, a) \leftarrow \text{AggregateRisks}(\{\hat{R}_i(s, a)\})$\;
    $\text{risk\_bound}(s, a) \leftarrow \hat{R}(s, a) + \text{ConfidenceRadius}(\text{confidence})$\;
    
    \textbf{Check safety constraints:}
    $\text{is\_safe}(a) \leftarrow (\text{risk\_bound}(s, a) \leq \text{risk\_threshold})$\;
    
    \textbf{Estimate utility:}
    $\hat{U}(s, a) \leftarrow \text{UtilityModel}.\text{predict}(s, a)$\;
}

\textbf{Filter safe actions:}
$\mathcal{A}_{\text{safe}} \leftarrow \{a \in \mathcal{A} : \text{is\_safe}(a)\}$\;

\uIf{$\mathcal{A}_{\text{safe}} = \emptyset$}{
    \textbf{Fallback to least risky action:}
    $a^* \leftarrow \arg\min_{a \in \mathcal{A}} \text{risk\_bound}(s, a)$\;
    \textbf{Log safety violation and request human intervention}\;
}
\Else{
    $a^* \leftarrow \arg\max_{a \in \mathcal{A}_{\text{safe}}} \hat{U}(s, a)$\;
}

\Return{$a^*$}\;

\vspace{0.5cm}

\textbf{Helper Functions:}

\Fn{AggregateRisks(risk\_vector $\mathbf{r}$)}{
    \textbf{Option 1: Independence assumption}
    \Return{$1 - \prod_i (1 - r_i)$}\;
    
    \textbf{Option 2: Weighted sum}
    \Return{$\sum_i w_i r_i$ where $\sum_i w_i = 1$}\;
    
    \textbf{Option 3: Maximum}
    \Return{$\max_i r_i$}\;
}

\Fn{ConfidenceRadius(confidence\_scores)}{
    $\text{min\_confidence} \leftarrow \min(\text{confidence\_scores})$\;
    \Return{$\text{calibration\_factor} / \sqrt{\text{min\_confidence}}$}\;
}

\end{algorithm}

\section{Concurrency Control Algorithms}

\subsection{Adaptive Concurrency Management}

\begin{algorithm}[H]
\caption{Adaptive Concurrency Control}
\label{alg:concurrency_detailed}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Current system state, performance history, resource constraints}
\Output{Optimal concurrency level and task assignment}

$k_{\text{current}} \leftarrow \text{GetCurrentConcurrency}()$\;
$\text{performance\_history} \leftarrow \text{GetRecentMetrics}()$\;

\textbf{Analyze current performance:}
$\text{throughput} \leftarrow \text{ComputeThroughput}(\text{performance\_history})$\;
$\text{latency} \leftarrow \text{ComputeAverageLatency}(\text{performance\_history})$\;
$\text{resource\_usage} \leftarrow \text{ComputeResourceUtilization}()$\;
$\text{contention\_level} \leftarrow \text{EstimateContention}(\text{performance\_history})$\;

$\text{current\_objective} \leftarrow \text{ComputeObjective}(\text{throughput}, \text{latency}, \text{resource\_usage})$\;

\textbf{Test different concurrency levels:}
$\text{candidates} \leftarrow \{\max(1, k_{\text{current}} - 1), k_{\text{current}}, k_{\text{current}} + 1\}$\;

\For{each $k \in \text{candidates}$}{
    \uIf{$k \neq k_{\text{current}}$}{
        $\text{predicted\_throughput}[k] \leftarrow \text{PredictThroughput}(k, \text{current\_load})$\;
        $\text{predicted\_latency}[k] \leftarrow \text{PredictLatency}(k, \text{contention\_level})$\;
        $\text{predicted\_resources}[k] \leftarrow \text{PredictResourceUsage}(k)$\;
        $\text{predicted\_objective}[k] \leftarrow \text{ComputeObjective}(\text{predicted metrics})$\;
    }
    \Else{
        $\text{predicted\_objective}[k] \leftarrow \text{current\_objective}$\;
    }
}

\textbf{Select best concurrency level:}
$k_{\text{optimal}} \leftarrow \arg\max_k \text{predicted\_objective}[k]$\;

\textbf{Apply hysteresis to avoid oscillation:}
$\text{improvement} \leftarrow \text{predicted\_objective}[k_{\text{optimal}}] - \text{current\_objective}$\;
\If{$\text{improvement} < \text{switch\_threshold}$}{
    $k_{\text{optimal}} \leftarrow k_{\text{current}}$\;
}

\textbf{Implement concurrency change:}
\If{$k_{\text{optimal}} \neq k_{\text{current}}$}{
    \uIf{$k_{\text{optimal}} > k_{\text{current}}$}{
        $\text{SpawnAdditionalWorkers}(k_{\text{optimal}} - k_{\text{current}})$\;
    }
    \Else{
        $\text{TerminateWorkers}(k_{\text{current}} - k_{\text{optimal}})$\;
    }
}

\textbf{Task assignment with load balancing:}
$\text{active\_tasks} \leftarrow \text{GetPendingTasks}()$\;
$\text{worker\_loads} \leftarrow \text{GetCurrentWorkerLoads}()$\;

\For{each task $t$ in $\text{active\_tasks}$}{
    $\text{estimated\_cost}[t] \leftarrow \text{EstimateTaskCost}(t)$\;
    $\text{worker\_assignment}[t] \leftarrow \arg\min_w (\text{worker\_loads}[w] + \text{estimated\_cost}[t])$\;
    $\text{worker\_loads}[\text{worker\_assignment}[t]] \leftarrow \text{worker\_loads}[\text{worker\_assignment}[t]] + \text{estimated\_cost}[t]$\;
}

\Return{$(k_{\text{optimal}}, \text{worker\_assignment})$}\;

\end{algorithm}

\section{Multi-Objective Optimization Algorithms}

\subsection{Pareto Frontier Approximation}

\begin{algorithm}[H]
\caption{Multi-Objective Policy Optimization}
\label{alg:multiobjective}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Objective functions $\{f_1, f_2, \ldots, f_k\}$, constraint set $\mathcal{C}$, policy space $\Pi$}
\Output{Approximation of Pareto frontier}

Initialize $\text{pareto\_set} \leftarrow \emptyset$\;
$\text{weight\_samples} \leftarrow \text{GenerateWeightVectors}(k, \text{num\_samples})$\;

\For{each weight vector $\mathbf{w} \in \text{weight\_samples}$}{
    \textbf{Solve scalarized problem:}
    $\text{objective}(\pi) \leftarrow \sum_{i=1}^k w_i f_i(\pi)$\;
    
    $\pi^* \leftarrow \arg\max_{\pi \in \Pi} \text{objective}(\pi)$ subject to $\pi \in \mathcal{C}$\;
    
    \textbf{Evaluate all objectives:}
    $\text{point} \leftarrow (f_1(\pi^*), f_2(\pi^*), \ldots, f_k(\pi^*))$\;
    
    \textbf{Check if point is dominated:}
    $\text{dominated} \leftarrow \text{false}$\;
    \For{each $p \in \text{pareto\_set}$}{
        \If{$p$ dominates $\text{point}$}{
            $\text{dominated} \leftarrow \text{true}$\;
            break\;
        }
    }
    
    \If{not $\text{dominated}$}{
        \textbf{Remove dominated points:}
        $\text{pareto\_set} \leftarrow \{p \in \text{pareto\_set} : \text{point does not dominate } p\}$\;
        $\text{pareto\_set} \leftarrow \text{pareto\_set} \cup \{(\text{point}, \pi^*)\}$\;
    }
}

\textbf{Refine frontier with local search:}
\For{each $(\text{point}, \pi) \in \text{pareto\_set}$}{
    $\text{improved\_policy} \leftarrow \text{LocalSearch}(\pi, \text{pareto\_set})$\;
    \If{$\text{improved\_policy}$ found}{
        Update $(\text{point}, \pi)$ with improved solution\;
    }
}

\Return{$\text{pareto\_set}$}\;

\vspace{0.5cm}

\textbf{Helper Functions:}

\Fn{GenerateWeightVectors($k$, num\_samples)}{
    $\text{weights} \leftarrow \emptyset$\;
    \For{$i = 1$ to num\_samples}{
        $\mathbf{w} \leftarrow \text{SampleFromSimplex}(k)$ \tcp{Sample uniformly from $(k-1)$-simplex}
        $\text{weights} \leftarrow \text{weights} \cup \{\mathbf{w}\}$\;
    }
    \Return{$\text{weights}$}\;
}

\Fn{LocalSearch(policy $\pi$, pareto\_set)}{
    $\text{best\_improvement} \leftarrow 0$\;
    $\text{best\_policy} \leftarrow \text{null}$\;
    
    \For{small perturbation $\delta$ of $\pi$}{
        $\pi' \leftarrow \pi + \delta$\;
        \If{$\pi'$ improves some objective without worsening others significantly}{
            $\text{improvement} \leftarrow \text{ComputeImprovement}(\pi, \pi')$\;
            \If{$\text{improvement} > \text{best\_improvement}$}{
                $\text{best\_improvement} \leftarrow \text{improvement}$\;
                $\text{best\_policy} \leftarrow \pi'$\;
            }
        }
    }
    
    \Return{$\text{best\_policy}$}\;
}

\end{algorithm}

\section{Belief State Management}

\subsection{Particle Filter for Belief Updates}

\begin{algorithm}[H]
\caption{Particle Filter for Belief State Approximation}
\label{alg:particle_filter}
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}

\Input{Previous particles $\{s_t^{(i)}, w_t^{(i)}\}_{i=1}^N$, action $a_t$, observation $o_{t+1}$}
\Output{Updated particle set $\{s_{t+1}^{(i)}, w_{t+1}^{(i)}\}_{i=1}^N$}

\textbf{Prediction Step:}
\For{$i = 1$ to $N$}{
    Sample $s_{t+1}^{(i)} \sim T(\cdot | s_t^{(i)}, a_t)$ \tcp{Forward simulate}
}

\textbf{Update Step:}
\For{$i = 1$ to $N$}{
    $w_{t+1}^{(i)} \leftarrow w_t^{(i)} \cdot Z(o_{t+1} | s_{t+1}^{(i)}, a_t)$ \tcp{Weight by likelihood}
}

\textbf{Normalize weights:}
$W \leftarrow \sum_{i=1}^N w_{t+1}^{(i)}$\;
\For{$i = 1$ to $N$}{
    $w_{t+1}^{(i)} \leftarrow w_{t+1}^{(i)} / W$\;
}

\textbf{Resampling (if effective sample size is low):}
$N_{\text{eff}} \leftarrow 1 / \sum_{i=1}^N (w_{t+1}^{(i)})^2$\;
\If{$N_{\text{eff}} < N_{\text{threshold}}$}{
    $\text{indices} \leftarrow \text{MultinomialResample}(\{w_{t+1}^{(i)}\}, N)$\;
    \For{$i = 1$ to $N$}{
        $s_{t+1}^{(i)} \leftarrow s_{t+1}^{(\text{indices}[i])}$\;
        $w_{t+1}^{(i)} \leftarrow 1/N$ \tcp{Reset to uniform weights}
    }
}

\Return{$\{s_{t+1}^{(i)}, w_{t+1}^{(i)}\}_{i=1}^N$}\;
\end{algorithm}

\section{Implementation Notes}

\subsection{Computational Optimizations}

\begin{itemize}
    \item \textbf{Matrix Inversion}: Use Sherman-Morrison formula for rank-1 updates in LinUCB
    \item \textbf{Submodular Optimization}: Implement lazy evaluation to avoid redundant function evaluations
    \item \textbf{Priority Queues}: Use Fibonacci heaps for efficient priority updates in exploration
    \item \textbf{Parallel Execution}: Implement work-stealing for dynamic load balancing
    \item \textbf{Memory Management}: Use memory pools for frequent allocations in particle filters
\end{itemize}

\subsection{Numerical Stability}

\begin{itemize}
    \item Add regularization terms to avoid singular matrices
    \item Use log-space computations for probability calculations
    \item Implement numerically stable matrix decompositions (Cholesky, QR)
    \item Apply gradient clipping in online learning updates
    \item Use double precision for critical calculations
\end{itemize}
