\section{CFG Dataset Processing and Generation Pipeline}
\label{sec:cfg_pipeline}
The Classifier-Free Guidance (CFG) dataset processing pipeline and generation system bridge training-data preparation and inference-time sequence generation. The framework handles multi-modal data integration, label assignment, and end-to-end generation orchestration with advanced ODE integration methods.
\subsection{CFG Dataset Architecture}
The CFG dataset processing system transforms heterogeneous protein sequence data into a unified training format suitable for classifier-free guidance, implementing sophisticated label assignment strategies and data alignment procedures.
\subsubsection{Multi-Source Data Integration}
\label{sec:multi_source_data}
The dataset integrates sequences from multiple heterogeneous sources with different annotation standards:
\begin{itemize}
\item \textbf{Antimicrobial Peptide Database (APD3)}: Experimentally validated AMPs with MIC values
\item \textbf{UniProt Swiss-Prot}: Reviewed protein sequences serving as negative examples
\item \textbf{Custom Curated Sets}: Manually validated sequences with known activities
\end{itemize}
Each source requires specialized parsing and validation procedures to ensure data quality and consistency.
\subsubsection{Intelligent Label Assignment Strategy}
\label{sec:label_assignment}
The system employs a sophisticated three-class labeling scheme optimized for CFG training:
\begin{align}
\text{Label}(s) = \begin{cases}
0 & \text{if } s \in \mathcal{S}_{\text{AMP}} \text{ (MIC} < 100~\mu\text{g/mL)} \\
1 & \text{if } s \in \mathcal{S}_{\text{Non-AMP}} \text{ (MIC} \geq 100~\mu\text{g/mL or UniProt)} \\
2 & \text{if randomly masked for unconditional training}
\end{cases} \label{eq:label_assignment}
\end{align}
The label assignment process incorporates several validation steps, sketched in code after the list:
\begin{enumerate}
\item \textbf{Header-Based Classification}: Automatic assignment using sequence identifiers
\item \textbf{Length Filtering}: Sequences must satisfy $2 \leq |s| \leq 50$ amino acids
\item \textbf{Canonical Amino Acid Validation}: Only sequences containing standard 20 amino acids
\item \textbf{Duplicate Detection}: Sequence-level deduplication across all sources
\end{enumerate}
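A minimal Python sketch of these rules follows; the helper names (\texttt{assign\_label}, \texttt{is\_valid\_sequence}, \texttt{build\_records}) are illustrative rather than the pipeline's actual API:
\begin{verbatim}
# Illustrative sketch of the validation and labeling rules above;
# helper names are hypothetical.
CANONICAL_AA = set("ACDEFGHIKLMNPQRSTVWY")

def assign_label(header: str) -> int:
    """Header-based classification: 0 = AMP, 1 = Non-AMP."""
    return 0 if header.startswith("AP") else 1  # 'AP' marks APD3 entries

def is_valid_sequence(seq: str) -> bool:
    """Length filter (2..50) and canonical amino-acid check."""
    return 2 <= len(seq) <= 50 and set(seq) <= CANONICAL_AA

def build_records(entries):
    """entries: iterable of (header, sequence) pairs from FASTA parsing."""
    seen, records = set(), []
    for header, seq in entries:
        seq = seq.upper()
        if is_valid_sequence(seq) and seq not in seen:  # dedup across sources
            seen.add(seq)
            records.append((seq, assign_label(header), header))
    return records
\end{verbatim}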
\subsubsection{Strategic Masking for CFG Training}
\label{sec:strategic_masking}
The dataset implements intelligent masking strategies to enable effective classifier-free guidance:
\begin{align}
\text{Mask}_{\text{CFG}}(c, p_{\text{mask}}) = \begin{cases}
c & \text{with probability } (1 - p_{\text{mask}}) \\
2 & \text{with probability } p_{\text{mask}}
\end{cases} \label{eq:cfg_masking_strategy}
\end{align}
where $p_{\text{mask}} = 0.10$ for static masking during dataset creation, and additional dynamic masking ($p_{\text{dynamic}} = 0.15$) occurs during training.
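The static masking pass of Equation~\ref{eq:cfg_masking_strategy} amounts to a few lines of NumPy; this sketch assumes a hypothetical \texttt{seed} argument for reproducibility:
\begin{verbatim}
import numpy as np

def apply_cfg_masking(labels: np.ndarray, p_mask: float = 0.10,
                      seed: int = 0) -> np.ndarray:
    """Replace a random p_mask fraction of labels with the mask
    class (2) used for unconditional training."""
    rng = np.random.default_rng(seed)  # seed is an assumption, not pipeline API
    masked = labels.copy()
    idx = rng.choice(len(labels), size=int(len(labels) * p_mask),
                     replace=False)
    masked[idx] = 2
    return masked
\end{verbatim}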
\subsection{Advanced Generation Pipeline}
The generation pipeline orchestrates the complete end-to-end process from noise sampling to final sequence output, incorporating state-of-the-art ODE integration methods and quality control mechanisms.
\subsubsection{Multi-Stage Generation Architecture}
\label{sec:generation_architecture}
The generation process follows a carefully designed four-stage pipeline:
\begin{align}
\text{Stage 1:} \quad &\mathbf{z}_0 \sim \mathcal{N}(0, \mathbf{I}) \quad \text{(Noise Sampling)} \label{eq:stage1_noise}\\
\text{Stage 2:} \quad &\mathbf{z}_1 = \text{ODESolve}(\mathbf{z}_0, v_\theta, [0,1]) \quad \text{(Flow Integration)} \label{eq:stage2_ode}\\
\text{Stage 3:} \quad &\mathbf{h} = \mathcal{D}(\mathbf{z}_1) \quad \text{(Decompression)} \label{eq:stage3_decomp}\\
\text{Stage 4:} \quad &s = \text{ESM2Decode}(\mathbf{h}) \quad \text{(Sequence Decoding)} \label{eq:stage4_decode}
\end{align}
Each stage incorporates sophisticated error handling and quality validation procedures.
\subsubsection{Advanced ODE Integration Methods}
\label{sec:ode_integration}
The system supports multiple numerical integration schemes for solving the flow ODE $\frac{d\mathbf{z}}{dt} = v_\theta(\mathbf{z}, t, c)$:
\textbf{Euler Integration (Fallback Method):}
\begin{align}
\mathbf{z}_{t+\Delta t} = \mathbf{z}_t + \Delta t \cdot v_\theta(\mathbf{z}_t, t, c) \label{eq:euler_integration}
\end{align}
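A minimal fixed-step Euler loop implementing Equation~\ref{eq:euler_integration}; the vector-field signature $v_\theta(\mathbf{z}, t, c)$ is assumed from the surrounding notation:
\begin{verbatim}
import torch

def euler_integrate(z0, v_theta, c, n_steps: int = 100):
    """Fixed-step Euler fallback for dz/dt = v_theta(z, t, c)."""
    z, dt = z0, 1.0 / n_steps
    with torch.no_grad():
        for i in range(n_steps):
            t = torch.full((z.shape[0],), i * dt, device=z.device)
            z = z + dt * v_theta(z, t, c)
    return z
\end{verbatim}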
\textbf{Runge-Kutta Methods (torchdiffeq):}
\begin{align}
\mathbf{k}_1 &= v_\theta(\mathbf{z}_t, t, c) \label{eq:rk_k1}\\
\mathbf{k}_2 &= v_\theta(\mathbf{z}_t + \frac{\Delta t}{2}\mathbf{k}_1, t + \frac{\Delta t}{2}, c) \label{eq:rk_k2}\\
\mathbf{k}_3 &= v_\theta(\mathbf{z}_t + \frac{\Delta t}{2}\mathbf{k}_2, t + \frac{\Delta t}{2}, c) \label{eq:rk_k3}\\
\mathbf{k}_4 &= v_\theta(\mathbf{z}_t + \Delta t\mathbf{k}_3, t + \Delta t, c) \label{eq:rk_k4}\\
\mathbf{z}_{t+\Delta t} &= \mathbf{z}_t + \frac{\Delta t}{6}(\mathbf{k}_1 + 2\mathbf{k}_2 + 2\mathbf{k}_3 + \mathbf{k}_4) \label{eq:rk4_integration}
\end{align}
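The same loop with the RK4 update of Equations~\ref{eq:rk_k1}--\ref{eq:rk4_integration}, under the same assumed $v_\theta$ signature:
\begin{verbatim}
import torch

def rk4_integrate(z0, v_theta, c, n_steps: int = 50):
    """Classic fourth-order Runge-Kutta over t in [0, 1]."""
    z, dt = z0, 1.0 / n_steps
    with torch.no_grad():
        for i in range(n_steps):
            t = torch.full((z.shape[0],), i * dt, device=z.device)
            k1 = v_theta(z, t, c)
            k2 = v_theta(z + 0.5 * dt * k1, t + 0.5 * dt, c)
            k3 = v_theta(z + 0.5 * dt * k2, t + 0.5 * dt, c)
            k4 = v_theta(z + dt * k3, t + dt, c)
            z = z + (dt / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
    return z
\end{verbatim}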
\textbf{Adaptive Methods (DOPRI5 / RKF45):}
The system selects step sizes automatically using embedded fourth/fifth-order error control (DOPRI5 via \texttt{torchdiffeq}, with the Runge--Kutta--Fehlberg 4(5) fallback of Algorithm~\ref{alg:adaptive_ode}):
\begin{align}
\text{error}_t &= \|\mathbf{z}_{t+\Delta t}^{(5)} - \mathbf{z}_{t+\Delta t}^{(4)}\|_2 \label{eq:adaptive_error}\\
\Delta t_{\text{new}} &= \Delta t \cdot \min\left(2, \max\left(0.5, 0.9 \left(\frac{\text{tol}}{\text{error}_t}\right)^{1/5}\right)\right) \label{eq:adaptive_step}
\end{align}
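The adaptive path delegates to \texttt{torchdiffeq}; a minimal usage sketch, with tolerance values chosen for illustration:
\begin{verbatim}
import torch
from torchdiffeq import odeint  # pip install torchdiffeq

def dopri5_integrate(z0, ode_func, rtol=1e-5, atol=1e-5):
    """Adaptive Dormand-Prince 4(5); ode_func has signature f(t, z)."""
    t_span = torch.tensor([0.0, 1.0], device=z0.device)
    trajectory = odeint(ode_func, z0, t_span, rtol=rtol, atol=atol,
                        method="dopri5")
    return trajectory[-1]  # state at t = 1
\end{verbatim}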
\subsubsection{Classifier-Free Guidance Integration}
\label{sec:cfg_integration_generation}
During generation, CFG guidance is applied at each ODE integration step:
\begin{align}
v_{\text{guided}}(\mathbf{z}_t, t, c) &= v_\theta(\mathbf{z}_t, t, \emptyset) + w \cdot (v_\theta(\mathbf{z}_t, t, c) - v_\theta(\mathbf{z}_t, t, \emptyset)) \label{eq:cfg_guided_vector}
\end{align}
This guidance can be computed with two forward passes per step, as in Algorithm~\ref{alg:cfg_ode_function}, or more efficiently with a single forward pass by concatenating the conditional and unconditional inputs into one batch.
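A sketch of the batched variant; the flow-model signature $f_\theta(\mathbf{z}, t, c)$ and the mask class $2$ follow the notation above:
\begin{verbatim}
import torch

def cfg_vector_field(f_theta, z, t, c, w, mask_class=2):
    """Evaluate the guided field of Eq. (cfg_guided_vector) with one
    forward pass over concatenated conditional/unconditional inputs."""
    B = z.shape[0]
    cond = torch.full((B,), c, dtype=torch.long, device=z.device)
    uncond = torch.full((B,), mask_class, dtype=torch.long, device=z.device)
    v = f_theta(torch.cat([z, z]), torch.cat([t, t]),
                torch.cat([cond, uncond]))
    v_cond, v_uncond = v[:B], v[B:]
    return v_uncond + w * (v_cond - v_uncond)
\end{verbatim}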
\subsection{Quality Control and Validation Framework}
The pipeline incorporates comprehensive quality control mechanisms at every stage to ensure high-fidelity generation.
\subsubsection{Sequence Validation Pipeline}
\label{sec:sequence_validation}
Generated sequences undergo multi-tier validation (a code sketch follows the list):
\begin{enumerate}
\item \textbf{Canonical Amino Acid Validation}: $s \in \{A, C, D, E, F, G, H, I, K, L, M, N, P, Q, R, S, T, V, W, Y\}^*$
\item \textbf{Length Constraints}: $L_{\min} \leq |s| \leq L_{\max}$ where $L_{\min} = 5, L_{\max} = 50$
\item \textbf{Complexity Filtering}: Reject sequences with excessive repeats or low complexity
\item \textbf{Biological Plausibility}: Basic physicochemical property validation
\end{enumerate}
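A minimal validation sketch; the repeat-run threshold used for the complexity filter is an illustrative assumption:
\begin{verbatim}
CANONICAL_AA = set("ACDEFGHIKLMNPQRSTVWY")

def passes_validation(seq: str, l_min: int = 5, l_max: int = 50,
                      max_repeat: int = 5) -> bool:
    """Tiers 1-3 of the validation list above."""
    if not (l_min <= len(seq) <= l_max):
        return False
    if not set(seq) <= CANONICAL_AA:
        return False
    # Low-complexity heuristic (assumption): reject long single-residue runs
    return not any(aa * max_repeat in seq for aa in set(seq))
\end{verbatim}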
\subsubsection{Generation Quality Metrics}
\label{sec:generation_quality}
The system tracks comprehensive quality metrics during generation, illustrated in the sketch after the list:
\begin{itemize}
\item \textbf{Validity Rate}: Fraction of sequences passing all validation checks
\item \textbf{Diversity Index}: Shannon entropy of generated sequence distribution
\item \textbf{Novelty Score}: Fraction of sequences not present in training data
\item \textbf{Conditional Consistency}: Alignment between requested and achieved properties
\end{itemize}
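The sketch below computes two of these metrics; residue-level entropy is one simple instantiation of the diversity index and may differ from the pipeline's exact definition:
\begin{verbatim}
import math
from collections import Counter

def quality_metrics(generated, training_set):
    """Diversity (Shannon entropy over residues) and novelty."""
    counts = Counter(aa for s in generated for aa in s)
    total = sum(counts.values())
    entropy = -sum((n / total) * math.log2(n / total)
                   for n in counts.values())
    novelty = sum(s not in training_set for s in generated) / len(generated)
    return {"diversity_bits": entropy, "novelty": novelty}
\end{verbatim}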
\subsection{Batch Processing and Scalability}
The pipeline is designed for efficient large-scale generation with optimized batch processing and memory management.
\subsubsection{Batch Generation Strategy}
\label{sec:batch_generation}
Large-scale generation employs intelligent batching strategies:
\begin{align}
\text{BatchSize}_{\text{optimal}} = \min\left(\text{BatchSize}_{\text{max}}, \left\lfloor\frac{\text{GPU\_Memory}}{\text{Model\_Memory} \cdot \text{Sequence\_Length}}\right\rfloor\right) \label{eq:optimal_batch_size}
\end{align}
The system dynamically adjusts batch sizes based on available GPU memory and sequence complexity.
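A sketch of Equation~\ref{eq:optimal_batch_size} using \texttt{torch.cuda.mem\_get\_info}; the per-token memory constant is a rough assumption:
\begin{verbatim}
import torch

def optimal_batch_size(max_batch: int, seq_len: int,
                       bytes_per_token: int = 2048):
    """Cap the batch size by currently free GPU memory.
    bytes_per_token is an assumed per-token activation footprint."""
    if not torch.cuda.is_available():
        return max_batch
    free_bytes, _total = torch.cuda.mem_get_info()
    return max(1, min(max_batch, free_bytes // (bytes_per_token * seq_len)))
\end{verbatim}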
\subsubsection{Memory-Efficient Processing}
\label{sec:memory_efficient}
Several optimization strategies ensure efficient memory utilization; a minimal sketch follows the list:
\begin{itemize}
\item \textbf{Gradient-Free Inference}: All generation operations use \texttt{torch.no\_grad()}
\item \textbf{Sequential Model Loading}: Models loaded and unloaded as needed to minimize peak memory
\item \textbf{Chunked Processing}: Large batches split into manageable chunks
\item \textbf{Tensor Cleanup}: Explicit memory cleanup after each generation batch
\end{itemize}
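A minimal sketch combining the gradient-free, chunked, and cleanup strategies; \texttt{generate\_batch} is a hypothetical callable standing in for Stages 3a--3d of Algorithm~\ref{alg:generation_pipeline}:
\begin{verbatim}
import torch

def generate_in_chunks(generate_batch, n_samples: int, chunk: int = 20):
    """Gradient-free generation in fixed-size chunks with cleanup."""
    results = []
    with torch.no_grad():
        for start in range(0, n_samples, chunk):
            results.extend(generate_batch(min(chunk, n_samples - start)))
            if torch.cuda.is_available():
                torch.cuda.empty_cache()  # release cached blocks early
    return results
\end{verbatim}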
\subsection{Multi-Scale CFG Generation}
The system supports generation at multiple CFG scales simultaneously, enabling comprehensive exploration of the conditioning space.
\subsubsection{CFG Scale Scheduling}
\label{sec:cfg_scheduling}
The pipeline implements sophisticated CFG scale scheduling:
\begin{align}
w(t) = w_{\text{base}} \cdot \text{Schedule}(t) \quad \text{where } \text{Schedule}(t) \in \{\text{constant}, \text{linear}, \text{cosine}\} \label{eq:cfg_scheduling}
\end{align}
Different scheduling strategies enable fine-grained control over generation characteristics.
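A sketch of the three named schedules; the exact linear and cosine shapes are one reasonable instantiation:
\begin{verbatim}
import math

def cfg_scale(t: float, w_base: float, schedule: str = "constant") -> float:
    """w(t) = w_base * Schedule(t), cf. Eq. (cfg_scheduling)."""
    if schedule == "constant":
        s = 1.0
    elif schedule == "linear":
        s = t                            # ramp guidance up over the flow
    elif schedule == "cosine":
        s = 0.5 * (1.0 - math.cos(math.pi * t))
    else:
        raise ValueError(f"unknown schedule: {schedule}")
    return w_base * s
\end{verbatim}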
\subsubsection{Comparative Generation Analysis}
\label{sec:comparative_generation}
The system automatically generates sequences at multiple CFG scales for comparative analysis:
\begin{itemize}
\item \textbf{CFG Scale 0.0}: Unconditional generation (maximum diversity)
\item \textbf{CFG Scale 3.0}: Weak conditioning (balanced control/diversity)
\item \textbf{CFG Scale 7.5}: Strong conditioning (optimal for most applications)
\item \textbf{CFG Scale 15.0}: Very strong conditioning (maximum control)
\end{itemize}
\subsection{Performance Optimization and Benchmarking}
The pipeline incorporates extensive performance monitoring and optimization features.
\subsubsection{Generation Performance Metrics}
\label{sec:generation_performance}
\begin{itemize}
\item \textbf{Throughput}: $\sim$1000 sequences/second on an A100 GPU
\item \textbf{Memory Efficiency}: $<$8\,GB GPU memory at batch size 20
\item \textbf{Quality Consistency}: $>$95\% valid sequences across all CFG scales
\item \textbf{Diversity Preservation}: Shannon entropy $>$4.5 bits across conditions
\end{itemize}
\subsubsection{Optimization Strategies}
\label{sec:optimization_strategies}
Several optimization techniques maximize inference performance (a minimal sketch follows the list):
\begin{enumerate}
\item \textbf{Model Compilation}: JIT compilation for 15-25\% speedup
\item \textbf{Mixed Precision Inference}: FP16 inference where applicable
\item \textbf{Kernel Fusion}: Optimized CUDA kernels for common operations
\item \textbf{Asynchronous Processing}: Overlapped computation and data transfer
\end{enumerate}
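A sketch of the compilation and mixed-precision strategies; \texttt{torch.compile} requires PyTorch~2.0 or later, and the quoted speedups depend on the model and hardware:
\begin{verbatim}
import torch

def prepare_for_inference(model):
    """Eval mode plus JIT compilation (PyTorch >= 2.0)."""
    return torch.compile(model.eval())

def infer_fp16(model, z, t, c):
    """Gradient-free FP16 inference via autocast."""
    with torch.no_grad(), torch.autocast("cuda", dtype=torch.float16):
        return model(z, t, c)
\end{verbatim}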
\begin{algorithm}[h]
\caption{CFG Dataset Processing Pipeline}
\label{alg:cfg_dataset}
\begin{algorithmic}[1]
\REQUIRE FASTA files $\{\mathcal{F}_1, \mathcal{F}_2, \ldots, \mathcal{F}_n\}$
\REQUIRE Label assignment rules $\mathcal{R}_{\text{label}}$
\REQUIRE Masking probability $p_{\text{mask}} = 0.10$
\ENSURE Processed CFG dataset $\mathcal{D}_{\text{CFG}}$
\STATE \textbf{// Stage 1: Multi-Source Data Parsing}
\STATE $\text{sequences} \leftarrow []$, $\text{labels} \leftarrow []$, $\text{headers} \leftarrow []$
\FOR{$\mathcal{F}_i \in \{\mathcal{F}_1, \mathcal{F}_2, \ldots, \mathcal{F}_n\}$}
\STATE $\text{current\_header} \leftarrow ""$, $\text{current\_sequence} \leftarrow ""$
\FOR{$\text{line} \in \text{ReadFile}(\mathcal{F}_i)$}
\IF{$\text{line.startswith}('>')$}
\IF{$\text{current\_sequence} \neq ""$ \AND $\text{current\_header} \neq ""$}
\STATE \textbf{// Process previous sequence}
\IF{$2 \leq |\text{current\_sequence}| \leq 50$}
\STATE $\text{canonical\_aa} \leftarrow \{A, C, D, E, F, G, H, I, K, L, M, N, P, Q, R, S, T, V, W, Y\}$
\IF{$\forall aa \in \text{current\_sequence}: aa \in \text{canonical\_aa}$}
\STATE $\text{sequences.append}(\text{current\_sequence.upper}())$
\STATE $\text{headers.append}(\text{current\_header})$
\STATE $\text{label} \leftarrow \text{AssignLabel}(\text{current\_header}, \mathcal{R}_{\text{label}})$
\STATE $\text{labels.append}(\text{label})$
\ENDIF
\ENDIF
\ENDIF
\STATE $\text{current\_header} \leftarrow \text{line}[1:]$ \COMMENT{Remove '>'}
\STATE $\text{current\_sequence} \leftarrow ""$
\ELSE
\STATE $\text{current\_sequence} \leftarrow \text{current\_sequence} + \text{line.strip}()$
\ENDIF
\ENDFOR
\STATE \textbf{// Flush the file's final sequence using the same validation as above}
\STATE $\text{ProcessSequence}(\text{current\_header}, \text{current\_sequence})$ \COMMENT{otherwise the last record is dropped}
\ENDFOR
\STATE \textbf{// Stage 2: Label Assignment and Validation}
\FUNCTION{AssignLabel}{$\text{header}$, $\mathcal{R}_{\text{label}}$}
\IF{$\text{header.startswith}('AP')$}
\RETURN $0$ \COMMENT{AMP class}
\ELSIF{$\text{header.startswith}('sp')$}
\RETURN $1$ \COMMENT{Non-AMP class}
\ELSE
\RETURN $1$ \COMMENT{Default to Non-AMP}
\ENDIF
\ENDFUNCTION
\STATE \textbf{// Stage 3: Duplicate Detection and Strategic CFG Masking}
\STATE $\text{sequences}, \text{labels}, \text{headers} \leftarrow \text{Deduplicate}(\text{sequences}, \text{labels}, \text{headers})$ \COMMENT{sequence-level deduplication across sources}
\STATE $\text{original\_labels} \leftarrow \text{np.array}(\text{labels})$
\STATE $\text{masked\_labels} \leftarrow \text{original\_labels.copy}()$
\STATE $\text{n\_mask} \leftarrow \text{int}(|\text{labels}| \times p_{\text{mask}})$
\STATE $\text{mask\_indices} \leftarrow \text{np.random.choice}(|\text{labels}|, \text{size}=\text{n\_mask}, \text{replace}=\text{False})$
\STATE $\text{masked\_labels}[\text{mask\_indices}] \leftarrow 2$ \COMMENT{2 = mask/unconditional}
\STATE \textbf{// Stage 4: Dataset Construction}
\STATE $\mathcal{D}_{\text{CFG}} \leftarrow \text{CFGFlowDataset}(\text{sequences}, \text{masked\_labels}, \text{headers})$
\STATE \textbf{// Stage 5: Quality Validation}
\STATE $\text{ValidateDataset}(\mathcal{D}_{\text{CFG}})$
\RETURN $\mathcal{D}_{\text{CFG}}$
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\caption{End-to-End Generation Pipeline}
\label{alg:generation_pipeline}
\begin{algorithmic}[1]
\REQUIRE Trained models: Compressor $\mathcal{C}$, Flow Model $f_\theta$, Decompressor $\mathcal{D}$, Decoder $\text{ESM2Dec}$
\REQUIRE Generation parameters: $n_{\text{samples}}$, $n_{\text{steps}}$, CFG scale $w$, condition $c$
\ENSURE Generated sequences $\mathcal{S} = \{s_1, s_2, \ldots, s_{n_{\text{samples}}}\}$
\STATE \textbf{// Stage 1: Model Loading and Initialization}
\STATE $\mathcal{C} \leftarrow \text{LoadModel}(\text{"final\_compressor\_model.pth"})$
\STATE $\mathcal{D} \leftarrow \text{LoadModel}(\text{"final\_decompressor\_model.pth"})$
\STATE $f_\theta \leftarrow \text{LoadModel}(\text{"amp\_flow\_model\_final\_optimized.pth"})$
\STATE $\text{ESM2Dec} \leftarrow \text{LoadESM2Decoder}()$
\STATE $\text{stats} \leftarrow \text{LoadNormalizationStats}()$
\STATE \textbf{// Stage 2: Determine Optimal Integration Method}
\STATE $\text{ode\_method} \leftarrow \text{SelectODEMethod}()$ \COMMENT{dopri5, rk4, or euler}
\STATE \textbf{// Stage 3: Batch Generation Loop}
\STATE $\text{generated\_sequences} \leftarrow []$
\STATE $\text{batch\_size} \leftarrow \text{ComputeOptimalBatchSize}(n_{\text{samples}})$
\FOR{$\text{batch\_start} = 0$ to $n_{\text{samples}}$ step $\text{batch\_size}$}
\STATE $\text{current\_batch\_size} \leftarrow \min(\text{batch\_size}, n_{\text{samples}} - \text{batch\_start})$
\STATE \textbf{// Stage 3a: Noise Sampling}
\STATE $\mathbf{z}_0 \leftarrow \mathcal{N}(0, \mathbf{I}) \in \mathbb{R}^{\text{current\_batch\_size} \times 25 \times 80}$
\STATE \textbf{// Stage 3b: ODE Integration with CFG}
\IF{$\text{ode\_method} = \text{"dopri5"}$ and $\text{torchdiffeq\_available}$}
\STATE $\mathbf{z}_1 \leftarrow \text{odeint}(\text{CFGODEFunc}, \mathbf{z}_0, [0, 1], \text{method}=\text{"dopri5"})$
\ELSIF{$\text{ode\_method} = \text{"rk4"}$}
\STATE $\mathbf{z}_1 \leftarrow \text{RungeKutta4}(\mathbf{z}_0, \text{CFGODEFunc}, n_{\text{steps}})$
\ELSE
\STATE $\mathbf{z}_1 \leftarrow \text{EulerIntegration}(\mathbf{z}_0, \text{CFGODEFunc}, n_{\text{steps}})$
\ENDIF
\STATE \textbf{// Stage 3c: Decompression}
\WITH{$\text{torch.no\_grad}()$}
\STATE $\mathbf{h} \leftarrow \mathcal{D}(\mathbf{z}_1)$ \COMMENT{$80\text{D} \to 1280\text{D}$}
\STATE $\mathbf{h} \leftarrow \text{ApplyInverseNormalization}(\mathbf{h}, \text{stats})$
\ENDWITH
\STATE \textbf{// Stage 3d: Sequence Decoding}
\STATE $\text{batch\_sequences} \leftarrow \text{ESM2Dec.batch\_decode}(\mathbf{h})$
\STATE \textbf{// Stage 3e: Quality Validation}
\STATE $\text{valid\_sequences} \leftarrow \text{ValidateSequences}(\text{batch\_sequences})$
\STATE $\text{generated\_sequences.extend}(\text{valid\_sequences})$
\STATE \textbf{// Memory cleanup}
\STATE $\text{torch.cuda.empty\_cache}()$
\ENDFOR
\STATE \textbf{// Stage 4: Post-Processing and Quality Control}
\STATE $\mathcal{S} \leftarrow \text{PostProcessSequences}(\text{generated\_sequences})$
\STATE $\text{quality\_metrics} \leftarrow \text{ComputeQualityMetrics}(\mathcal{S})$
\RETURN $\mathcal{S}$, $\text{quality\_metrics}$
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\caption{CFG-Enhanced ODE Function}
\label{alg:cfg_ode_function}
\begin{algorithmic}[1]
\REQUIRE Current state $\mathbf{z}_t \in \mathbb{R}^{B \times L \times D}$
\REQUIRE Time $t \in [0, 1]$
\REQUIRE Condition $c$, CFG scale $w$
\REQUIRE Flow model $f_\theta$
\ENSURE Vector field $\mathbf{v}_{\text{guided}} \in \mathbb{R}^{B \times L \times D}$
\FUNCTION{CFGODEFunc}{$t$, $\mathbf{z}_t$}
\STATE \textbf{// Reshape the flattened solver state for the model}
\STATE $\mathbf{z}_t \leftarrow \mathbf{z}_t.\text{view}(B, L, D)$ \COMMENT{$B, L, D$ are fixed by the generation configuration}
\STATE \textbf{// Create time tensor}
\STATE $\mathbf{t}_{\text{tensor}} \leftarrow \text{torch.full}((B,), t, \text{device}=\mathbf{z}_t.\text{device})$
\STATE \textbf{// Conditional prediction}
\STATE $\mathbf{c}_{\text{cond}} \leftarrow \text{torch.full}((B,), c, \text{dtype}=\text{torch.long})$
\STATE $\mathbf{v}_{\text{cond}} \leftarrow f_\theta(\mathbf{z}_t, \mathbf{t}_{\text{tensor}}, \mathbf{c}_{\text{cond}})$
\STATE \textbf{// Unconditional prediction}
\STATE $\mathbf{c}_{\text{uncond}} \leftarrow \text{torch.full}((B,), 2, \text{dtype}=\text{torch.long})$ \COMMENT{2 = mask}
\STATE $\mathbf{v}_{\text{uncond}} \leftarrow f_\theta(\mathbf{z}_t, \mathbf{t}_{\text{tensor}}, \mathbf{c}_{\text{uncond}})$
\STATE \textbf{// Apply classifier-free guidance}
\STATE $\mathbf{v}_{\text{guided}} \leftarrow \mathbf{v}_{\text{uncond}} + w \cdot (\mathbf{v}_{\text{cond}} - \mathbf{v}_{\text{uncond}})$
\STATE \textbf{// Reshape back to flat format for ODE solver}
\STATE $\mathbf{v}_{\text{guided}} \leftarrow \mathbf{v}_{\text{guided}}.\text{view}(-1)$
\RETURN $\mathbf{v}_{\text{guided}}$
\ENDFUNCTION
\STATE \textbf{// The ODE solver evaluates CFGODEFunc at each integration step}
\STATE $\mathbf{v}_{\text{guided}} \leftarrow \text{CFGODEFunc}(t, \mathbf{z}_t)$
\RETURN $\mathbf{v}_{\text{guided}}$
\end{algorithmic}
\end{algorithm}
\begin{algorithm}[h]
\caption{Adaptive ODE Integration (Runge--Kutta--Fehlberg 4(5) Fallback)}
\label{alg:adaptive_ode}
\begin{algorithmic}[1]
\REQUIRE Initial state $\mathbf{z}_0$, ODE function $f$, time span $[0, 1]$
\REQUIRE Integration parameters: tolerance $\text{tol} = 10^{-5}$, max steps $N_{\max} = 1000$
\ENSURE Final state $\mathbf{z}_1$
\FUNCTION{AdaptiveODEIntegration}{$\mathbf{z}_0$, $f$, $[t_0, t_1]$}
\STATE $\mathbf{z} \leftarrow \mathbf{z}_0$, $t \leftarrow t_0$, $\Delta t \leftarrow 0.01$ \COMMENT{Initial step size}
\STATE $\text{step\_count} \leftarrow 0$
\WHILE{$t < t_1$ and $\text{step\_count} < N_{\max}$}
\STATE \textbf{// Compute 4th and 5th order solutions}
\STATE $\mathbf{k}_1 \leftarrow f(t, \mathbf{z})$
\STATE $\mathbf{k}_2 \leftarrow f(t + \frac{\Delta t}{4}, \mathbf{z} + \frac{\Delta t}{4}\mathbf{k}_1)$
\STATE $\mathbf{k}_3 \leftarrow f(t + \frac{3\Delta t}{8}, \mathbf{z} + \frac{3\Delta t}{32}\mathbf{k}_1 + \frac{9\Delta t}{32}\mathbf{k}_2)$
\STATE $\mathbf{k}_4 \leftarrow f(t + \frac{12\Delta t}{13}, \mathbf{z} + \frac{1932\Delta t}{2197}\mathbf{k}_1 - \frac{7200\Delta t}{2197}\mathbf{k}_2 + \frac{7296\Delta t}{2197}\mathbf{k}_3)$
\STATE $\mathbf{k}_5 \leftarrow f(t + \Delta t, \mathbf{z} + \frac{439\Delta t}{216}\mathbf{k}_1 - 8\Delta t\mathbf{k}_2 + \frac{3680\Delta t}{513}\mathbf{k}_3 - \frac{845\Delta t}{4104}\mathbf{k}_4)$
\STATE $\mathbf{k}_6 \leftarrow f(t + \frac{\Delta t}{2}, \mathbf{z} - \frac{8\Delta t}{27}\mathbf{k}_1 + 2\Delta t\mathbf{k}_2 - \frac{3544\Delta t}{2565}\mathbf{k}_3 + \frac{1859\Delta t}{4104}\mathbf{k}_4 - \frac{11\Delta t}{40}\mathbf{k}_5)$
\STATE \textbf{// 4th order solution}
\STATE $\mathbf{z}_{\text{new}}^{(4)} \leftarrow \mathbf{z} + \Delta t(\frac{25}{216}\mathbf{k}_1 + \frac{1408}{2565}\mathbf{k}_3 + \frac{2197}{4104}\mathbf{k}_4 - \frac{1}{5}\mathbf{k}_5)$
\STATE \textbf{// 5th order solution}
\STATE $\mathbf{z}_{\text{new}}^{(5)} \leftarrow \mathbf{z} + \Delta t(\frac{16}{135}\mathbf{k}_1 + \frac{6656}{12825}\mathbf{k}_3 + \frac{28561}{56430}\mathbf{k}_4 - \frac{9}{50}\mathbf{k}_5 + \frac{2}{55}\mathbf{k}_6)$
\STATE \textbf{// Error estimation and step size adaptation}
\STATE $\text{error} \leftarrow \|\mathbf{z}_{\text{new}}^{(5)} - \mathbf{z}_{\text{new}}^{(4)}\|_2$
\IF{$\text{error} \leq \text{tol}$} \COMMENT{Accept step}
\STATE $\mathbf{z} \leftarrow \mathbf{z}_{\text{new}}^{(5)}$ \COMMENT{Local extrapolation: propagate the higher-order solution}
\STATE $t \leftarrow t + \Delta t$
\ENDIF
\STATE $\text{step\_count} \leftarrow \text{step\_count} + 1$ \COMMENT{Count rejected steps too, so the loop terminates}
\STATE \textbf{// Adapt step size}
\STATE $\text{safety\_factor} \leftarrow 0.9$
\STATE $\text{scale} \leftarrow \text{safety\_factor} \cdot \left(\frac{\text{tol}}{\max(\text{error}, \varepsilon)}\right)^{1/5}$ \COMMENT{$\varepsilon$ guards against division by zero}
\STATE $\Delta t \leftarrow \Delta t \cdot \min(2.0, \max(0.5, \text{scale}))$
\STATE \textbf{// Ensure we don't overshoot}
\IF{$t + \Delta t > t_1$}
\STATE $\Delta t \leftarrow t_1 - t$
\ENDIF
\ENDWHILE
\RETURN $\mathbf{z}$
\ENDFUNCTION
\STATE $\mathbf{z}_1 \leftarrow \text{AdaptiveODEIntegration}(\mathbf{z}_0, \text{CFGODEFunc}, [0, 1])$
\RETURN $\mathbf{z}_1$
\end{algorithmic}
\end{algorithm}