//! Transformer decoder stack.
//!
//! Implements pre-layer-norm decoder layers with masked (causal)
//! self-attention, optional cross-attention over encoder output, and a
//! position-wise feed-forward network, followed by a final layer norm.
//! Supports both encoder-decoder and decoder-only (causal LM) forward
//! passes, plus cached variants that record intermediate activations
//! (e.g. for backpropagation).
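//!
//! A minimal usage sketch (mirroring the shapes and the `Tensor::randn`
//! helper used in the tests at the bottom of this file):
//!
//! ```ignore
//! let decoder = Decoder::new(DecoderConfig::small(), 42);
//! let emb = Tensor::randn(&[1, 4, 64], 99);   // [batch, seq, d_model]
//! let hidden = decoder.forward_causal(&emb)?; // [1, 4, 64]
//! ```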

use crate::tensor::Tensor;
use crate::attention::{AttentionCache, AttentionConfig, MultiHeadAttention};
use crate::ffn::{FfnCache, FfnConfig, FeedForward};

/// Activations recorded by a decoder layer during a cached forward pass,
/// as produced by [`DecoderLayer::forward_self_only_with_cache`]. Only the
/// self-attention and FFN sublayers are cached, since the causal-LM path
/// skips cross-attention.
pub struct DecoderLayerCache {
    /// Input to the first layer norm (before self-attention).
    pub ln1_input: Tensor,
    /// Cache from the self-attention sublayer.
    pub self_attn_cache: AttentionCache,
    /// Input to the third layer norm (before the feed-forward network).
    pub ln3_input: Tensor,
    /// Cache from the feed-forward sublayer.
    pub ffn_cache: FfnCache,
    /// The layer's original input, kept for the first residual connection.
    pub residual_input: Tensor,
}

/// Hyperparameters for the decoder stack.
#[derive(Debug, Clone)]
pub struct DecoderConfig {
    /// Number of stacked decoder layers.
    pub n_layers: usize,
    /// Model (embedding) dimension.
    pub d_model: usize,
    /// Number of attention heads per layer.
    pub n_heads: usize,
    /// Hidden dimension of the feed-forward network.
    pub d_ff: usize,
    /// Maximum supported sequence length (size of the positional-encoding table).
    pub max_seq_len: usize,
    /// Epsilon used by every layer norm.
    pub ln_eps: f32,
}

impl DecoderConfig {
    /// Configuration for the SPF-writer model: 6 layers, `d_model` 256,
    /// 8 heads, `d_ff` 1024, max sequence length 2048.
    pub fn spf_writer() -> Self {
        Self {
            n_layers: 6,
            d_model: 256,
            n_heads: 8,
            d_ff: 1024,
            max_seq_len: 2048,
            ln_eps: 1e-5,
        }
    }

    /// Small configuration, sized for tests.
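    ///
    /// Individual fields can be overridden with struct-update syntax, as the
    /// tests below do:
    ///
    /// ```ignore
    /// let config = DecoderConfig { max_seq_len: 10, ..DecoderConfig::small() };
    /// ```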
    pub fn small() -> Self {
        Self {
            n_layers: 2,
            d_model: 64,
            n_heads: 4,
            d_ff: 256,
            max_seq_len: 512,
            ln_eps: 1e-5,
        }
    }
}

/// A single pre-layer-norm decoder layer with three sublayers:
///
/// 1. Masked (causal) self-attention:   x = x + SelfAttn(LN1(x))
/// 2. Cross-attention over the encoder: x = x + CrossAttn(LN2(x), enc)
/// 3. Position-wise feed-forward:       x = x + FFN(LN3(x))
///
/// In decoder-only (causal LM) mode the cross-attention sublayer is skipped
/// entirely; see [`DecoderLayer::forward_self_only`].
pub struct DecoderLayer {
    /// Causal self-attention over the decoder input.
    pub self_attn: MultiHeadAttention,
    /// Non-causal attention over the encoder output.
    pub cross_attn: MultiHeadAttention,
    /// Position-wise feed-forward network.
    pub ffn: FeedForward,
    /// Layer norm before self-attention.
    pub ln1_gamma: Tensor,
    pub ln1_beta: Tensor,
    /// Layer norm before cross-attention.
    pub ln2_gamma: Tensor,
    pub ln2_beta: Tensor,
    /// Layer norm before the feed-forward network.
    pub ln3_gamma: Tensor,
    pub ln3_beta: Tensor,
    /// Epsilon shared by all three layer norms.
    pub ln_eps: f32,
}

impl DecoderLayer {
    /// Builds a layer with freshly initialized weights. The attention and
    /// FFN blocks receive fixed seed offsets so their initializations differ
    /// deterministically.
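    ///
    /// ```ignore
    /// let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
    /// ```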
    pub fn new(d_model: usize, n_heads: usize, d_ff: usize, ln_eps: f32, seed: u64) -> Self {
        let self_attn_config = AttentionConfig {
            d_model,
            n_heads,
            causal: true,
        };
        let cross_attn_config = AttentionConfig {
            d_model,
            n_heads,
            causal: false,
        };
        let ffn_config = FfnConfig { d_model, d_ff };

        Self {
            self_attn: MultiHeadAttention::new(self_attn_config, seed),
            cross_attn: MultiHeadAttention::new(cross_attn_config, seed + 50),
            ffn: FeedForward::new(ffn_config, seed + 100),
            // Layer norms start as the identity: gamma = 1, beta = 0.
            ln1_gamma: Tensor::ones(&[d_model]),
            ln1_beta: Tensor::zeros(&[d_model]),
            ln2_gamma: Tensor::ones(&[d_model]),
            ln2_beta: Tensor::zeros(&[d_model]),
            ln3_gamma: Tensor::ones(&[d_model]),
            ln3_beta: Tensor::zeros(&[d_model]),
            ln_eps,
        }
    }

    /// Full encoder-decoder forward pass through all three sublayers.
    ///
    /// `x` is the decoder input `[batch, tgt_len, d_model]`; `encoder_output`
    /// is `[batch, src_len, d_model]`. Returns `[batch, tgt_len, d_model]`.
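    ///
    /// ```ignore
    /// let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
    /// let dec_input = Tensor::randn(&[1, 4, 64], 99);   // target side
    /// let enc_output = Tensor::randn(&[1, 8, 64], 100); // source side
    /// let out = layer.forward(&dec_input, &enc_output)?; // [1, 4, 64]
    /// ```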
    pub fn forward(&self, x: &Tensor, encoder_output: &Tensor) -> Result<Tensor, String> {
        // Sublayer 1: causal self-attention with a pre-LN residual.
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let self_attn_out = self.self_attn.forward(&normed)?;
        let x = x.add(&self_attn_out)?;

        // Sublayer 2: cross-attention over the encoder output.
        let normed = x.layer_norm(&self.ln2_gamma, &self.ln2_beta, self.ln_eps)?;
        let cross_attn_out = self.cross_attn.forward_cross(&normed, encoder_output)?;
        let x = x.add(&cross_attn_out)?;

        // Sublayer 3: feed-forward network.
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let ffn_out = self.ffn.forward(&normed)?;
        x.add(&ffn_out)
    }

    /// Decoder-only (causal LM) forward pass: self-attention and FFN only,
    /// with the cross-attention sublayer skipped.
    pub fn forward_self_only(&self, x: &Tensor) -> Result<Tensor, String> {
        // Sublayer 1: causal self-attention with a pre-LN residual.
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let self_attn_out = self.self_attn.forward(&normed)?;
        let x = x.add(&self_attn_out)?;

        // No cross-attention: there is no encoder output in causal LM mode.

        // Sublayer 3: feed-forward network (ln2 is unused on this path).
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let ffn_out = self.ffn.forward(&normed)?;
        x.add(&ffn_out)
    }

    /// Same as [`forward_self_only`](Self::forward_self_only), but also
    /// records the intermediate activations needed for backpropagation.
    pub fn forward_self_only_with_cache(&self, x: &Tensor) -> Result<(Tensor, DecoderLayerCache), String> {
        let residual_input = x.clone();

        // Sublayer 1: causal self-attention, caching the layer-norm input.
        let ln1_input = x.clone();
        let normed = x.layer_norm(&self.ln1_gamma, &self.ln1_beta, self.ln_eps)?;
        let (self_attn_out, self_attn_cache) = self.self_attn.forward_with_cache(&normed)?;
        let x = x.add(&self_attn_out)?;

        // No cross-attention on the causal LM path.

        // Sublayer 3: feed-forward network, again caching the LN input.
        let ln3_input = x.clone();
        let normed = x.layer_norm(&self.ln3_gamma, &self.ln3_beta, self.ln_eps)?;
        let (ffn_out, ffn_cache) = self.ffn.forward_with_cache(&normed)?;
        let output = x.add(&ffn_out)?;

        let cache = DecoderLayerCache {
            ln1_input,
            self_attn_cache,
            ln3_input,
            ffn_cache,
            residual_input,
        };

        Ok((output, cache))
    }

    /// Total number of scalar parameters in this layer.
    pub fn num_params(&self) -> usize {
        let d = self.ln1_gamma.numel();
        // Six layer-norm parameter vectors: gamma and beta for ln1..ln3.
        self.self_attn.num_params() + self.cross_attn.num_params() + self.ffn.num_params() + 6 * d
    }

    /// All parameter tensors of this layer, in a stable order.
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w = self.self_attn.weights();
        w.extend(self.cross_attn.weights());
        w.extend(self.ffn.weights());
        w.extend([
            &self.ln1_gamma, &self.ln1_beta,
            &self.ln2_gamma, &self.ln2_beta,
            &self.ln3_gamma, &self.ln3_beta,
        ]);
        w
    }

    /// Mutable views of the same tensors, in the same order as [`weights`](Self::weights).
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w = self.self_attn.weights_mut();
        w.extend(self.cross_attn.weights_mut());
        w.extend(self.ffn.weights_mut());
        w.extend([
            &mut self.ln1_gamma, &mut self.ln1_beta,
            &mut self.ln2_gamma, &mut self.ln2_beta,
            &mut self.ln3_gamma, &mut self.ln3_beta,
        ]);
        w
    }
}

/// The full decoder: sinusoidal positional encoding, a stack of
/// [`DecoderLayer`]s, and a final layer norm.
pub struct Decoder {
    pub config: DecoderConfig,
    /// Precomputed sinusoidal positional encodings, `[max_seq_len, d_model]`.
    pub pos_encoding: Tensor,
    /// The stacked decoder layers.
    pub layers: Vec<DecoderLayer>,
    /// Final layer norm applied after the last layer.
    pub final_ln_gamma: Tensor,
    pub final_ln_beta: Tensor,
}

impl Decoder {
    /// Builds a decoder with freshly initialized weights; each layer gets a
    /// distinct seed offset so the layers initialize differently.
    pub fn new(config: DecoderConfig, seed: u64) -> Self {
        // Precompute the positional-encoding table once, up to max_seq_len.
        let pos_encoding = crate::encoder::sinusoidal_positional_encoding(
            config.max_seq_len,
            config.d_model,
        );

        let layers: Vec<DecoderLayer> = (0..config.n_layers)
            .map(|i| {
                DecoderLayer::new(
                    config.d_model,
                    config.n_heads,
                    config.d_ff,
                    config.ln_eps,
                    seed + (i as u64) * 2000,
                )
            })
            .collect();

        Self {
            final_ln_gamma: Tensor::ones(&[config.d_model]),
            final_ln_beta: Tensor::zeros(&[config.d_model]),
            pos_encoding,
            layers,
            config,
        }
    }

    /// Encoder-decoder forward pass.
    ///
    /// Adds positional encodings to `embeddings` (`[batch, tgt_len, d_model]`),
    /// runs every layer against `encoder_output`, and applies the final layer
    /// norm. Fails if `tgt_len` exceeds `config.max_seq_len`.
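    ///
    /// ```ignore
    /// let decoder = Decoder::new(DecoderConfig::small(), 42);
    /// let dec_emb = Tensor::randn(&[1, 4, 64], 99);
    /// let enc_out = Tensor::randn(&[1, 8, 64], 100);
    /// let out = decoder.forward(&dec_emb, &enc_out)?; // [1, 4, 64]
    /// ```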
    pub fn forward(
        &self,
        embeddings: &Tensor,
        encoder_output: &Tensor,
    ) -> Result<Tensor, String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;

        for layer in &self.layers {
            x = layer.forward(&x, encoder_output)?;
        }

        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }

    /// Decoder-only (causal LM) forward pass: every layer runs
    /// self-attention and FFN only, with no encoder input.
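    ///
    /// ```ignore
    /// let decoder = Decoder::new(DecoderConfig::small(), 42);
    /// let x = Tensor::randn(&[1, 6, 64], 99);
    /// let out = decoder.forward_causal(&x)?; // [1, 6, 64]
    /// ```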
    pub fn forward_causal(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;

        for layer in &self.layers {
            x = layer.forward_self_only(&x)?;
        }

        x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)
    }

    /// Cached variant of [`forward_causal`](Self::forward_causal): also
    /// returns one [`DecoderLayerCache`] per layer for backpropagation.
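    ///
    /// ```ignore
    /// let (out, caches) = decoder.forward_causal_with_cache(&x)?;
    /// assert_eq!(caches.len(), decoder.layers.len());
    /// ```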
    pub fn forward_causal_with_cache(&self, embeddings: &Tensor) -> Result<(Tensor, Vec<DecoderLayerCache>), String> {
        let seq_len = embeddings.shape[1];
        if seq_len > self.config.max_seq_len {
            return Err(format!(
                "Sequence length {} exceeds max {}",
                seq_len, self.config.max_seq_len
            ));
        }

        let mut x = self.add_positional_encoding(embeddings)?;
        let mut layer_caches = Vec::with_capacity(self.layers.len());

        for layer in &self.layers {
            let (out, cache) = layer.forward_self_only_with_cache(&x)?;
            x = out;
            layer_caches.push(cache);
        }

        let output = x.layer_norm(&self.final_ln_gamma, &self.final_ln_beta, self.config.ln_eps)?;
        Ok((output, layer_caches))
    }

    /// Adds the precomputed sinusoidal positional encoding to a batch of
    /// embeddings, broadcasting the `[seq_len, d_model]` table over the batch.
    fn add_positional_encoding(&self, embeddings: &Tensor) -> Result<Tensor, String> {
        let batch = embeddings.shape[0];
        let seq_len = embeddings.shape[1];
        let d_model = embeddings.shape[2];

        // Take the first seq_len rows of the [max_seq_len, d_model] table.
        let pos_enc = self.pos_encoding.slice(0, seq_len)?;

        let mut data = embeddings.data.clone();
        for b in 0..batch {
            for s in 0..seq_len {
                for d in 0..d_model {
                    data[(b * seq_len + s) * d_model + d] += pos_enc.data[s * d_model + d];
                }
            }
        }
        Tensor::from_data(data, embeddings.shape.clone())
    }

    /// Total parameter count: all layers plus the final layer norm.
    pub fn num_params(&self) -> usize {
        let layer_params: usize = self.layers.iter().map(|l| l.num_params()).sum();
        layer_params + 2 * self.config.d_model
    }

    /// All parameter tensors, layer by layer, then the final layer norm.
    pub fn weights(&self) -> Vec<&Tensor> {
        let mut w: Vec<&Tensor> = Vec::new();
        for layer in &self.layers {
            w.extend(layer.weights());
        }
        w.push(&self.final_ln_gamma);
        w.push(&self.final_ln_beta);
        w
    }

    /// Mutable views of the same tensors, in the same order as [`weights`](Self::weights).
    pub fn weights_mut(&mut self) -> Vec<&mut Tensor> {
        let mut w: Vec<&mut Tensor> = Vec::new();
        for layer in &mut self.layers {
            w.extend(layer.weights_mut());
        }
        w.push(&mut self.final_ln_gamma);
        w.push(&mut self.final_ln_beta);
        w
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_decoder_layer_with_encoder() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        let dec_input = Tensor::randn(&[1, 4, 64], 99);
        let enc_output = Tensor::randn(&[1, 8, 64], 100);
        let out = layer.forward(&dec_input, &enc_output).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_layer_self_only() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        let x = Tensor::randn(&[1, 6, 64], 99);
        let out = layer.forward_self_only(&x).unwrap();
        assert_eq!(out.shape, vec![1, 6, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_full_forward() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let dec_emb = Tensor::randn(&[1, 4, 64], 99);
        let enc_out = Tensor::randn(&[1, 8, 64], 100);
        let out = decoder.forward(&dec_emb, &enc_out).unwrap();
        assert_eq!(out.shape, vec![1, 4, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_causal_forward() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 6, 64], 99);
        let out = decoder.forward_causal(&x).unwrap();
        assert_eq!(out.shape, vec![1, 6, 64]);
        assert!(out.data.iter().all(|v| v.is_finite()));
    }

    #[test]
    fn test_decoder_seq_exceeds_max() {
        let config = DecoderConfig { max_seq_len: 10, ..DecoderConfig::small() };
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 20, 64], 99);
        let enc = Tensor::randn(&[1, 5, 64], 100);
        assert!(decoder.forward(&x, &enc).is_err());
    }

    #[test]
    fn test_decoder_causal_seq_exceeds_max() {
        let config = DecoderConfig { max_seq_len: 10, ..DecoderConfig::small() };
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[1, 20, 64], 99);
        assert!(decoder.forward_causal(&x).is_err());
    }

    #[test]
    fn test_decoder_num_params() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let params = decoder.num_params();
        // 2 layers x 66,752 (see test_decoder_layer_params) plus the final
        // layer norm's 2 x 64 = 128 params: 133,504 + 128 = 133,632.
        assert_eq!(params, 133632);
    }

    #[test]
    fn test_decoder_batch() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let x = Tensor::randn(&[3, 4, 64], 99);
        let enc = Tensor::randn(&[3, 6, 64], 100);
        let out = decoder.forward(&x, &enc).unwrap();
        assert_eq!(out.shape, vec![3, 4, 64]);
    }

    #[test]
    fn test_decoder_weights_count() {
        let config = DecoderConfig::small();
        let decoder = Decoder::new(config, 42);
        let weights = decoder.weights();
        // 26 tensors per layer x 2 layers, plus the final layer norm's
        // gamma and beta: 52 + 2 = 54.
        assert_eq!(weights.len(), 54);
    }

    #[test]
    fn test_decoder_layer_params() {
        let layer = DecoderLayer::new(64, 4, 256, 1e-5, 42);
        // Self-attention + cross-attention + FFN parameters, plus the six
        // layer-norm vectors (6 x 64 = 384).
        assert_eq!(layer.num_params(), 66752);
    }
}