use candle_core::{Device, Tensor};
use candle_nn::{linear, loss, lstm, LSTMConfig, Linear, Module, Optimizer, VarBuilder, VarMap, RNN};



/// An LSTM encoder followed by a linear projection of the final
/// hidden state to the output dimension.
struct LstmModel {
    /// Recurrent layer run over the input sequence.
    lstm: candle_nn::LSTM,
    /// Projection head applied to the last hidden state.
    linear: Linear,
}

impl LstmModel {
    /// Builds the model: an LSTM with `in_dim` input features and
    /// `hidden_dim` hidden units, plus a linear head projecting the
    /// hidden state to `out_dim`.
    ///
    /// # Panics
    /// Panics if creating either layer in `vs` fails (e.g. shape or
    /// variable-store errors).
    fn new(in_dim: usize, hidden_dim: usize, out_dim: usize, vs: VarBuilder) -> Self {
        let config = LSTMConfig::default();
        let lstm = lstm(in_dim, hidden_dim, config, vs.pp("lstm"))
            .expect("failed to build LSTM layer");
        let linear = linear(hidden_dim, out_dim, vs.pp("linear"))
            .expect("failed to build linear layer");
        Self { lstm, linear }
    }

    /// Runs the LSTM over `xs` and projects the final step's hidden
    /// state through the linear layer.
    /// Assumes `xs` is (batch, seq_len, in_dim) — matches the input
    /// built in `main`; confirm for other callers.
    ///
    /// # Panics
    /// Panics if the LSTM pass fails, the sequence is empty (no last
    /// state), or the linear projection fails.
    fn forward(&self, xs: &Tensor) -> Tensor {
        let states = self.lstm.seq(xs).expect("LSTM forward pass failed");
        // `seq` yields one state per time step; the last one summarises
        // the whole sequence.
        let last = states.last().expect("input sequence must be non-empty");
        self.linear
            .forward(last.h())
            .expect("linear projection failed")
    }
}

/// Model dimensions and training hyper-parameters for the run in `main`.
struct Features{
    /// Number of input features per time step.
    in_dim:usize,
    /// LSTM hidden-state size.
    hidden_dim:usize,
    /// Size of the model output (regression target width).
    out_dim:usize,
    /// Number of sequences per batch.
    batch_size:usize,
    /// Number of time steps per sequence.
    seq_len:usize,
    /// Number of training iterations.
    epochs:usize,
    /// AdamW learning rate.
    learning_rate:f64
}

/// Trains the LSTM on random stand-in data with MSE loss, printing the
/// loss each epoch, then prints a final prediction next to the target.
fn main() {
    let data = Features {
        in_dim: 4,
        hidden_dim: 64,
        out_dim: 1,
        batch_size: 2,
        seq_len: 30,
        epochs: 100,
        learning_rate: 0.001,
    };
    let device = Device::Cpu;
    let varmap = VarMap::new();
    let vs = candle_nn::VarBuilder::from_varmap(&varmap, candle_core::DType::F32, &device);
    let model = LstmModel::new(data.in_dim, data.hidden_dim, data.out_dim, vs);

    // Random placeholders for a real dataset: inputs are
    // (batch, seq_len, in_dim); targets are (batch, out_dim).
    let input = Tensor::randn(
        0f32,
        1.0,
        &[data.batch_size, data.seq_len, data.in_dim],
        &device,
    )
    .unwrap();
    let out = Tensor::randn(0f32, 1.0, &[data.batch_size, data.out_dim], &device).unwrap();

    let mut optimizer = candle_nn::AdamW::new_lr(varmap.all_vars(), data.learning_rate).unwrap();
    // Half-open range runs exactly `epochs` iterations; the previous
    // inclusive `0..=epochs` trained one extra epoch (101 instead of 100).
    for epoch in 0..data.epochs {
        let logits = model.forward(&input);
        let loss = loss::mse(&logits, &out).unwrap();
        optimizer.backward_step(&loss).unwrap();
        println!("epoch:{},loss:{}", epoch, loss.to_scalar::<f32>().unwrap())
    }
    let pred = model.forward(&input);
    println!("{},{}", pred, out);
}