use twinkle as tw;
use twinkle::nn::{functional as F, Module};
use twinkle::datasets::load_digits;
use twinkle::nn::linear::Linear;
use twinkle::optim::{Optimizer, SGD};

/// Trains a single linear layer (64 -> 10, with bias) on the digits dataset
/// using per-sample SGD and MSE loss, printing train/test accuracy per epoch.
///
/// Each sample is a 64-pixel image reshaped to `[1, 64]` with a one-hot
/// `[1, 10]` label; a prediction counts as correct when its argmax matches
/// the label's argmax.
pub fn fit() {
    let (x_train, x_test, y_train, y_test) = load_digits();

    let epochs = 50u32;
    let lr = 1e-3;

    // Single fully-connected layer: 64 inputs -> 10 class scores, bias enabled.
    let linear1 = Linear::new(64, 10, true);

    let optimizer = SGD::new(linear1.parameters(), lr);

    // Derive sample counts from the data so the accuracy denominators stay
    // correct if the train/test split ever changes (previously hard-coded
    // as 1000 and 797).
    let n_train = x_train.shape()[0];
    let n_test = x_test.shape()[0];

    for epoch in 0..epochs {
        // Fresh counters each epoch instead of resetting shared ones at the
        // end of the loop body.
        let mut train_score = 0u32;
        let mut test_score = 0u32;

        // Training pass: forward, score, then one SGD step per sample.
        for i in 0..n_train {
            let x_i = x_train.index(&[i]).reshape(&[1, 64]);
            let y_i = y_train.index(&[i]).reshape(&[1, 10]);

            let y_hat = linear1.forward(x_i);

            if y_hat.argmax().item() == y_i.argmax().item() {
                train_score += 1;
            }

            let loss = F::mse_loss(&y_hat, &y_i);

            optimizer.zero_grad(true);
            loss.backward_ones();
            optimizer.step();
        }

        // Evaluation pass: forward only, no gradient updates.
        for i in 0..n_test {
            let x_i = x_test.index(&[i]).reshape(&[1, 64]);
            let y_i = y_test.index(&[i]).reshape(&[1, 10]);

            let y_hat = linear1.forward(x_i);

            if y_hat.argmax().item() == y_i.argmax().item() {
                test_score += 1;
            }
        }

        print!("epoch {:2}, ", epoch);
        print!("train accuracy {:.3}, ", f64::from(train_score) / n_train as f64);
        println!("test accuracy {:.3}", f64::from(test_score) / n_test as f64);
    }
}

/// Entry point: run the digits training/evaluation loop.
fn main() {
    fit();
}
