use std::ops::{Div, Mul};

mod demo1;
mod demo2;
mod env;
mod show_image;

/// Entry point: runs the demo2 example.
///
/// `run()` below is an alternative entry point that trains the MNIST MLP
/// defined in this file.
fn main() {
    // `expect` instead of a bare `unwrap` so a failure panics with context.
    demo2_run().expect("demo2_run failed");
}
use crate::demo1::{gradient_descent, test_demo1};
use crate::demo2::demo2_run;
use anyhow::Result;
use tch::{Device, Kind, nn, nn::Module, nn::OptimizerConfig};

// Flattened MNIST image size (28 * 28 = 784 input features).
const IMAGE_DIM: i64 = 784;
// Width of the single hidden layer.
const HIDDEN_NODES: i64 = 128;
// Number of output classes (digits 0-9).
const LABELS: i64 = 10;

/// Builds a two-layer MLP for MNIST: 784 -> 128 -> ReLU -> 10 logits.
///
/// Weights are registered in the var store under `vs`: the hidden layer
/// under the `layer1` sub-path, the output layer directly under `vs`.
fn net(vs: &nn::Path) -> impl Module {
    // Hidden layer is created first, then the output layer, so the
    // variable-initialization (RNG) order matches the original builder chain.
    let hidden = nn::linear(vs / "layer1", IMAGE_DIM, HIDDEN_NODES, Default::default());
    let mut model = nn::seq().add(hidden).add_fn(|xs| xs.relu());
    model = model.add(nn::linear(vs, HIDDEN_NODES, LABELS, Default::default()));
    model
}

pub fn run() -> Result<()> {
    let vs = nn::VarStore::new(Device::Cuda(0));
    let m = tch::vision::mnist::load_dir("D:\\AIGC\\minist\\data")?;

    let train_images = m.train_images.to_device(Device::Cuda(0));
    let train_labels = m.train_labels.to_device(Device::Cuda(0));
    let test_images = m.test_images.to_device(Device::Cuda(0));
    let test_labels = m.test_labels.to_device(Device::Cuda(0));
    let path = vs.root();
    let net = net(&path);
    let mut opt = nn::Adam::default().build(&vs, 1e-3)?;
    let label = train_labels.one_hot(10);
    assert_ne!(train_images.size()[0], 0, "无训练数据batch");
    for epoch in 1..100 {
        let loss = net
            .forward(&train_images)
            .log_softmax(-1, Kind::Float)
            .mul(&label)
            .sum(Kind::Float)
            .negative()
            .div(train_images.size()[0]);
        // let loss = net
        //     .forward(&train_images)
        //     .cross_entropy_for_logits(&train_labels);
        opt.backward_step(&loss);
        let test_accuracy = net.forward(&test_images).accuracy_for_logits(&test_labels);

        println!(
            "epoch: {:4} train loss: {:8.5} test acc: {:5.2}%",
            epoch,
            &loss.double_value(&[]),
            &test_accuracy.double_value(&[]),
        );
    }
    Ok(())
}
