use image::io::Reader as ImageReader;
use linfa::Dataset;
use mnist::{Mnist, MnistBuilder};
use ndarray::{Array1, Array2, Axis};
use std::io::Cursor;
use tch::nn::init;
use tch::{Device, IndexOp, Kind, Tensor};

/// Program entry point: runs the MNIST training demo.
/// (Call `generate_image()` instead to dump sample digits as PNGs.)
fn main() {
    mnist_guess();
}

fn generate_image() {
    let mnist = MnistBuilder::new()
        .base_path("data/")
        .label_format_digit()
        .training_set_length(50_000)
        .test_set_length(10_000)
        .validation_set_length(10_000)
        .download_and_extract()
        .finalize();
    let valid_img = Tensor::try_from(&mnist.val_img)
        .unwrap()
        .view((-1, 28 * 28));
    let idx: Vec<i64> = Tensor::randperm(10, (Kind::Int64, Device::Cpu)).into();
    for i in &idx {
        let img = valid_img.i((*i,));
        let data: Vec<u8> = img.into();
        let image_reader = ImageReader::new(Cursor::new(data))
            .with_guessed_format()
            .unwrap()
            .decode()
            .unwrap();
        image_reader
            .save(format!("./examples/mnist/{}.png", i))
            .unwrap();
    }
}

fn mnist_guess() {
    let mnist = get_mnist();
    let device = Device::Mps;
    let (w1, b1) = (
        Tensor::randn(&[200, 784], (Kind::Float, device)).set_requires_grad(true),
        Tensor::zeros(&[200], (Kind::Float, device)).set_requires_grad(true),
    );
    let (w2, b2) = (
        Tensor::randn(&[200, 200], (Kind::Float, device)).set_requires_grad(true),
        Tensor::zeros(&[200], (Kind::Float, device)).set_requires_grad(true),
    );
    let (w3, b3) = (
        Tensor::randn(&[10, 200], (Kind::Float, device)).set_requires_grad(true),
        Tensor::zeros(&[10], (Kind::Float, device)).set_requires_grad(true),
    );

    use tch::nn::init;
    let w1 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[200, 784],
        device,
    )
    .set_requires_grad(true);
    let w2 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[200, 200],
        device,
    )
    .set_requires_grad(true);
    let w3 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[10, 200],
        device,
    )
    .set_requires_grad(true);

    // 向前传播
    let forward = |x: &Tensor| -> Tensor {
        let mut x = x.matmul(&w1.tr()) + &b1;
        x = x.leaky_relu();
        x = x.matmul(&w2.tr()) + &b2;
        x = x.leaky_relu();
        x = x.matmul(&w3.tr()) + &b3;
        x
    };

    // 初始化一个优化器
    let mut optimizer_sgd = tch::COptimizer::sgd(0.001, 0., 0., 0., false).unwrap();
    optimizer_sgd.add_parameters(&w1, 0).unwrap();
    optimizer_sgd.add_parameters(&b2, 0).unwrap();
    optimizer_sgd.add_parameters(&w2, 0).unwrap();
    optimizer_sgd.add_parameters(&b2, 0).unwrap();
    optimizer_sgd.add_parameters(&w3, 0).unwrap();
    optimizer_sgd.add_parameters(&b3, 0).unwrap();

    // 处理数据
    let features = Array2::from_shape_vec((60_000, 28 * 28), mnist.trn_img).unwrap();
    let target = Array1::from(mnist.trn_lbl);
    let dataset = Dataset::new(features, target);
    let iter_trn = dataset.sample_chunks(200);
    let epochs = 20;

    let features = Array2::from_shape_vec((10_000, 28 * 28), mnist.tst_img).unwrap();
    let target = Array1::from(mnist.tst_lbl);
    let dataset_test = Dataset::new(features, target);
    let iter_chunk = dataset_test.sample_chunks(200);

    for epoch in 0..epochs {
        for (batch_idx, v) in iter_trn.clone().enumerate() {
            let data = Tensor::try_from(v.records())
                .unwrap()
                .view((-1, 28 * 28))
                .to_kind(Kind::Float)
                .to_device(device);

            // target Int64
            let target = Tensor::try_from(v.targets())
                .unwrap()
                .view(-1)
                .to_kind(Kind::Int64)
                .to_device(device);

            let logits = forward(&data);
            let loss = logits.cross_entropy_for_logits(&target);
            optimizer_sgd.zero_grad().unwrap();
            loss.backward();
            optimizer_sgd.step().unwrap();
            if batch_idx % 100 == 0 {
                println!(
                    "Train Epoch: {} [{}/{} {}%] \t loss: {} ",
                    epoch,
                    batch_idx * 200,
                    dataset.records().len_of(Axis(0)),
                    100. * 200. * (batch_idx as f64) / dataset.records().len_of(Axis(0)) as f64,
                    loss
                );
            }
        }

        let mut test_loss = Tensor::zeros(&[1], (Kind::Float, device));
        let mut correct = Tensor::zeros(&[1], (Kind::Float, device));

        for d in iter_chunk.clone() {
            let data = Tensor::try_from(d.records())
                .unwrap()
                .view((-1, 28 * 28))
                .to_kind(Kind::Float)
                .to_device(device);
            let target = Tensor::try_from(d.targets())
                .unwrap()
                .view(-1)
                .to_kind(Kind::Int64)
                .to_device(device);
            let logits = forward(&data);
            test_loss += logits.cross_entropy_for_logits(&target);
            let pred = logits.max_dim(1, true).1;
            correct += pred.eq_tensor(&target.view((200, 1))).sum(Kind::Int64);
        }
        test_loss /= dataset_test.records().len_of(Axis(0)) as f64;
        println!(
            "Test set: Average loss: {}, Accuracy: {}/{} ({}%)",
            f64::from(&test_loss),
            f64::from(&correct),
            dataset_test.records().len_of(Axis(0)),
            100. * f64::from(&correct) / dataset_test.records().len_of(Axis(0)) as f64
        );
    }

    let test_data = Tensor::try_from(dataset_test.records())
        .unwrap()
        .view((-1, 28 * 28))
        .to_kind(Kind::Float)
        .to_device(device);
    let test_target = Tensor::try_from(dataset_test.targets())
        .unwrap()
        .view(-1)
        .to_kind(Kind::Float)
        .to_device(device);
    let logits = forward(&test_data.i(16)).view(-1);
    println!("预测值: {}", logits.max_dim(0, true).1);
}

/// Download (if necessary) and load the standard 60k/10k MNIST train/test
/// split from `data/`, with labels as plain digit values (0-9).
fn get_mnist() -> Mnist {
    let mut builder = MnistBuilder::new();
    builder
        .base_path("data/")
        .label_format_digit()
        .training_set_length(60_000)
        .test_set_length(10_000)
        // no validation split here; the full 60k goes to training
        .download_and_extract()
        .finalize()
}
