use image::{ImageBuffer, RgbImage};
use linfa::Dataset;
use std::println;

use mnist::{Mnist, MnistBuilder};
use ndarray::prelude::*;
use plotters::prelude::Color;
use plotters::prelude::*;
use show_image::{event, ImageInfo, ImageView, WindowOptions};
use tch::nn::{init, Init};
use tch::{Device, IndexOp, Kind, Reduction, Tensor};

// Entry point. Each `testN` function below is an independent, self-contained
// demo; uncomment exactly the one you want to run (currently only `test18`
// is active).
fn main() {
    // test1();
    //test2();
    //test3();
    //test4();
    // test5();
    //test6();
    //test7();
    //test8();
    //test9();
    //test10();
    //test11();
    // test12();
    //test13();
    //test14();
    //test15();
    //test16();
    //test17();
    test18();
}

// Basics: shape and metadata queries on a tensor.
fn test1() {
    // `x` is never mutated below, so the original `mut` was needless.
    let x = tch::Tensor::ones(&[1, 2], (tch::Kind::Float, Device::Cpu));
    println!("{}", x);
    println!("{:?}", x.i((0, 0))); // fully-indexed element (0-dim tensor)
    println!("{:?}", x.size()); // shape as Vec<i64>
    println!("{:?}", x.size2().unwrap()); // shape as (i64, i64); errors unless rank 2
    println!("{}", x.dim()); // number of dimensions
    println!("{}", x.numel()); // total element count
}

// Creating tensors from various sources.
fn test2() {
    // From an ndarray value.
    let a = array![2.0, 3.3];
    let b = tch::Tensor::try_from(&a).unwrap();
    println!("{}", b);

    // Built-in constructors.
    let c = tch::Tensor::ones(&[1, 2], (tch::Kind::Bool, tch::Device::Cpu));
    println!("{}", c);

    // NOTE(review): `d` is never used afterwards — kept only as an
    // ndarray-construction example.
    let d: Array2<f64> = Array2::ones((2, 3));

    // One-dimensional tensors from a vec or a slice.
    println!("{}", tch::Tensor::try_from(vec![1, 2, 3]).unwrap());
    println!("{}", tch::Tensor::try_from(&vec![5, 6, 7]).unwrap());
    println!("{}", tch::Tensor::from([3, 4, 5].as_slice()));
    println!(
        "{}",
        tch::Tensor::try_from(array![[2, 4, 5], [4, 5, 6]]).unwrap()
    );

    // Random values.
    println!("{}", Tensor::rand(&[3, 3], (Kind::Float, Device::Cpu)));
    println!("{}", Tensor::rand_like(&b));
    println!(
        "{}",
        Tensor::randint(10, &[3, 3], (Kind::Float, Device::Cpu))
    );
    println!(
        "{}",
        Tensor::randint_low(-1, 10, &[3, 3], (Kind::Float, Device::Cpu))
    );
    let mut f = Tensor::randn(&[3, 3], (Kind::Float, Device::Cpu));
    println!("{}", f);
    // let n = f.normal_(0., 1.0);
    // println!("{}", n);
    println!("{}", f.mean(Kind::Float));
    // `&[]` gives a 0-dimensional (scalar) tensor.
    let ret = Tensor::full(&[], 90, (Kind::Double, Device::Cpu));
    println!("{}", ret);

    println!("{}", Tensor::arange(5, (Kind::Int64, Device::Cpu)));
    println!("{}", Tensor::range(-1, 10, (Kind::Double, Device::Cpu)));

    // linspace: the third argument is how many evenly spaced values to generate.
    println!(
        "{}",
        Tensor::linspace(-1, 10, 12, (Kind::Double, Device::Cpu))
    );
    // Generate 5 values.
    println!("{}", Tensor::linspace(0, 10, 5, (Kind::Int64, Device::Cpu)));
    // logspace: values are base^x for x evenly spaced over the interval.
    println!(
        "{}",
        Tensor::logspace(0, 10, 5, 10.0, (Kind::Int64, Device::Cpu))
    );
    println!("{}", Tensor::eye(3, (Kind::Int64, Device::Cpu)));

    // randperm: a random permutation, usable as shuffled indices.
    println!("{}", Tensor::randperm(10, (Kind::Int64, Device::Cpu)));
    let a = Tensor::rand(&[2, 3], (Kind::Double, Device::Cpu));
    println!("{}", a);
    let b = Tensor::rand(&[2, 2], (Kind::Double, Device::Cpu));
    println!("{}", b);
    // Shuffle the rows of `a` and `b` with the same permutation.
    let idx = Tensor::randperm(2, (Kind::Int64, Device::Cpu));
    println!("{}", idx);
    println!("{}", a.i(&idx));
    println!("{}", b.i(&idx));
}

// Indexing and slicing.
fn test3() {
    // Shape interpreted as [batch=4, channels=3, height=28, width=28].
    let tmp = Tensor::rand(&[4, 3, 28, 28], (Kind::Float, Device::Cpu));
    println!("{}", tmp.i(0));
    println!("{}", tmp.i((0, 0)));
    // A fully-specified index yields a 0-dim tensor, convertible to a scalar.
    let v: f32 = tmp.i((0, 0, 2, 4)).try_into().unwrap();
    println!("{}", v);

    // Range slicing.
    println!("{}", tmp.i(..2));
    println!("{}", tmp.i((..2, 1..)));
    println!("{}", tmp.i((..2, 2..)));

    // Strided selection: every other index of the third dimension.
    println!(
        "{}",
        tmp.i((.., .., (0..28).step_by(2).collect::<Vec<_>>()))
    );
    // index_select picks whole slices along the given dimension.
    println!("{}", tmp.index_select(0, &([0i32, 2i32].as_slice()).into()));
    println!("{}", tmp.index_select(1, &([1i32, 2i32].as_slice()).into()));
    println!(
        "{}",
        tmp.index_select(2, &Tensor::arange(28, (Kind::Int64, Device::Cpu)))
    );

    println!(
        "{}",
        tmp.index_select(2, &Tensor::arange(8, (Kind::Int64, Device::Cpu)))
    );

    // Boolean-mask selection flattens the matching elements into 1-D.
    let x = Tensor::rand(&[3, 4], (Kind::Float, Device::Cpu));
    let mask = x.ge(0.5);
    println!("{}", mask);
    println!("{}", x.masked_select(&mask));
    let index: Tensor = [0i32, 1i32, 2i32].as_slice().into();
    println!("{}", x);
    println!("{}", index);
}

// Reshaping and dimension manipulation.
fn test4() {
    let a = Tensor::rand(&[4, 1, 28, 28], (Kind::Float, Device::Cpu));
    println!("{}", a);
    println!("{:?}", a.size());
    // view and reshape preserve the element count, only the shape changes.
    println!("{}", a.view([4, 28 * 28].as_slice()));
    println!("{}", a.reshape(&[4 * 28, 28]));
    println!("{}", a.reshape(&[4, 28, 28]));
    println!("{}", a.view([4, 28, 28,]));
    println!("{}", a.view(4 * 28 * 28));
    // unsqueeze inserts a size-1 dimension; a non-negative index inserts
    // before that position, a negative index counts from the end.
    println!("{}", a.unsqueeze(0));
    println!("{}", a.unsqueeze(-1));
    println!("{}", a.unsqueeze(4));
    let a = Tensor::from([1.2, 2.3].as_slice());
    println!("{}", a.unsqueeze(1));
    // For example: shape a bias of [32] to broadcast against a feature map.
    let b = Tensor::rand(&[32], (Kind::Float, Device::Cpu));
    let f = Tensor::rand(&[4, 32, 14, 14], (Kind::Float, Device::Cpu));
    let b = b.unsqueeze(1).unsqueeze(2).unsqueeze_(0);
    println!("{}", b);
    // squeeze drops size-1 dimensions (all of them, or specific ones).
    println!("{}", b.squeeze());
    println!("{}", b.squeeze_dim(0));
    println!("{}", b.squeeze_dim(-1));
    println!("{}", b.squeeze_dims([0, -1, -2].as_slice()));
    // expand / repeat: expansion to a larger shape.
    println!("{}", b.expand_as(&f));
    println!("{}", b.expand(&[4, 32, 14, 14], false));
    // -1 leaves that dimension unchanged.
    println!("{}", b.expand(&[-1, 32, -1, -1], false));

    // repeat gives the number of copies along each dimension.
    println!("{}", b.repeat(&[4, 1, 14, 14]));

    // Matrix ops: transpose / permute.
    let mut a = Tensor::rand(&[3, 4], (Kind::Float, Device::Cpu));
    println!("{}", a);
    println!("{}", a.t_());
    println!("{}", a.transpose(0, 1));
    // permute reorders the dimensions in the listed order.
    println!("{}", a.transpose(0, 1).permute(&[1, 0]));
}

// Broadcasting automatically expands dimensions without copying data.
// Intentionally empty — broadcasting is exercised implicitly elsewhere
// (e.g. `&a + &b` with shapes [3, 4] and [4] in test7).
fn test5() {}

// Concatenation and splitting.
fn test6() {
    // cat joins along an existing dimension; the other dims must match.
    let a = Tensor::rand(&[4, 32, 8], (Kind::Float, Device::Cpu));
    let b = Tensor::rand(&[5, 32, 8], (Kind::Float, Device::Cpu));
    let all = Tensor::cat(&[&a, &b], 0);
    println!("{}", all);

    // stack inserts a new dimension; a and b must have identical shapes.
    let a = Tensor::rand(&[32, 8], (Kind::Float, Device::Cpu));
    let b = a.rand_like();
    let stack = Tensor::stack(&[&a, &b], 0);
    println!("{}", stack);

    // split_sizes splits back into pieces of the given sizes along dim 0.
    let b = stack.split_sizes(&[1, 1], 0);
    println!("{:?}", b);
    for t in &b {
        println!("{}", t.squeeze_dim(0));
    }
}

// Math operations.
fn test7() {
    // Element-wise add with broadcasting ([3, 4] + [4]).
    let a = Tensor::rand(&[3, 4], (Kind::Float, Device::Cpu));
    let b = Tensor::rand(&[4], (Kind::Float, Device::Cpu));
    println!("{}", a);
    println!("{}", b);
    println!("{}", &a + &b);
    println!("{}", &a + 2);
    let a = Tensor::ones(&[3, 3], (Kind::Float, Device::Cpu));
    let b = &a * 3;
    println!("{}", b);
    // `*` is element-wise; matmul is true matrix multiplication.
    println!("{}", &a * &b);
    let c = Tensor::from([1, 1, 2].as_slice());
    println!("{}", c);
    println!("{}", &b * &c);
    println!("{}", a.matmul(&b));
    // Batched matmul broadcasts the leading (batch) dimensions.
    let a = Tensor::rand(&[4, 1, 28, 64], (Kind::Float, Device::Cpu));
    let b = Tensor::rand(&[4, 3, 64, 32], (Kind::Float, Device::Cpu));
    println!("{}", a.matmul(&b));

    // Power / root / exponential / logarithm.
    let mut a = Tensor::full(&[2, 2], 3, (Kind::Float, Device::Cpu));
    println!("{}", a.pow_(2)); // in-place power: `a` itself is modified
    println!("{}", a);
    println!("{}", a.sqrt());
    println!("{}", a.exp());
    println!("{}", a.exp().log());
    // Rounding family on a scalar tensor.
    let a = Tensor::from(3.14);
    println!("{}", a.floor());
    println!("{}", a.ceil());
    println!("{}", a.trunc());
    println!("{}", a.frac());
    println!("{}", a.round());

    // clamp: limit values to a range (commonly used for gradient clipping).
    let grad = Tensor::rand(&[2, 3], (Kind::Float, Device::Cpu)) * 15;
    println!("{}", grad.max());
    println!("{}", grad.median());
    println!("{}", grad.clamp_min(10));
    println!("{}", grad.clamp_max(10));
    println!("{}", grad.clamp(0, 10));
}

// Statistical properties of a Tensor.
fn test8() {
    let a = Tensor::full(&[8], 1, (Kind::Float, Device::Cpu));
    let b = a.view([2, 4]);
    println!("{}", b);
    let c = a.view([2, 2, 2]);
    println!("{}", c);
    // The norm over all elements is the same regardless of the view's shape.
    println!("{}", a.norm());
    println!("{}", b.norm());
    println!("{}", c.norm());
    // norm_scalaropt_dim is the recommended variant (order p, dims, keepdim).
    println!("{}", b.norm_scalaropt_dim(1, &[], false));
    println!("{}", b.norm_scalaropt_dim(2, &[], true));
    // Statistics over all elements.
    let a = Tensor::arange(8, (Kind::Float, Device::Cpu));
    println!("{}", a);
    // NOTE(review): randperm requested with a Float kind — confirm the
    // backend accepts a floating-point dtype here.
    let b = Tensor::randperm(8, (Kind::Float, Device::Cpu));
    println!("{}", b);
    println!("{}", a.min());
    println!("{}", a.max());
    println!("{}", a.mean(Kind::Float)); // mean
    println!("{}", a.prod(Kind::Float));
    println!("{}", a.sum(Kind::Float));
    println!("{}", a.argmin(None, true)); // index of the minimum value
    println!("{}", a.argmax(None, true)); // index of the maximum value
    println!("{}", b.min());
    println!("{}", b.min());
    println!("{}", b.max());
    println!("{}", b.mean(Kind::Float)); // mean
    println!("{}", b.prod(Kind::Float));
    println!("{}", b.sum(Kind::Float));
    println!("{}", b.argmin(None, true));
    println!("{}", b.argmax(None, true));
    println!("{}", b.min());
    let a = Tensor::randn(&[4, 10], (Kind::Float, Device::Cpu));
    println!("{}", a.i(0));
    println!("{}", a);
    println!("{}", a.argmax(None, true)); // keepdim = true keeps the original dims
    println!("{}", a.max());
    //println!("{}", a.mean_dim(Some([1].as_slice()), true, Kind::Float));

    // topk: the k largest (largest = true) or smallest (largest = false)
    // values along a dim — returns more information than a bare max.
    let topk_value = a.topk(3, 1, false, true);
    println!("{}", a);
    println!("{}", topk_value.0);
    println!("{}", topk_value.1); // indices into the original tensor
                                  // kthvalue: the k-th smallest value; returns a single result
    let kthvalue = a.kthvalue(8, 1, true);
    println!("{}", kthvalue.0);
    // Element-wise comparisons produce boolean tensors.
    println!("{}", a.gt(0));
    println!("{}", a.lt(0));
    let b = Tensor::randn(&[4, 10], (Kind::Float, Device::Cpu));
    println!("{}", a.eq_tensor(&b));
}

// where / gather operations.
fn test9() {
    // where: return a tensor of elements selected from either x or y,
    // depending on a boolean condition tensor.
    let cond = Tensor::randn(&[2, 2], (Kind::Float, Device::Cpu));
    println!("{}", cond);
    let cond = cond.ge(0.5);
    println!("{}", cond);

    let a = Tensor::zeros(&[2, 2], (Kind::Float, Device::Cpu));
    let b = Tensor::ones(&[2, 2], (Kind::Float, Device::Cpu));
    println!("{}", a);
    println!("{}", b);
    // Take from `a` where cond is true, from `b` elsewhere.
    println!("{}", a.where_self(&cond, &b));

    // gather: collect values along an axis specified by dim —
    // effectively a table lookup driven by an index tensor.
    let prob = Tensor::randn(&[4, 10], (Kind::Double, Device::Cpu));

    let idx = prob.topk(3, 1, true, true);
    println!("{}\n{}", idx.0, idx.1);
    let label = Tensor::arange(10, (Kind::Int64, Device::Cpu)) + 100;
    println!("{}", label);
    println!("{}", label.expand(&[4, 10], false));
    println!("{}", label.expand(&[4, 10], false).gather(1, &idx.1, true));
}

// Activation functions: sigmoid maps to [0, 1], tanh to [-1, 1], plus ReLU.
// ReLU is usually the preferred choice.
fn test10() {
    let a = Tensor::linspace(-100, 100, 10, (Kind::Double, Device::Cpu));
    println!("{}", a);
    println!("{}", a.sigmoid());
    let a = Tensor::linspace(-10, 10, 10, (Kind::Double, Device::Cpu));
    println!("{}", a.tanh());
    println!("{}", a.relu());
}

// Loss functions and their gradients.
fn test11() {
    // Model: f = x * w + b (bias omitted here).
    let x = Tensor::ones(&[1], (Kind::Double, Device::Cpu));
    let mut w = Tensor::full(&[1], 2, (Kind::Double, Device::Cpu));
    println!("{}", w);
    let predict = Tensor::ones(&[1], (Kind::Double, Device::Cpu));
    println!("{}", predict.mse_loss(&(&x * &w), Reduction::Sum));
    // NOTE(review): this `mse` is shadowed below before ever being used.
    let mse = predict.mse_loss(&(&x * &w), Reduction::Sum);
    // Gradients only flow once requires_grad is enabled.
    let w = w.requires_grad_(true);
    println!("{}", w);
    let mse = (&x * &w).mse_loss(&predict, Reduction::Sum);
    mse.backward();
    println!("hhaa: {}", w.grad());

    // softmax example (kept for reference; requires an MPS device):
    // let mut a = Tensor::rand(&[3], (Kind::Float, Device::Mps)).requires_grad_(true);
    // a.zero_grad();
    // let p = a.softmax(0, Kind::Float);
    // p.i(0).backward_with(true, false);
    // a.grad().print();
    // a.zero_grad();
    // p.i(1).backward_with(true, false);
    // a.grad().print();
    // a.zero_grad();
    // p.i(2).backward_with(false, false);
    // a.grad().print();
}

// Perceptron with a single output unit.
fn test12() {
    // x is the known input.
    let x = Tensor::randn(&[1, 10], (Kind::Float, Device::Mps));
    x.print();

    // w is the unknown weight to solve for; mark it as requiring gradients.
    let mut w = Tensor::randn(&[1, 10], (Kind::Float, Device::Mps)).set_requires_grad(true);

    // Forward pass: sigmoid(x · w^T).
    let o = x.matmul(&w.tr()).sigmoid();
    o.print();
    let loss = o.mse_loss(&o.ones_like(), Reduction::Sum);
    w.zero_grad();
    // Backpropagation.
    loss.backward();
    // Inspect the gradient.
    w.grad().print();
}

// Perceptron with multiple output units.
fn test13() {
    let x = Tensor::randn(&[1, 10], (Kind::Float, Device::Mps));
    let mut w = Tensor::randn(&[2, 10], (Kind::Float, Device::Mps)).set_requires_grad(true);
    // Forward: sigmoid(x · w^T) → shape [1, 2].
    let o = (x.matmul(&w.tr())).sigmoid();
    let loss = o.mse_loss(&o.ones_like(), Reduction::Sum);
    loss.print();
    w.zero_grad();
    loss.backward();
    w.grad().print();
}

// Chain rule: dy2/dw1 = dy2/dy1 * dy1/dw1.
fn test14() {
    let x = Tensor::full(&[], 1, (Kind::Float, Device::Mps));
    let mut w1 = Tensor::full(&[], 2, (Kind::Float, Device::Mps)).set_requires_grad(true);
    let b1 = Tensor::full(&[], 1, (Kind::Float, Device::Mps));

    let w2 = Tensor::full(&[], 2, (Kind::Float, Device::Mps)).set_requires_grad(true);
    let b2 = Tensor::full(&[], 1, (Kind::Float, Device::Mps));
    let y1 = (&x * &w1 + &b1);
    // backward_with(true, false): presumably (keep_graph, create_graph),
    // so y1's graph stays alive for the second backward — TODO confirm
    // against the tch API.
    y1.backward_with(true, false);
    let dy2_dy1 = w1.grad();
    dy2_dy1.print();

    // let dy2_dw1 = dy2_dy1 * dy1_dw1;
    // dy2_dw1.print();
    w1.zero_grad();
    let y2 = &y1 * &w2 + &b2;
    y2.backward();
    w1.grad().print();
}

// 2-D function optimization example.
fn test15() {
    // f(x, y) = (x² + y - 11)² + (x + y² - 7)² — Himmelblau's function,
    // which has four minima, all equal to 0.
    // view();

    // Gradient descent starting from (-4, 0).
    let x = Tensor::from([-4, 0].as_slice())
        .to_kind(Kind::Float)
        .to_device(Device::Mps)
        .set_requires_grad(true);
    // Create the Adam optimizer; arguments look like
    // (lr, beta1, beta2, weight-decay, eps, amsgrad) — TODO confirm.
    let mut optimizer = tch::COptimizer::adam(1e-3, 0.9, 0.99, 0., 1e-8, false).unwrap();
    optimizer.add_parameters(&x, 0).unwrap();
    optimizer.zero_grad().unwrap();
    let mut pred: Tensor = 0.into();

    for v in 0..20000 {
        let curr = himmeblau(&x.i(0), &x.i(1));
        optimizer.zero_grad().unwrap();
        curr.backward();
        optimizer.step().unwrap();
        println!("step {}: x = {}, f(x) = {}", v, x, curr);
        // Stop once the objective value no longer changes meaningfully.
        if (&curr - pred).abs().le(1e-8).into() {
            break;
        }
        pred = curr;
    }
}

/// Himmelblau's function f(x, y) = (x² + y - 11)² + (x + y² - 7)².
/// Its four minima all evaluate to exactly 0 (e.g. f(3, 2) = 0).
fn himmeblau_scalar(x: f64, y: f64) -> f64 {
    let first = x * x + y - 11.;
    let second = x + y * y - 7.;
    first * first + second * second
}

/// Tensor version of Himmelblau's function,
/// f(x, y) = (x² + y - 11)² + (x + y² - 7)², usable in autograd graphs.
fn himmeblau(x: &Tensor, y: &Tensor) -> Tensor {
    let left = (x.pow_tensor_scalar(2) + y - 11.).pow_tensor_scalar(2);
    let right = (x + y.pow_tensor_scalar(2) - 7.).pow_tensor_scalar(2);
    left + right
}

// Renders Himmelblau's function as a 3-D surface plot and writes it to
// ./examples/torch1/data.svg.
fn view() {
    let x = Tensor::arange_start_step(-6.0, 6.0, 0.1, (Kind::Float, Device::Mps));
    let y = Tensor::arange_start_step(-6., 6., 0.1, (Kind::Float, Device::Mps));
    x.print();
    y.print();

    let root = SVGBackend::new("./examples/torch1/data.svg", (600, 400)).into_drawing_area();
    root.fill(&WHITE).unwrap();

    // 3-D chart: x and z span the domain, the middle range spans the
    // function values.
    let mut chart = ChartBuilder::on(&root)
        .set_all_label_area_size(40)
        .build_cartesian_3d(-6.0..6.0, 0.0..3000.0, -6.0..6.0)
        .unwrap();

    chart
        .configure_axes()
        .tick_size(1)
        .x_labels(4)
        .y_labels(4)
        .z_labels(4)
        .max_light_lines(5)
        .axis_panel_style(GREEN.mix(0.1))
        .bold_grid_style(BLUE.mix(0.3))
        .light_grid_style(BLUE.mix(0.2))
        .label_style(("Calibri", 10))
        .x_formatter(&|x| format!("x={x}"))
        .draw()
        .unwrap();

    let x_: Vec<f64> = x.into();
    let y_: Vec<f64> = y.into();

    // SurfaceSeries::xoz evaluates the closure over the x/z grid.
    chart
        .draw_series(
            SurfaceSeries::xoz(x_.into_iter(), y_.into_iter(), |x, y| {
                himmeblau_scalar(x, y)
            })
            .style(BLUE.mix(0.2).filled()),
        )
        .unwrap()
        .label("Surface")
        .legend(|(x, y)| Rectangle::new([(x + 5, y - 5), (x + 15, y + 5)], BLUE.mix(0.5).filled()));
}

// Logistic regression / entropy and cross entropy.
fn test16() {
    // Entropy: H(P) = -Σ P(i)·log(P(i)) — lower for more certain distributions.
    // Cross entropy: H(P, Q) = -Σ P(i)·log(Q(i)) = H(P) + KL(P‖Q);
    // the smaller it is, the closer model Q is to distribution P.
    let x = Tensor::full(&[4], 1. / 4., (Kind::Float, Device::Mps));
    x.print();
    let log = x.log2();
    log.print();
    let entropy = -(x * log).sum(Kind::Float);
    entropy.print();

    // Entropy drops as the distribution becomes more peaked.
    let x: Tensor = [0.1, 0.1, 0.1, 0.7].as_slice().into();
    let log = x.log2();
    let entropy = -(x * log).sum(Kind::Float);
    entropy.print();

    let x: Tensor = [0.001, 0.001, 0.001, 0.999].as_slice().into();
    let log = x.log2();
    let entropy = -(x * log).sum(Kind::Float);
    entropy.print();

    let x = Tensor::randn(&[1, 1], (Kind::Float, Device::Mps));
    x.print();

    let w = Tensor::randn(&[1, 10], (Kind::Float, Device::Mps));
    w.print();

    // Logits → softmax probabilities → log-probabilities.
    let logits = x.matmul(&w);
    w.tr().print();
    logits.print();
    let pred = logits.softmax(1, Kind::Float);
    pred.print();
    let pred_log = pred.log();
    pred_log.print();
    let real = Tensor::from([3].as_slice()).to_device(Device::Mps);
    // These two should agree: cross_entropy = softmax + log + nll_loss.
    logits.cross_entropy_for_logits(&real).print();
    pred_log.nll_loss(&real).print();
}

// Hands-on workflow: manually-parameterized MLP trained on MNIST.
// The important part is the step sequence:
// forward → loss → zero_grad → backward → step.
fn test17() {
    // Manually created parameters for a 784 → 200 → 200 → 10 MLP.
    let (w1, b1) = (
        Tensor::randn(&[200, 784], (Kind::Float, Device::Mps)).set_requires_grad(true),
        Tensor::zeros(&[200], (Kind::Float, Device::Mps)).set_requires_grad(true),
    );

    let (w2, b2) = (
        Tensor::randn(&[200, 200], (Kind::Float, Device::Mps)).set_requires_grad(true),
        Tensor::zeros(&[200], (Kind::Float, Device::Mps)).set_requires_grad(true),
    );

    let (w3, b3) = (
        Tensor::randn(&[10, 200], (Kind::Float, Device::Mps)).set_requires_grad(true),
        Tensor::zeros(&[10], (Kind::Float, Device::Mps)).set_requires_grad(true),
    );
    // Replace the plain-randn weights with Kaiming initialization
    // (better suited to ReLU-family activations).
    use tch::nn::init;
    let w1 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[200, 784],
        Device::Mps,
    )
    .set_requires_grad(true);
    let w2 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[200, 200],
        Device::Mps,
    )
    .set_requires_grad(true);
    let w3 = init::init(
        init::Init::Kaiming {
            dist: init::NormalOrUniform::Normal,
            fan: init::FanInOut::FanIn,
            non_linearity: init::NonLinearity::ReLU,
        },
        &[10, 200],
        Device::Mps,
    )
    .set_requires_grad(true);

    // Three-layer MLP with leaky-ReLU activations; returns raw logits.
    let forward = |x: &Tensor| -> Tensor {
        let mut x = x.matmul(&w1.tr()) + &b1;
        x = x.leaky_relu();
        x = x.matmul(&w2.tr()) + &b2;
        x = x.leaky_relu();
        x = x.matmul(&w3.tr()) + &b3;
        x
    };

    // Initialize an SGD optimizer.
    // wd: weight decay.
    // momentum: inertia — the fraction of the previous gradient carried over.

    let mut optimizer_sgd = tch::COptimizer::sgd(0.001, 0.78, 0., 0.01, false).unwrap();

    optimizer_sgd.add_parameters(&w1, 0).unwrap();
    optimizer_sgd.add_parameters(&b1, 0).unwrap();
    optimizer_sgd.add_parameters(&w2, 0).unwrap();
    optimizer_sgd.add_parameters(&b2, 0).unwrap();
    optimizer_sgd.add_parameters(&w3, 0).unwrap();
    optimizer_sgd.add_parameters(&b3, 0).unwrap();

    // Load the handwritten-digit data.
    let mnist_data = handle_mnist();
    let features = Array2::from_shape_vec((60_000, 28 * 28), mnist_data.trn_img).unwrap();
    let target = Array1::from(mnist_data.trn_lbl);
    let dataset = Dataset::new(features, target);

    // Test data.
    let features_test = Array2::from_shape_vec((10_000, 28 * 28), mnist_data.tst_img).unwrap();
    let target_test = Array1::from(mnist_data.tst_lbl);
    let dataset_test = Dataset::new(features_test, target_test);
    let iter_chunk = dataset_test.sample_chunks(200);
    // mnist(
    //     &Tensor::try_from(dataset_test.records()).unwrap(),
    //     &Tensor::try_from(dataset_test.targets()).unwrap(),
    //     16,
    //     10_000,
    // );
    // 200 samples per batch.
    let iter = dataset.sample_chunks(200);
    let epochs = 10;
    for epoch in 0..epochs {
        for (batch_idx, v) in iter.clone().enumerate() {
            let data = Tensor::try_from(v.records())
                .unwrap()
                .view((-1, 28 * 28))
                .to_kind(Kind::Float)
                .to_device(Device::Mps);

            // The target must be Int64 here (required by cross entropy).
            let target = Tensor::try_from(v.targets())
                .unwrap()
                .view(-1)
                .to_kind(Kind::Int64)
                .to_device(Device::Mps);
            let logits = forward(&data);
            let loss = logits.cross_entropy_for_logits(&target);
            optimizer_sgd.zero_grad().unwrap();

            loss.backward();
            optimizer_sgd.step().unwrap();

            if batch_idx % 100 == 0 {
                // NOTE(review): `dataset.records` (field) and
                // `dataset.records()` (method) are mixed here — confirm both
                // refer to the same data in linfa's DatasetBase.
                println!(
                    "Train Epoch: {} [{}/{} ({:.2}%)]\t Loss: {}",
                    epoch,
                    batch_idx * 200,
                    dataset.records().len_of(Axis(0)),
                    100. * 200. * (batch_idx as f64) / (dataset.records.len_of(Axis(0)) as f64),
                    loss
                );
            }
        }
        // Evaluate on the held-out test set after each epoch.
        let mut test_loss = Tensor::zeros(&[1], (Kind::Float, Device::Mps));
        let mut correct = Tensor::zeros(&[1], (Kind::Float, Device::Mps));
        //break;
        for d in iter_chunk.clone() {
            let data = Tensor::try_from(d.records())
                .unwrap()
                .view((-1, 28 * 28))
                .to_kind(Kind::Float)
                .to_device(Device::Mps);
            let target = Tensor::try_from(d.targets())
                .unwrap()
                .view(-1)
                .to_kind(Kind::Int64)
                .to_device(Device::Mps);
            let logits = forward(&data);
            test_loss += logits.cross_entropy_for_logits(&target);
            let pred = logits.max_dim(1, true).1; // max_dim returns (values, indices)

            correct += pred.eq_tensor(&target.view((200, 1))).sum(Kind::Int64);
        }
        test_loss /= dataset_test.records.len_of(Axis(0)) as f64;
        println!(
            "Test set: Average loss: {}, Accuracy: {}/{} ({}%)",
            f64::from(&test_loss),
            f64::from(&correct),
            dataset_test.records.len_of(Axis(0)),
            100. * f64::from(&correct) / dataset_test.records().len_of(Axis(0)) as f64
        );
    }

    // Spot-check a single test sample: predicted vs. actual label.
    let test_data = Tensor::try_from(dataset_test.records())
        .unwrap()
        .view((-1, 28 * 28))
        .to_kind(Kind::Float)
        .to_device(Device::Mps);
    let target_data = Tensor::try_from(dataset_test.targets())
        .unwrap()
        .view(-1)
        .to_kind(Kind::Float)
        .to_device(Device::Mps);
    let logits = forward(&test_data.i((1,))).view(-1);
    println!("预测值: {}", logits.max_dim(0, true).1);
    println!("真实值: {}", target_data.i(1));
}

// Neural network demo.
fn test18() {
    // TODO: not yet implemented.
}

// Downloads (if needed) and loads the MNIST dataset from `data/`.
//
// Returns the raw `Mnist` struct with 60 000 training and 10 000 test
// samples; labels are encoded as plain digits.
fn handle_mnist() -> Mnist {
    let (trn_size, _rows, _cols) = (60_000, 28, 28);
    // Return the builder result directly instead of binding it to a
    // temporary (Clippy `let_and_return`).
    MnistBuilder::new()
        .base_path("data/")
        .label_format_digit()
        .training_set_length(trn_size)
        //.validation_set_length(10_000)
        .test_set_length(10_000)
        .download_and_extract()
        .finalize()
}
// Displays one MNIST sample and prints its label (press ESC to exit).
//
// `train_img`: flattened image data; `train_lbl`: labels; `index`: which
// sample to show; `num`: total number of images in `train_img`.
fn mnist(train_img: &Tensor, train_lbl: &Tensor, index: usize, num: i64) {
    return_item_description_from_number(train_lbl.i(index as i64).into());

    let train_img = train_img.view((num, 28, 28));

    let image = bw_ndarray2_to_rgb_image(&train_img.i(index as i64));
    let window_options = WindowOptions::new();
    let window_options = window_options
        .set_size([100, 100])
        .set_resizable(true)
        .set_background_color(show_image::Color::white())
        .set_preserve_aspect_ratio(true)
        .set_fullscreen(false);

    // NOTE(review): show_image::run_context presumably takes over the main
    // thread and does not return — confirm against the show-image docs.
    show_image::run_context(move || {
        let show_iamge_data = image.to_vec();
        let image = ImageView::new(ImageInfo::rgb8(28, 28), &show_iamge_data);
        let window = show_image::create_window("image", window_options).unwrap();
        window.set_image("wawa", image).unwrap();

        // Event loop: break out when ESC is pressed.
        for e in window.event_channel().unwrap() {
            if let event::WindowEvent::KeyboardInput(event) = e {
                if event.input.key_code == Some(event::VirtualKeyCode::Escape) {
                    println!("退出循环");
                    break;
                }
            }
        }

        Result::<(), Box<dyn std::error::Error>>::Ok(())
    });
}

// Prints the human-readable description for an MNIST label.
//
// Labels are the digits 0-9, whose description is the digit itself, so the
// original ten-arm match duplicated `Display` for `u8`; a range check plus
// direct formatting is equivalent.
//
// Panics (same message as before) if `val` is not a valid digit label.
fn return_item_description_from_number(val: u8) {
    assert!(val <= 9, "An unrecognized label was used...");
    println!(
        "Based on the '{}' label, this image should be a : {}",
        val, val
    );
    println!("Hit [ESC] to exit....");
}

// Converts a 2-D (height × width) tensor into a grayscale `RgbImage`
// by replicating each intensity into the R, G and B channels.
fn bw_ndarray2_to_rgb_image(arr: &Tensor) -> RgbImage {
    let size = arr.size();
    // size[0] is the row (height) dimension, size[1] the column (width) one.
    let (width, height) = (size[1], size[0]);
    let mut img: RgbImage = ImageBuffer::new(width as u32, height as u32);
    for y in 0..height {
        for x in 0..width {
            // NOTE(review): the `* 255` assumes pixel values are 0/1
            // (black/white); if the tensor already holds 0-255 intensities
            // the `as u8` cast will wrap — confirm against the caller's data.
            let val = (i64::from(arr.i((y, x))) * 255) as u8;
            img.put_pixel(x as u32, y as u32, image::Rgb([val, val, val]))
        }
    }
    img
}
