use std::ops::{Add, Div, Mul};
use std::sync::{Arc, Mutex};
use tch::nn::{ Module, VarStore, Variables};
use tch::{Device, Kind, Tensor, nn};

/// Builds a two-layer MLP for MNIST: 784 -> 128 (ReLU) -> 10.
///
/// The weight matrices are drawn from N(0.0, 0.2) and the biases start at
/// zero; all four tensors are registered under `vs` and marked trainable.
fn demo2_model(vs: &nn::Path) -> impl Module {
    // Weight matrices, normally initialised.
    let w1 = vs
        .randn("layer1", &[784, 128], 0.0, 0.2)
        .set_requires_grad(true);
    let w2 = vs
        .randn("layer2", &[128, 10], 0.0, 0.2)
        .set_requires_grad(true);
    // Biases, initialised to zero.
    let b1 = vs.zeros("b1", &[1, 128]).set_requires_grad(true);
    let b2 = vs.zeros("b2", &[1, 10]).set_requires_grad(true);

    // x @ W1 + b1 -> ReLU -> x @ W2 + b2 (raw logits, no softmax here).
    nn::seq()
        .add_fn(move |xs| xs.matmul(&w1).add(&b1))
        .add_fn(|xs| xs.relu())
        .add_fn(move |xs| xs.matmul(&w2).add(&b2))
}

/// Extension trait: clear accumulated gradients on a variable container.
trait ZeroGrads {
    /// Zeroes the gradient tensor of every trainable variable.
    fn zero_grads(&self);
}
impl ZeroGrads for VarStore {
    /// Clears the gradient of every trainable tensor held by the store,
    /// mirroring what `nn::Optimizer::zero_grad` does.
    fn zero_grads(&self) {
        let mut vars = self.variables_.lock().unwrap();
        for var in vars.trainable_variables.iter_mut() {
            var.tensor.zero_grad();
        }
    }
}

/// Minimal optimiser interface modelled after `tch`'s `nn::Optimizer`.
trait MyAdam {
    /// Performs one parameter update. `epoch` is the 1-based step counter
    /// `t` used for Adam's bias correction and must be >= 1 (at 0 the
    /// correction term `1 - beta^t` is zero).
    fn step(&mut self, epoch: usize);
    /// Creates an optimiser tracking the trainable variables of `var_store`
    /// with learning rate `lr`.
    fn build(var_store: &VarStore, lr: f64) -> Self;
}

/// Hand-rolled Adam optimiser state over a `VarStore`'s trainable variables.
struct CustomerOpt {
    /// First-moment (gradient mean) estimates, one per trainable tensor.
    m: Vec<Tensor>,
    /// Second-moment (uncentred gradient variance) estimates, same order as `m`.
    v: Vec<Tensor>,
    /// Exponential decay rate for the first moment (set to 0.9 in `build`).
    beta1: f64,
    /// Exponential decay rate for the second moment (set to 0.999 in `build`).
    beta2: f64,
    /// Learning rate.
    lr: f64,
    /// Shared handle to the var store's variables being optimised.
    variables_: Arc<Mutex<Variables>>,
}
impl MyAdam for CustomerOpt {
    fn step(&mut self,epoch:usize) {
        let mut locker = self.variables_.lock().unwrap();

        locker
            .trainable_variables
            .iter_mut()
            .enumerate()
            .into_iter()
            .for_each(|(i, v)| {
                // 计算一阶动量
                let m1 = v.tensor.grad().mul(1. - self.beta1) + self.m[i].data().mul(self.beta1);
                self.m[i] = m1.data();
                // 计算二阶动量
                let v1 = v.tensor.grad().pow_tensor_scalar(2).mul(1. - self.beta2)
                    + self.v[i].data().mul(self.beta2);
                self.v[i] = v1.data();
                // 偏差校正
                let rm = m1.div(1. - self.beta1.powf(epoch as f64));
                let rv = v1.div(1. - self.beta2.powf(epoch as f64));
                // 更新参数
                let update = rm.div(rv.sqrt() + 1e-8).mul(self.lr);
                v.tensor
                    .set_data(&(v.tensor.data() - update));
            })
    }

    fn build(var_store: &VarStore, lr: f64) -> Self {
        let var = var_store.variables_.clone();
        let locker = var.lock().unwrap();
        let m = locker
            .trainable_variables
            .iter()
            .map(|v| Tensor::zeros(v.tensor.size(), (Kind::Float, Device::cuda_if_available())))
            .collect::<Vec<_>>();
        let v = m
            .iter()
            .map(|v| Tensor::zeros(v.size(), (Kind::Float, Device::cuda_if_available())))
            .collect::<Vec<_>>();
        Self {
            m,
            v,
            beta1: 0.9,
            beta2: 0.999,
            lr,
            variables_: var_store.variables_.clone(),
        }
    }
}

/// Trains the two-layer MNIST MLP with the hand-rolled Adam optimiser and
/// prints the training loss and test accuracy once per epoch.
///
/// # Errors
/// Returns an error if the MNIST data directory cannot be read.
pub fn demo2_run() -> anyhow::Result<()> {
    // Fall back to the CPU when no CUDA device is present instead of
    // panicking on the hard-coded Device::Cuda(0).
    let device = Device::cuda_if_available();
    let vs = nn::VarStore::new(device);
    let m = tch::vision::mnist::load_dir("D:\\AIGC\\minist\\data")?;

    let train_images = m.train_images.to_device(device);
    let train_labels = m.train_labels.to_device(device);
    let test_images = m.test_images.to_device(device);
    let test_labels = m.test_labels.to_device(device);
    let path = vs.root();
    let net = demo2_model(&path);
    let mut opt = CustomerOpt::build(&vs, 1e-3);
    // One-hot targets; cast explicitly so the element-wise product with the
    // float log-probabilities doesn't rely on implicit type promotion.
    let label = train_labels.one_hot(10).to_kind(Kind::Float);
    assert_ne!(train_images.size()[0], 0, "无训练数据batch");
    for epoch in 1..100 {
        // Cross-entropy: -sum(one_hot * log_softmax(logits)) / batch_size.
        let loss = net
            .forward(&train_images)
            .log_softmax(-1, Kind::Float)
            .mul(&label)
            .sum(Kind::Float)
            .negative()
            .div(train_images.size()[0]);
        vs.zero_grads();
        loss.backward();
        opt.step(epoch);
        // Evaluation needs no autograd graph; skip the bookkeeping.
        let test_accuracy =
            tch::no_grad(|| net.forward(&test_images).accuracy_for_logits(&test_labels));

        println!(
            "epoch: {:4} train loss: {:8.10} test acc: {:5.2}%",
            epoch,
            loss.double_value(&[]),
            // accuracy_for_logits returns a fraction in [0, 1]; scale to
            // match the "%" suffix in the format string.
            100. * test_accuracy.double_value(&[]),
        );
    }
    Ok(())
}
