use rand::random;

use tch::nn::{Module, OptimizerConfig, VarStore};
use tch::{Device, Kind, Tensor, nn};

/// Generates a random regression dataset of `gen_counts` samples.
///
/// Each sample has two random `u8` features; the label is their sum,
/// so the target function is `y = x1 + x2`. Returns a
/// `(features, labels)` pair with shapes `[gen_counts, 2]` and
/// `[gen_counts, 1]`, both moved to the first CUDA device.
fn gen_dataset(gen_counts: usize) -> (Tensor, Tensor) {
    // Preallocate: 2 features per sample, 1 label per sample.
    let mut dataset = Vec::with_capacity(gen_counts * 2);
    let mut labels = Vec::with_capacity(gen_counts);
    // A range is already an iterator — no `.into_iter()` needed.
    for _ in 0..gen_counts {
        let x1: u8 = random();
        let x2: u8 = random();
        dataset.push(x1 as f32);
        dataset.push(x2 as f32);
        labels.push(x1 as f32 + x2 as f32);
    }
    (
        Tensor::from_slice(&dataset)
            .reshape(&[gen_counts as i64, 2])
            .to_device(Device::Cuda(0)),
        Tensor::from_slice(&labels)
            .reshape(&[gen_counts as i64, 1])
            .to_device(Device::Cuda(0)),
    )
}

/// Builds a minimal linear model `y = xs · w` with a single trainable
/// `[2, 1]` weight tensor registered in the var store under "x1".
///
/// NOTE(review): the weight is zero-initialized; for this convex
/// least-squares problem SGD still converges, but random init is the
/// usual choice for anything deeper.
fn my_module(p: nn::Path) -> impl Module + use<> {
    let x1 = p.zeros("x1", &[2, 1]);

    // clippy::let_and_return — return the matmul result directly
    // instead of binding it to a temporary.
    nn::seq().add_fn(move |xs| xs.matmul(&x1))
}

/// Trains the linear model with SGD on freshly generated random
/// batches until the squared-error loss reaches exactly zero or the
/// epoch budget runs out, then saves the weights twice: a CPU copy
/// ("model_weights_cpu.safetensors") and the GPU store
/// ("model_weights_gpu.safetensors").
pub fn gradient_descent() {
    let dim = 1; // samples per training batch
    let vs = nn::VarStore::new(Device::Cuda(0));
    let path = vs.root();
    let my_module = my_module(path);
    // Tiny learning rate: raw u8 inputs (up to 255) produce large
    // gradients, so a bigger step would diverge.
    let mut opt = nn::Sgd::default().build(&vs, 1e-8).unwrap();
    for epoch in 1..100_000 {
        // Fresh random mini-batch every epoch (the old comment claimed
        // "zeros" — the data is random u8 pairs, see gen_dataset).
        let (features, labels) = gen_dataset(dim as usize);
        let forward = my_module.forward(&features);
        let loss = (forward - labels).pow_tensor_scalar(2).sum(Kind::Float);
        // Exact-zero check is intentional: stop only on a perfect fit.
        if loss.double_value(&[]) == 0.0 {
            break;
        }
        opt.backward_step(&loss);
        // Evaluate one held-out random sample for progress logging.
        let (test_x, test_y) = gen_dataset(1);
        let prediction = my_module.forward(&test_x);
        println!("epoch {epoch:5} loss {:5.5}", loss.double_value(&[]));
        println!(
            "test forward {:5.0} label {:5.0}",
            prediction.double_value(&[]),
            test_y.double_value(&[])
        );
    }
    save_to_cpu(&vs);
    vs.save("model_weights_gpu.safetensors").unwrap();
    println!("模型训练完成 已保存");
}

/// Writes a CPU copy of the weights in `original_vs` to
/// "model_weights_cpu.safetensors", so the model can later be loaded
/// on machines without CUDA.
fn save_to_cpu(original_vs: &nn::VarStore) {
    // Build a CPU-resident store and mirror every variable into it —
    // the copy is the core operation; saving is then device-agnostic.
    let mut cpu_store = nn::VarStore::new(Device::Cpu);
    cpu_store.copy(original_vs).unwrap();
    cpu_store.save("model_weights_cpu.safetensors").unwrap();
}

/// Loads the GPU-trained weights and runs a single random sample
/// through the model, printing the input, label, and prediction.
pub fn test_demo1() {
    // Create the model, then load the pre-trained weights into its
    // var store (load requires &mut self, hence `mut vs`).
    let mut vs = VarStore::new(Device::Cuda(0));
    let path = vs.root();
    let model = my_module(path);
    vs.load("./model_weights_gpu.safetensors").unwrap();
    let (test, test_label) = gen_dataset(1);

    test.print();

    test_label.print();

    // BUG FIX: `to_device` returns a new tensor; the original code
    // discarded the result, making the call a no-op. Rebind it (the
    // tensor is already on Cuda(0) from gen_dataset, but keep the
    // move explicit so a future device change works).
    let test = test.to_device(Device::Cuda(0));
    println!("result");
    test.apply_t(&model, false).print();
}
