use candle_core::{Device, Tensor};
use candle_nn::{BatchNorm, Conv2d, Dropout, Linear, Module, Optimizer, VarBuilder, VarMap};
use parquet::record::Field;
use rand::seq::SliceRandom;

// Mini-batch size used by `train`: 64 * 64 = 4096 samples per optimizer step.
// NOTE(review): the name "DIVED" presumably means "divisor" (dataset size is
// divided by this to get the batch count) — consider renaming.
const DIVED:usize=64*64;

/// Trains the CNN on the "train" parquet split (60 000 samples), then reports
/// classification accuracy on the "test" split (10 000 samples).
fn main() {
    // Fall back to CPU when no CUDA device is available instead of panicking.
    let device = Device::cuda_if_available(0).unwrap();
    let m = Dataset::new("train", 60000, &device);
    let model = train(m, &device);

    let m = Dataset::new("test", 10000, &device);
    // Labels are stored as i64; `eq` against `argmax` output needs u32.
    let labels = m.labels.to_dtype(candle_core::DType::U32).unwrap();
    // NOTE(review): `Conv::forward` runs BatchNorm/Dropout in training mode
    // (see its implementation), so this evaluation is noisier than a true
    // eval-mode pass — confirm intent.
    let pre = model.forward(&m.data);
    let sum_ok = pre
        .argmax(candle_core::D::Minus1).unwrap()
        .eq(&labels).unwrap()
        .to_dtype(candle_core::DType::F32).unwrap()
        .sum_all().unwrap()
        .to_scalar::<f32>().unwrap();
    println!("{}", sum_ok / 10000.0);
}


// Small CNN: two conv -> batch-norm -> pool stages followed by two
// fully-connected layers. Built and wired up in `Conv::new`.
struct Conv{
    conv1:Conv2d,   // first 3x3 convolution (see cfg in `new`: padding 1, stride 1)
    conv2:Conv2d,   // second 3x3 convolution, same channel count in/out
    fc1:Linear,     // out_channels*7*7 -> out_channels*7 (after two 2x2 max-pools)
    fc2:Linear,     // out_channels*7 -> out_dim (class logits)
    n1:BatchNorm,   // batch norm after conv1
    n2:BatchNorm,   // batch norm after conv2
    dropout:Dropout,// p = 0.25, applied after fc1 (see `forward`)
}

impl Conv {
    /// Builds the network; `vb` supplies/creates the trainable variables.
    ///
    /// Layer sizes assume 28x28 single-channel input: after two 2x2 max-pools
    /// the feature map is 7x7, hence the `out_channels*7*7` input to `fc1`.
    fn new(in_channels: usize, out_channels: usize, kernel_size: usize, out_dim: usize, vb: VarBuilder) -> Self {
        let cfg = candle_nn::Conv2dConfig { padding: 1, stride: 1, ..Default::default() };
        let conv1 = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vb.pp("c1")).unwrap();
        let conv2 = candle_nn::conv2d(out_channels, out_channels, kernel_size, cfg, vb.pp("c2")).unwrap();
        let fc1 = candle_nn::linear(out_channels * 7 * 7, out_channels * 7, vb.pp("fc1")).unwrap();
        let fc2 = candle_nn::linear(out_channels * 7, out_dim, vb.pp("fc2")).unwrap();
        // Original note: batch norm also slows down training.
        let config = candle_nn::BatchNormConfig::default();
        let n1 = candle_nn::batch_norm(out_channels, config, vb.pp("n1")).unwrap();
        let n2 = candle_nn::batch_norm(out_channels, config, vb.pp("n2")).unwrap();
        // Original note: dropout slows down training here.
        let dropout = Dropout::new(0.25);
        Self { conv1, conv2, fc1, fc2, n1, n2, dropout }
    }

    /// Forward pass in *training* mode, kept for backward compatibility with
    /// existing callers. Prefer `forward_t(xs, false)` for evaluation — this
    /// variant always applies dropout and per-batch BatchNorm statistics.
    fn forward(&self, xs: &Tensor) -> Tensor {
        self.forward_t(xs, true)
    }

    /// Forward pass. `xs` is `(batch, 784)` and is reshaped to
    /// `(batch, 1, 28, 28)`. When `train` is false, BatchNorm runs through
    /// its `Module` impl (running statistics) and Dropout is a no-op.
    fn forward_t(&self, xs: &Tensor, train: bool) -> Tensor {
        let (batch, _in_dim) = xs.dims2().unwrap();
        let xs = xs.reshape((batch, 1, 28, 28)).unwrap();
        let xs = self.conv1.forward(&xs).unwrap();
        let xs = if train {
            self.n1.forward_train(&xs).unwrap()
        } else {
            self.n1.forward(&xs).unwrap()
        };
        let xs = xs.relu().unwrap().max_pool2d(2).unwrap();
        let xs = self.conv2.forward(&xs).unwrap();
        let xs = if train {
            self.n2.forward_train(&xs).unwrap()
        } else {
            self.n2.forward(&xs).unwrap()
        };
        let xs = xs.relu().unwrap().max_pool2d(2).unwrap();
        let xs = xs.flatten_from(1).unwrap();
        let xs = self.fc1.forward(&xs).unwrap().relu().unwrap();
        let xs = self.dropout.forward(&xs, train).unwrap();
        self.fc2.forward(&xs).unwrap()
    }
}

fn train(m:Dataset,device:&Device)->Conv{
    let data=m.data.to_device(device).unwrap();
    let labels=m.labels.to_device(device).unwrap();
    let varmap=VarMap::new();
    let vb=VarBuilder::from_varmap(&varmap, candle_core::DType::F32, device);
    let model=Conv::new(1, 32, 3, 10, vb);
    let config=candle_nn::ParamsAdamW::default();
    let mut opt=candle_nn::AdamW::new(varmap.all_vars(), config).unwrap();
    let batch=data.clone().dims()[0]/DIVED;
    let mut batch=(0..batch).collect::<Vec<_>>();
    let mut rng=rand::rng();
    let mut lossse=Vec::new();
    let mut acc=Vec::new();
    for _ in 0..1000 {
        batch.shuffle(&mut rng);
        for ba in batch.iter(){
            let data=data.narrow(0,ba*DIVED,DIVED).unwrap();
            let labels=labels.narrow(0,ba*DIVED,DIVED).unwrap();
            let logits=model.forward(&data);
            let m_loss=candle_nn::ops::log_softmax(&logits, candle_core::D::Minus1).unwrap();
            let loss=candle_nn::loss::nll(&m_loss, &labels).unwrap();
            opt.backward_step(&loss).unwrap();
            lossse.push(loss.to_scalar::<f32>().unwrap());
            let sum_ok=logits.argmax(candle_core::D::Minus1).unwrap()
            .eq(&labels.to_dtype(candle_core::DType::U32).unwrap()).unwrap()
            .to_dtype(candle_core::DType::F32).unwrap().sum_all().unwrap().to_scalar::<f32>().unwrap();
            acc.push(sum_ok/logits.dims()[0] as f32);
        }
        let loss=lossse.iter().sum::<f32>()/lossse.len() as f32;
        let accc=acc.iter().sum::<f32>()/acc.len() as f32;
        println!("{},{}",accc,loss);
        if accc>0.95 {
            break;
        }
    }
    load_varmap.load("model").unwrap();
    let logits=model_loaded.forward(&dataset.data);
    let loss=candle_nn::ops::log_softmax(&logits, candle_core::D::Minus1).unwrap();
    let loss=candle_nn::loss::nll(&loss, &dataset.label).unwrap();
    println!("{:?}",loss)//保存和加载模型参数
    model
}

// In-memory dataset produced by `Dataset::new` from a parquet file.
struct Dataset{
    data:Tensor,   // f32, shape (capacity, 784); pixels scaled to [0, 1] in `new`
    labels:Tensor, // i64 (parquet `Long`), shape (capacity,)
}

impl Dataset {
    /// Reads a parquet file at `path` containing exactly `capacity` rows of
    /// (image bytes, long label) and decodes each image into 784 grayscale
    /// f32 pixels scaled to [0, 1].
    ///
    /// Panics on any I/O, decode, or shape error — acceptable for this
    /// train/test loader where a bad dataset should abort the run.
    fn new(path: &str, capacity: usize, device: &Device) -> Self {
        let file = std::fs::File::open(path).unwrap();
        let reader = parquet::file::reader::SerializedFileReader::new(file).unwrap();
        // Each image contributes 784 floats; size the buffer accordingly
        // (the original reserved only `capacity`, guaranteeing reallocations).
        let mut data_buffer = Vec::with_capacity(capacity * 784);
        let mut labels_buffer = Vec::with_capacity(capacity);
        for row in reader {
            let row = row.unwrap();
            for (_name, field) in row.into_columns() {
                match field {
                    // The image column is a group wrapping the raw encoded bytes.
                    Field::Group(group) => {
                        for (_inner_name, inner) in group.into_columns() {
                            if let Field::Bytes(bytes) = inner {
                                // Decode (PNG/JPEG/...) -> 8-bit grayscale -> f32.
                                let image = image::load_from_memory(bytes.data()).unwrap();
                                data_buffer.extend(image.to_luma8().into_raw().into_iter().map(f32::from));
                            }
                        }
                    }
                    Field::Long(label) => labels_buffer.push(label),
                    _ => {}
                }
            }
        }
        let data = Tensor::from_vec(data_buffer, &[capacity, 784], device).unwrap();
        // Scale pixel values from [0, 255] to [0, 1].
        let data = (data / 255.0).unwrap();
        let labels = Tensor::from_vec(labels_buffer, &[capacity], device).unwrap();
        Self { data, labels }
    }
}

