//! Notes on the (libsvm-style) trained model structure; `iter` is the
//! number of training iterations.
//!   nr_class: 4              % number of classes
//!    totalSV: 39             % total number of support vectors
//!        rho: [6x1 double]   % b = -model.rho
//!      Label: [4x1 double]
//!      ProbA: []
//!      ProbB: []
//!        nSV: [4x1 double]   % number of support vectors per class
//!    sv_coef: [39x3 double]  % support-vector coefficients
//!        SVs: [39x12 double] % the support vectors, stored as a sparse matrix
//!    Decision surface: w*x + b = 0, where
//!        w = model.SVs' * model.sv_coef
//!        b = -model.rho
//!    w is the normal vector of the separating hyperplane in the
//!    high-dimensional space; b is the constant term.

use csv::ReaderBuilder;
use linfa::prelude::*;
use linfa_preprocessing::linear_scaling::*;
use linfa_svm::Svm;
use ndarray::prelude::*;
use plotters::prelude::*;
fn main() {
    //svm_test1();
    gaussian_kernel_test().unwrap();
}

// Linear separation demo: train a linear-kernel SVM on two iris classes
// (two features each), scatter-plot the data, and draw the separating
// line(s). Currently unused — the call in `main` is commented out.
fn svm_test1() {
    let dataset = linfa_datasets::iris();
    let (train, _test) = dataset.split_with_ratio(0.8);
    // Keep only the first two classes and the first two features.
    let train = data_filter(train, 2);
    println!("{}", train.records());
    println!("{}", train.targets());

    // Standardize features to zero mean / unit variance.
    let train = LinearScalerParams::new(ScalingMethod::Standard(true, true))
        .fit(&train)
        .unwrap()
        .transform(train);
    // Plot bounds: global max/min over every feature value.
    // NOTE(review): folding from a 0.0 seed clamps the result toward 0.0
    // when all values share one sign; after standardization both signs
    // occur, so it works here, but f64::NEG_INFINITY / f64::INFINITY
    // seeds would be safer.
    let max = train
        .records()
        .iter()
        .fold(0.0, |acc, x| if *x > acc { *x } else { acc });
    let min = train
        .records()
        .iter()
        .fold(0.0, |acc, x| if *x > acc { acc } else { *x });

    // Render the training points as an SVG scatter plot.
    let root = SVGBackend::new("./examples/svm/data.svg", (600, 400)).into_drawing_area();
    root.fill(&WHITE).unwrap();

    let mut chat = ChartBuilder::on(&root)
        .set_all_label_area_size(40)
        .build_cartesian_2d(min..max, min..max)
        .unwrap();
    // One circle per sample: class 0.0 in red, the other class in blue.
    chat.draw_series(
        train
            .records()
            .column(0)
            .iter()
            .zip(train.records().column(1).iter())
            .zip(train.targets().iter())
            .map(|((x, y), c)| {
                Circle::new(
                    (*x, *y),
                    3,
                    if *c == 0.0 {
                        RED.filled()
                    } else {
                        BLUE.filled()
                    },
                )
            }),
    )
    .unwrap();
    chat.configure_mesh().draw().unwrap();

    // Fit a linear-kernel SVM with C = 1000 and eps = 0.0001.
    let model = linfa_svm::Svm::params()
        .linear_kernel()
        .c_eps(1000.0, 0.0001)
        .fit(&train)
        .unwrap();
    println!("{:?}", model);
    println!("{}", model.nsupport());
    println!("{}", model.alpha.len());
    // Predict decision values on the training data.
    let ret = model.predict(train.records());
    println!("{}", ret);
    // Count of samples predicted positive (decision value >= 0).
    println!(
        "{}",
        ret.fold(0, |acc, x| {
            if *x >= 0.0 {
                acc + 1
            } else {
                acc
            }
        })
    );
    // Count of samples whose true target is positive, for comparison.
    println!(
        "{}",
        train.targets().fold(0, |acc, x| {
            if *x > 0.0 {
                acc + 1
            } else {
                acc
            }
        })
    );
    println!("{}", train.targets().len());
    // Confusion matrix of true labels vs thresholded predictions.
    let con = train
        .targets()
        .map(|x| *x as usize)
        .confusion_matrix(ret.map(|x| if *x >= 0.0 { 1usize } else { 0usize }))
        .unwrap();
    println!("{:?}", con.f1_score());
    // Separating line: w1*x + w2*y + rho = 0  =>  y = (-rho - w1*x) / w2.
    // w is the hyperplane normal, w = SVs' * sv_coef (see module docs).
    // NOTE(review): w1/w2 are hard-coded from a previous training run —
    // recompute them from `model` so the plot stays in sync with the fit.
    let w1 = 0.3094508452708098;
    let w2 = -0.31578051747857216;
    let rho = model.rho;
    // Values from an earlier run, kept for reference:
    // let w1 = 0.2485606820501988;
    // let w2 = -0.19591910773165963;
    // let rho = 0.446172639039582;

    // Draw three lines: one through the origin (rho omitted), and the
    // boundary shifted by -rho and by +rho.
    let x = (min..max).step(0.5);
    chat.draw_series(LineSeries::new(
        x.values().map(|x| (x, (-w1 * x) / w2)),
        BLACK,
    ))
    .unwrap();
    chat.draw_series(LineSeries::new(
        x.values().map(|x| (x, (-rho - w1 * x) / w2)),
        BLACK,
    ))
    .unwrap();
    chat.draw_series(LineSeries::new(
        x.values().map(|x| (x, (rho - w1 * x) / w2)),
        BLACK,
    ))
    .unwrap();
}
// Load binary-classification data from a CSV file under `./examples/`.
//
// Keeps only the "setosa" (label 1.0) and "versicolor" (label 0.0) rows
// and uses columns 1 and 2 of each record as the two features; every
// other species is skipped. Returns `(features: n x 2, labels: n)`.
// Panics if the file cannot be opened or a row fails to deserialize.
fn load(file: &str) -> (Array2<f64>, Array1<f64>) {
    let mut reader = ReaderBuilder::new()
        .has_headers(true)
        .delimiter(b',')
        .trim(csv::Trim::Headers)
        .from_path(format!("./examples/{}", file))
        .unwrap();
    let ret = reader.deserialize();
    let mut x_train = Array2::zeros((0, 2));
    let mut y_train = vec![];
    for r in ret {
        let row: Record = r.unwrap();
        // BUGFIX: the original branch was `!= "versicolor"`, which skipped
        // versicolor rows and labeled every *other* species 0.0. The
        // intended mapping (per the inline comments) is
        // setosa -> 1.0, versicolor -> 0.0, anything else -> skip.
        let y_value = if &row.5 == "setosa" {
            1f64
        } else if &row.5 == "versicolor" {
            0f64
        } else {
            continue;
        };

        let row_x = array![row.1, row.2];
        x_train.push_row(row_x.view()).unwrap();
        y_train.push(y_value);
    }
    (x_train, Array1::from_vec(y_train))
}

use serde::Deserialize;

/// One deserialized CSV row. Field 0 is a textual id/label column,
/// fields 1-4 are numeric measurements (`load` uses fields 1 and 2 as
/// features), and field 5 is the species name ("setosa", "versicolor", …).
#[derive(Deserialize, Debug, Clone)]
struct Record(String, f64, f64, f64, f64, String);

/// Restrict a dataset to the samples whose integer class label is below
/// `n`, keeping only the first `n` features of each sample and converting
/// the labels to `f64`.
fn data_filter(
    data: DatasetBase<Array2<f64>, Array1<usize>>,
    n: usize,
) -> DatasetBase<Array2<f64>, Array1<f64>> {
    // Collect the surviving samples as (feature-slice, float-label) pairs.
    let samples: Vec<_> = data
        .sample_iter()
        .filter(|(_, label)| *label.first().unwrap() < n)
        .map(|(features, label)| {
            let kept = features.slice(s![0..n]).to_owned();
            let as_float = label.map(|l| *l as f64);
            (kept, as_float)
        })
        .collect();

    // Re-assemble them into a records matrix and a targets vector.
    let mut records = Array::zeros((0, n));
    let mut targets = Array::zeros(0);
    for (row, label) in samples {
        records.push_row(row.view()).unwrap();
        targets.push(Axis(0), label.view()).unwrap();
    }
    Dataset::new(records, targets)
}

/// Trains a Gaussian-kernel SVM on the winequality dataset and reports a
/// confusion matrix plus accuracy/MCC on a held-out 10% validation split.
fn gaussian_kernel_test() -> linfa_svm::Result<()> {
    // Binarize the targets: quality scores above 6 count as good wine.
    let (train, valid) = linfa_datasets::winequality()
        .map_targets(|quality| *quality > 6)
        .split_with_ratio(0.9);

    println!(
        "Fit SVM classifier with #{} training points",
        train.nsamples()
    );

    // Gaussian (RBF) kernel with eps 50.0 and asymmetric class weights:
    // 50000 for the positive class, 5000 for the negative one.
    let model = Svm::<_, bool>::params()
        .pos_neg_weights(50000., 5000.)
        .gaussian_kernel(50.0)
        .fit(&train)?;

    println!("{}", model);

    // Human-readable class tags: positive -> "good", negative -> "bad".
    let tag_classes = |is_good: &bool| -> String {
        match *is_good {
            true => "good".into(),
            false => "bad".into(),
        }
    };

    // Re-label the validation targets, then predict and re-label the
    // predictions the same way so the confusion matrix compares strings.
    let valid = valid.map_targets(tag_classes);
    let pred = model.predict(&valid).map(tag_classes);

    // Diagonal entries are true-positive/true-negative counts; the
    // off-diagonal ones are false positives and false negatives.
    let cm = pred.confusion_matrix(&valid)?;
    println!("{:?}", cm);

    // Accuracy plus the Matthews correlation coefficient between the
    // predictions and the ground truth.
    println!("accuracy {}, MCC {}", cm.accuracy(), cm.mcc());

    Ok(())
}
