//! 定义sigmoid函数
//! 定义损失函数  -log(p)  if y = 1,    -log(1-p) if y = 0, p为sigmoid函数返回值(0,1)
//! 把上面的分类合并变成cost = -ylog(p) - (1-y)log(1-p)
//! 在样本中损失函数变成: J(ε) = -1/m ∑ [yi·log(pi) + (1-yi)·log(1-pi)]
//! p = sigmoid(Xε) = 1 / (1 + e^(-Xε))
//! 没有公式解，只能用梯度下降法求解
//! ε的梯度式子: 1/m ∑ (sigmoid(X^(i)ε) - y^(i)) · X^(i)_j  (上标i为样本，下标j为特征)
//! 简化后就是: 1/m ∑ (ŷ^(i) - y^(i)) · X^(i)_j
//! 向量化: 1/m . X^T . (sigmoid(Xε) - y)
//!
use csv::ReaderBuilder;
use linfa::prelude::*;
use machine_study::AddFront;
use ndarray::{prelude::*, Slice};
use ndarray_csv::Array2Reader;

// Entry point. The actual demo (`test_logistic`) is left disabled because it
// requires the example CSV files to exist on disk.
fn main() {
    //test_logistic();
    let greeting = "hello world!";
    println!("{}", greeting);
}

// End-to-end demo: train a logistic regression with gradient descent on the
// training CSV, then evaluate classification accuracy on the held-out test CSV.
fn test_logistic() {
    let (x_train, y_train) = load_data2("./examples/logistic_regression/origin_data.csv");
    let (x_test, y_test) = load_data2("./examples/logistic_regression/test_data.csv");
    // One parameter per feature plus the intercept. Sized from the TRAINING
    // matrix — the original sized it from `x_test`, which only worked because
    // both files happen to have the same number of feature columns.
    let initial_theta = Array2::zeros((x_train.len_of(Axis(1)) + 1, 1));
    let mut logistic = LogisticRegression::new(initial_theta.view(), 10000, 0.01, 1e-4);
    logistic.fit(
        x_train.view(),
        // fit() expects labels as an (m, 1) column matrix.
        y_train.to_shared().reshape((y_train.len(), 1)).view(),
    );
    println!("{:?}", logistic);
    // let y_pre = logistic.predict(x_train.view(), logistic.radio);
    // println!("{}", y_pre);

    // `radio` is the decision threshold that fit() tuned on the training data.
    let score = logistic.score(x_test.view(), y_test.view(), logistic.radio);
    println!("score: {:?}", score.accuracy());
}

// Loss per sample: -log(p) if y = 1, -log(1-p) if y = 0, where p is the
// sigmoid output in (0, 1).
// Merging the two cases gives: cost = -y*log(p) - (1-y)*log(1-p)
#[derive(Debug)]
struct LogisticRegression {
    // Feature weights, i.e. theta[1..]; shape (n_features, 1).
    coef: Array2<f64>,
    // Bias term, i.e. theta[0].
    intercept: f64,
    // Full parameter column vector including the intercept; shape (n_features + 1, 1).
    theta: Array2<f64>,
    // Maximum gradient-descent iterations (also reused as the step count of
    // the threshold search in fit()).
    iters: usize,
    // Learning rate for gradient descent.
    eta: f64,
    // Early-stop tolerance on the change in loss between iterations.
    epsilon: f64,
    // Decision threshold for classifying a probability as 1.
    // NOTE(review): "radio" looks like a typo for "ratio"; kept because
    // external code reads this field.
    radio: f64,
}

impl LogisticRegression {
    /// Build an untrained model from an initial parameter column vector:
    /// row 0 is the intercept, rows 1.. are the feature coefficients.
    ///
    /// * `iters`   - maximum gradient-descent iterations
    /// * `eta`     - learning rate
    /// * `epsilon` - early-stop tolerance on the change in loss
    pub fn new(initial_theta: ArrayView2<f64>, iters: usize, eta: f64, epsilon: f64) -> Self {
        Self {
            coef: initial_theta
                .slice_axis(Axis(0), Slice::new(1, None, 1))
                .to_owned(),
            intercept: initial_theta[(0, 0)],
            theta: initial_theta.to_owned(),
            // Initial decision threshold; fit() sweeps [0.1, 1.0) for a better one.
            radio: 0.1,
            iters,
            eta,
            epsilon,
        }
    }

    /// Train `theta` by batch gradient descent, then grid-search the decision
    /// threshold that maximizes training-set accuracy (stored in `self.radio`).
    ///
    /// `x` is (m, n) WITHOUT the bias column (a leading column of ones is
    /// prepended internally); `y` is an (m, 1) matrix of 0/1 labels.
    pub fn fit(&mut self, x: ArrayView2<f64>, y: ArrayView2<usize>) {
        // Prepend a column of ones so theta[0] acts as the intercept.
        let rows = x.len_of(Axis(0));
        let new_x = x.add_front(Axis(1), Array1::ones(rows).view());
        self.theta = Self::gradient_descent(
            new_x.view(),
            y.map(|v| *v as f64).view(),
            self.theta.view(),
            self.eta,
            self.iters,
            self.epsilon,
        );
        self.intercept = self.theta[(0, 0)];
        self.coef = self
            .theta
            .slice_axis(Axis(0), Slice::from(1i32..))
            .to_owned();
        // Threshold search: step the ratio from 0.1 towards 1.0 and keep the
        // value with the best accuracy on the training data.
        let mut ratio = self.radio;
        let mut score = 0.0;
        let y_flat = y.to_shared().reshape(y.len_of(Axis(0)));
        for _ in 0..self.iters {
            let y_predict = self.predict(x, ratio);
            let matrix = y_predict.confusion_matrix(&y_flat).unwrap();
            let s = matrix.accuracy();

            if s > score {
                score = s;
                self.radio = ratio;
            }
            ratio += 0.9 / self.iters as f64;
            if ratio >= 1.0 {
                break;
            }
        }
    }

    /// Classify every row of `x_predict`: probability >= `ratio` maps to 1, else 0.
    pub fn predict(&self, x_predict: ArrayView2<f64>, ratio: f64) -> Array1<usize> {
        let p = self.predict_proba(x_predict);
        p.map(|v| if *v >= ratio { 1 } else { 0 })
            .to_shared()
            .reshape(x_predict.len_of(Axis(0)))
            .to_owned()
    }

    /// Confusion matrix of the model's predictions against `y_test`.
    pub fn score(
        &self,
        x_test: ArrayView2<f64>,
        y_test: ArrayView1<usize>,
        ratio: f64,
    ) -> ConfusionMatrix<usize> {
        let y_predict = self.predict(x_test, ratio);
        y_predict.confusion_matrix(y_test).unwrap()
    }

    /// Probability of class 1 for every row: sigmoid(X_b · theta), where X_b
    /// is `x_predict` with a prepended column of ones.
    pub fn predict_proba(&self, x_predict: ArrayView2<f64>) -> Array2<f64> {
        let x = x_predict.add_front(Axis(1), Array1::ones(x_predict.len_of(Axis(0))).view());
        let p = x.dot(&self.theta);
        Self::sigmoid(p.view())
    }

    /// Cross-entropy loss J(theta) = -1/m * [ y^T·ln(p) + (1-y)^T·ln(1-p) ]
    /// with p = sigmoid(x·theta).
    /// (Renamed from `loss_funtion`; private, so no external callers break.)
    fn loss_function(theta: ArrayView2<f64>, x: ArrayView2<f64>, y: ArrayView2<f64>) -> f64 {
        let y_hat = x.dot(&theta);
        let y_hat = Self::sigmoid(y_hat.view());
        // BUG FIX: the first term must be y^T·ln(y_hat). The original computed
        // y^T·ln(y) — the log of the 0/1 labels — which yields -inf/NaN terms,
        // so the convergence test in gradient_descent compared garbage.
        let first = y.t().dot(&y_hat.map(|v| v.ln()));
        let second = (1.0 - &y).t().dot(&(1.0 - &y_hat).map(|v| v.ln()));
        -(first + second)[(0, 0)] / (y.len() as f64)
    }

    /// Gradient of J, vectorized: 1/m * x^T · (sigmoid(x·theta) - y).
    fn gradient(theta: ArrayView2<f64>, x: ArrayView2<f64>, y: ArrayView2<f64>) -> Array2<f64> {
        x.t().dot(&(Self::sigmoid(x.dot(&theta).view()) - &y)) / y.len_of(Axis(0)) as f64
    }

    /// Batch gradient descent: repeatedly step `theta` against the gradient
    /// until the loss change falls below `epsilon` or `iters` iterations pass.
    fn gradient_descent(
        x: ArrayView2<f64>,
        y: ArrayView2<f64>,
        initial_theta: ArrayView2<f64>,
        eta: f64,
        iters: usize,
        epsilon: f64,
    ) -> Array2<f64> {
        let mut theta = initial_theta.to_owned();
        let mut cur_iter = 0;
        while cur_iter < iters {
            let gradient = Self::gradient(theta.view(), x, y);
            let last_theta = theta.clone();
            theta = theta - eta * gradient;
            // Stop early once the loss barely moves between iterations.
            if (Self::loss_function(theta.view(), x, y)
                - Self::loss_function(last_theta.view(), x, y))
            .abs()
                < epsilon
            {
                break;
            }
            cur_iter += 1;
        }
        theta
    }

    /// Element-wise logistic sigmoid: 1 / (1 + e^(-x)).
    fn sigmoid(x: ArrayView2<f64>) -> Array2<f64> {
        x.map(|v| 1.0 / (1.0 + (-*v).exp()))
    }
}
/// Scalar logistic sigmoid: maps any real `x` into (0, 1) via 1 / (1 + e^(-x)).
fn sigmoid(x: f64) -> f64 {
    let neg_exp = (-x).exp();
    (1.0 + neg_exp).recip()
}

// Read a headerless comma-delimited CSV into a linfa Dataset: columns 0-1 are
// the features ("test 1" / "test 2"), column 2 is binarized into a 0/1 target.
fn load_data(path: &str) -> Dataset<f64, usize, Ix1> {
    let mut reader = ReaderBuilder::new()
        .has_headers(false)
        .delimiter(b',')
        .from_path(path)
        .expect("can create reader");
    // Deserialize the whole file into a 2-D float matrix.
    let array: Array2<f64> = reader
        .deserialize_array2_dynamic()
        .expect("can deserializer array");

    let data = array.slice(s![.., 0..2]).to_owned();
    let targets = array.column(2).to_owned();

    Dataset::new(data, targets)
        .map_targets(|x| if *x as usize >= 1 { 1 } else { 0 })
        .with_feature_names(vec!["test 1", "test 2"])
}

// Read a headerless comma-delimited CSV and return (features, labels):
// columns 0-1 as an (m, 2) matrix, column 2 binarized into 0/1 labels.
fn load_data2(path: &str) -> (Array2<f64>, Array1<usize>) {
    let mut reader = ReaderBuilder::new()
        .has_headers(false)
        .delimiter(b',')
        .from_path(path)
        .expect("can create reader");
    // Deserialize the whole file into a 2-D float matrix.
    let array: Array2<f64> = reader
        .deserialize_array2_dynamic()
        .expect("can deserializer array");

    let features = array.slice(s![.., 0..2]).to_owned();
    let labels = array
        .column(2)
        .map(|x| if *x as usize >= 1 { 1 } else { 0 });
    (features, labels)
}
