use ndarray::prelude::*;
use show_image::Color;

fn main() -> anyhow::Result<()> {
    let x = array![[1., 1.]];
    let y = array![[0.]];
    let weight1 = array![[-0.0053, -0.5820, -0.2723], [0.3793, -0.5204, 0.1896]];
    let bias1 = array![[-0.0140, 0.5607, -0.0628]];
    let weight2 = array![[0.1528, -0.1745, -0.1135]];
    let bias2 = array![[-0.5516]];
    println!("bias2: {}", bias2);
    println!("loss");
    println!(
        "{}",
        feed_forward(&x, &y, &weight1, &bias1, &weight2, &bias2)?
    );
    println!("{}\n{}\n{}\n{}", weight1, bias1, weight2, bias2);
    let mut datas = vec![];
    let loss = feed_forward(&x, &y, &weight1, &bias1, &weight2, &bias2)?;
    let mut updated_weights = update_weights(&x, &y, &weight1, &bias1, &weight2, &bias2, 0.01)?;
    datas.push((0., loss as f64));
    for v in 1..100 {
        let loss = feed_forward(
            &x,
            &y,
            &updated_weights.0,
            &updated_weights.1,
            &updated_weights.2,
            &updated_weights.3,
        )?;
        updated_weights = update_weights(
            &x,
            &y,
            &updated_weights.0,
            &updated_weights.1,
            &updated_weights.2,
            &updated_weights.3,
            0.01,
        )?;
        datas.push((v as f64, loss as f64));
    }

    show_image::DrawArea::new_draw_area("./updated_weights.png", (600, 400))?
        .generate_chart_context2d("update line", -5.0..100., 0.0..0.35)?
        .draw_2d_line(datas.iter().cloned(), show_image::RED.mix(1.0), "line")?;

    let mut datas = vec![];
    let loss = feed_forward(&x, &y, &weight1, &bias1, &weight2, &bias2)?;
    let mut updated_weights =
        chain_update_weights(&x, &y, &weight1, &bias1, &weight2, &bias2, 0.01)?;
    datas.push((0., loss as f64));
    for v in 1..100 {
        let loss = feed_forward(
            &x,
            &y,
            &updated_weights.0,
            &updated_weights.1,
            &updated_weights.2,
            &updated_weights.3,
        )?;
        updated_weights = chain_update_weights(
            &x,
            &y,
            &updated_weights.0,
            &updated_weights.1,
            &updated_weights.2,
            &updated_weights.3,
            0.01,
        )?;
        datas.push((v as f64, loss as f64));
    }

    show_image::DrawArea::new_draw_area("./chain_weights.png", (600, 400))?
        .generate_chart_context2d("chain line", -5.0..100., 0.0..0.35)?
        .draw_2d_line(datas.iter().cloned(), show_image::RED.mix(1.0), "line")?;

    Ok(())
}

// 前向代码
// input: 是多个数据的batch,每一行都是一个样本
// weight1: 输入层对隐藏层的权重，多行向量
// bias1: 输入层对隐藏层三个节点的偏置, 行向量
// weight2: 隐藏层对输出层的权重,行向量
// bias2: 隐藏层输出层一个节点的偏置， 行向量
fn forward(
    input: &Array2<f32>,
    weight1: &Array2<f32>,
    bias1: &Array2<f32>,
    weight2: &Array2<f32>,
    bias2: &Array2<f32>,
) -> anyhow::Result<Array2<f32>> {
    // 预处理，让输入的矩阵的最后一列都是1
    let (row, _) = input.dim();
    let one = Array::from_elem([row, 1], 1.);
    let input = ndarray::concatenate(Axis(1), &[input.view(), one.view()])?;

    // 预处理权重和偏置，让最后一行是偏置，可以方便的矩阵运算
    let weights_bias1 = ndarray::concatenate(Axis(0), &[weight1.view(), bias1.view()])?;

    // 生成隐藏的好多行的[h11 h12 h13]， 每个样本一行
    let pre_hidden = input.dot(&weights_bias1);

    // 隐藏行激活函数处理
    let hidden_out = 1. / (1. + (-pre_hidden).exp());

    // 在计算输出层之前，对输入的预处理，增加1行1
    let (row, _) = hidden_out.dim();
    let last_col = Array::from_elem([row, 1], 1.);
    let hidden_out = ndarray::concatenate(Axis(1), &[hidden_out.view(), last_col.view()])?;

    // 预处理
    let weight2 = weight2.t();
    let weights_bias2 = ndarray::concatenate(Axis(0), &[weight2.view(), bias2.view()])?;

    // 预测的输出
    let pre_out = hidden_out.dot(&weights_bias2);
    Ok(pre_out)
}

fn feed_forward(
    input: &Array2<f32>,
    output: &Array2<f32>,
    weight1: &Array2<f32>,
    bias1: &Array2<f32>,
    weight2: &Array2<f32>,
    bias2: &Array2<f32>,
) -> anyhow::Result<f32> {
    let pre_out = forward(input, weight1, bias1, weight2, bias2)?;
    (pre_out - output)
        .pow2()
        .mean()
        .ok_or(anyhow::anyhow!("mean error"))
}

// A simple way of updating the weights:
// 1. Change each weight in the network by a tiny amount — one at a time.
// 2. Measure the change in loss (δL) caused by the weight change (δW).
// 3. Update the weight by -k * δL/δW, where k is a positive value — a
//    hyperparameter called the learning rate.
#[allow(clippy::type_complexity)]
fn update_weights(
    input: &Array2<f32>,
    output: &Array2<f32>,
    weight1: &Array2<f32>,
    bias1: &Array2<f32>,
    weight2: &Array2<f32>,
    bias2: &Array2<f32>,
    lr: f32,
) -> anyhow::Result<(Array2<f32>, Array2<f32>, Array2<f32>, Array2<f32>)> {
    // Baseline loss; every finite-difference probe perturbs exactly one
    // parameter away from this same starting point.
    let original_loss = feed_forward(input, output, weight1, bias1, weight2, bias2)?;
    let new_weight1 = descend(weight1, original_loss, lr, |p| {
        feed_forward(input, output, p, bias1, weight2, bias2)
    })?;
    let new_bias1 = descend(bias1, original_loss, lr, |p| {
        feed_forward(input, output, weight1, p, weight2, bias2)
    })?;
    let new_weight2 = descend(weight2, original_loss, lr, |p| {
        feed_forward(input, output, weight1, bias1, p, bias2)
    })?;
    let new_bias2 = descend(bias2, original_loss, lr, |p| {
        feed_forward(input, output, weight1, bias1, weight2, p)
    })?;
    Ok((new_weight1, new_bias1, new_weight2, new_bias2))
}

/// One gradient-descent step on `param` using forward finite differences.
///
/// For every entry: perturb it by `EPS`, measure the new loss via `loss_at`
/// (which must evaluate the network with the perturbed matrix in `param`'s
/// slot and everything else at its original value), approximate the gradient
/// as (loss - original_loss) / EPS, and move the entry by `-lr * grad`.
fn descend<F>(
    param: &Array2<f32>,
    original_loss: f32,
    lr: f32,
    mut loss_at: F,
) -> anyhow::Result<Array2<f32>>
where
    F: FnMut(&Array2<f32>) -> anyhow::Result<f32>,
{
    const EPS: f32 = 0.0001;
    let mut updated = param.clone();
    for r in 0..param.nrows() {
        for c in 0..param.ncols() {
            // Perturb a single entry, leaving all others untouched.
            let mut perturbed = param.clone();
            perturbed[[r, c]] += EPS;
            let grad = (loss_at(&perturbed)? - original_loss) / EPS;
            updated[[r, c]] -= grad * lr;
        }
    }
    Ok(updated)
}

// Gradient descent using the chain rule (analytic derivatives) instead of
// the finite-difference probing done by `update_weights`.
//
// Returns the updated (weight1, bias1, weight2, bias2) after one step of
// size `lr`.
fn chain_update_weights(
    input: &Array2<f32>,
    output: &Array2<f32>,
    weight1: &Array2<f32>,
    bias1: &Array2<f32>,
    weight2: &Array2<f32>,
    bias2: &Array2<f32>,
    lr: f32,
) -> anyhow::Result<(Array2<f32>, Array2<f32>, Array2<f32>, Array2<f32>)> {
    // Preprocess: append a column of ones to the input so the bias can be
    // folded into the matrix product (same trick as `forward`).
    let (row, _) = input.dim();
    let one = Array::from_elem([row, 1], 1.);
    let input = ndarray::concatenate(Axis(1), &[input.view(), one.view()])?;

    // Stack the bias as the last row of the weight matrix so one dot product
    // handles weights and bias together.
    let weights_bias1 = ndarray::concatenate(Axis(0), &[weight1.view(), bias1.view()])?;

    // Hidden pre-activations [h11 h12 h13], one row per sample.
    let pre_hidden = input.dot(&weights_bias1);

    // Sigmoid activation: hidden outputs [a11 a12 a13], one row per sample.
    let hidden_out_no_bias = 1. / (1. + (-pre_hidden).exp());

    // Append a ones column before the output layer (bias folding again).
    let (row, _) = hidden_out_no_bias.dim();
    let last_col = Array::from_elem([row, 1], 1.);
    let hidden_out = ndarray::concatenate(Axis(1), &[hidden_out_no_bias.view(), last_col.view()])?;

    // Output layer: transposed weights with the bias stacked underneath.
    let weight2_t = weight2.t();
    let weights_bias2 = ndarray::concatenate(Axis(0), &[weight2_t.view(), bias2.view()])?;

    // Predicted output.
    let pre_out = hidden_out.dot(&weights_bias2);

    // Everything below is phrased as matrix arithmetic over the batch.
    // NOTE(review): `input` is the augmented matrix here, so `nrows` is
    // still the sample count.
    let sample_count = input.nrows() as f32;

    // Partial derivatives of w31 w32 w33: [[w31, w32, w33]].
    let weight2_diff = (-2. * (output - &pre_out)).dot(&hidden_out_no_bias) / sample_count;
    // Gradient of the hidden->output bias bias2, shaped [[bias2]].
    let bias2_diff = (-2. * (output - &pre_out))
        .mean_axis(Axis(1))
        .unwrap()
        .to_shape([1, 1])?
        .to_owned();

    // Partial derivatives of w11 w12 w13 w21 w22 w23.
    // NOTE(review): batch effects are folded with `mean_axis` here; this is
    // only exercised with a single-sample batch in this file — verify against
    // the finite-difference version before reusing with larger batches.
    let bias1_diff = (-2. * (output - &pre_out).t().dot(weight2))
        * (&hidden_out_no_bias * (1. - &hidden_out_no_bias))
            .mean_axis(Axis(0))
            .unwrap()
        / sample_count;
    let input_mean = input.mean_axis(Axis(0)).unwrap();
    let row_one = &bias1_diff * input_mean[0];
    let row_two = &bias1_diff * input_mean[1];
    let weight1_diff = ndarray::concatenate(Axis(0), &[row_one.view(), row_two.view()])?;

    // One gradient-descent step on every parameter matrix.
    let weight1 = weight1 - lr * weight1_diff;
    let bias1 = bias1 - lr * bias1_diff;
    let weight2 = weight2 - lr * weight2_diff;
    let bias2 = bias2 - lr * bias2_diff;
    Ok((weight1, bias1, weight2, bias2))
}
