use rand::Rng;
use serde::{Deserialize, Serialize};
use std::fmt;

/// Supported activation function types.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Activation {
    /// Logistic sigmoid: 1 / (1 + e^-x), output in (0, 1).
    Sigmoid,
    /// Rectified linear unit: max(x, 0).
    ReLU,
    /// Hyperbolic tangent, output in (-1, 1).
    Tanh,
}

impl Activation {
    /// 前向计算
    pub fn forward(&self, x: f64) -> f64 {
        match self {
            Activation::Sigmoid => 1.0 / (1.0 + (-x).exp()),
            Activation::ReLU => x.max(0.0),
            Activation::Tanh => x.tanh(),
        }
    }

    /// 导数计算
    pub fn derivative(&self, x: f64) -> f64 {
        match self {
            Activation::Sigmoid => {
                let s = self.forward(x);
                s * (1.0 - s)
            }
            Activation::ReLU => if x > 0.0 { 1.0 } else { 0.0 },
            Activation::Tanh => {
                let t = x.tanh();
                1.0 - t * t
            }
        }
    }
}

/// A fully connected (dense) neural-network layer.
#[derive(Clone, Serialize, Deserialize)]
pub struct Layer {
    /// `weights[i][j]` is the weight from input `j` to output neuron `i`.
    pub weights: Vec<Vec<f64>>,
    /// One bias per output neuron.
    pub biases: Vec<f64>,
    pub activation: Activation,
    // Caches used by backpropagation. Not serialized: they are transient
    // training data, repopulated on each call to `forward`.
    #[serde(skip)]
    inputs: Vec<f64>,
    #[serde(skip)]
    weighted_sums: Vec<f64>,
    #[serde(skip)]
    outputs: Vec<f64>,
}

impl Layer {
    /// Create a fully connected layer with He-style weight initialization
    /// (uniform in [-sqrt(2/fan_in), sqrt(2/fan_in))) and zero biases.
    pub fn new(input_size: usize, output_size: usize, activation: Activation) -> Self {
        let mut rng = rand::thread_rng();

        // He scale based on fan-in; keeps activation variance roughly stable.
        let scale = (2.0 / input_size as f64).sqrt();
        let weights = (0..output_size)
            .map(|_| {
                (0..input_size)
                    .map(|_| rng.gen_range(-scale..scale))
                    .collect()
            })
            .collect();

        Layer {
            weights,
            biases: vec![0.0; output_size],
            activation,
            inputs: Vec::new(),
            weighted_sums: Vec::new(),
            outputs: Vec::new(),
        }
    }

    /// Forward pass: computes `activation(W·x + b)` and caches the inputs,
    /// weighted sums, and outputs for a later `backward` call.
    pub fn forward(&mut self, inputs: &[f64]) -> Vec<f64> {
        // Preallocate the caches; the output count equals the weight-row count.
        let mut weighted_sums = Vec::with_capacity(self.weights.len());
        let mut outputs = Vec::with_capacity(self.weights.len());

        for (row, bias) in self.weights.iter().zip(&self.biases) {
            // Fold starting from the bias so the summation order matches the
            // original accumulation exactly (bit-for-bit).
            let z = row.iter().zip(inputs).fold(*bias, |acc, (w, x)| acc + w * x);
            weighted_sums.push(z);
            outputs.push(self.activation.forward(z));
        }

        self.inputs = inputs.to_vec();
        self.weighted_sums = weighted_sums;
        self.outputs = outputs;

        self.outputs.clone()
    }

    /// Backward pass: applies one SGD step to the weights and biases and
    /// returns the loss gradient with respect to this layer's inputs.
    ///
    /// Must be called after `forward`, which fills the caches this reads.
    pub fn backward(&mut self, output_gradients: &[f64], learning_rate: f64) -> Vec<f64> {
        let mut input_gradients = vec![0.0; self.inputs.len()];

        for i in 0..self.weights.len() {
            // delta = dL/dz for neuron i (upstream gradient times activation').
            let delta = output_gradients[i] * self.activation.derivative(self.weighted_sums[i]);

            // Bias gradient is just delta.
            self.biases[i] -= learning_rate * delta;

            for j in 0..self.weights[i].len() {
                // Propagate through the *pre-update* weight, then apply SGD.
                input_gradients[j] += delta * self.weights[i][j];
                self.weights[i][j] -= learning_rate * delta * self.inputs[j];
            }
        }

        input_gradients
    }
}

impl fmt::Debug for Layer {
    /// Compact debug view: dimensions and activation only, omitting the
    /// weight matrix and training caches.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `first()` guards a zero-output layer, where `self.weights[0]`
        // would panic on the empty outer Vec.
        let input_size = self.weights.first().map_or(0, Vec::len);
        f.debug_struct("Layer")
            .field("input_size", &input_size)
            .field("output_size", &self.weights.len())
            .field("activation", &self.activation)
            .finish()
    }
}

/// A feed-forward neural network: an ordered stack of [`Layer`]s applied
/// in insertion order.
#[derive(Clone, Serialize, Deserialize)]
pub struct NeuralNetwork {
    layers: Vec<Layer>,
}

impl NeuralNetwork {
    /// Create an empty network with no layers.
    pub fn new() -> Self {
        NeuralNetwork {
            layers: Vec::new(),
        }
    }

    /// Append a layer; layers are evaluated in insertion order.
    pub fn add_layer(&mut self, layer: Layer) {
        self.layers.push(layer);
    }

    /// Forward pass through every layer in order.
    ///
    /// Takes `&mut self` because each layer caches its activations for
    /// a subsequent backward pass.
    pub fn forward(&mut self, inputs: &[f64]) -> Vec<f64> {
        let mut current_input = inputs.to_vec();

        for layer in &mut self.layers {
            current_input = layer.forward(&current_input);
        }

        current_input
    }

    /// One SGD step on a single example; returns the mean squared error.
    ///
    /// The gradient uses the raw error term (the constant factor 2/n of the
    /// MSE derivative is effectively folded into the learning rate).
    ///
    /// # Panics
    /// Panics if `targets` is shorter than the network's output.
    pub fn train(&mut self, inputs: &[f64], targets: &[f64], learning_rate: f64) -> f64 {
        // Forward pass.
        let outputs = self.forward(inputs);

        // Mean squared error and its per-output gradient.
        let mut loss = 0.0;
        let mut output_gradients = Vec::with_capacity(outputs.len());
        for i in 0..outputs.len() {
            let error = outputs[i] - targets[i];
            loss += error * error;
            output_gradients.push(error);
        }
        // Guard the division: an empty output yields 0.0 instead of NaN.
        if !outputs.is_empty() {
            loss /= outputs.len() as f64;
        }

        // Backward pass, last layer first.
        let mut gradients = output_gradients;
        for layer in self.layers.iter_mut().rev() {
            gradients = layer.backward(&gradients, learning_rate);
        }

        loss
    }

    /// Train over the whole dataset for `epochs` epochs; returns the average
    /// loss of each epoch (one entry per epoch).
    pub fn train_batch(
        &mut self,
        dataset: &[(Vec<f64>, Vec<f64>)],
        learning_rate: f64,
        epochs: usize,
    ) -> Vec<f64> {
        let mut losses = Vec::with_capacity(epochs);

        for epoch in 0..epochs {
            let mut total_loss = 0.0;

            for (inputs, targets) in dataset {
                total_loss += self.train(inputs, targets, learning_rate);
            }

            // Guard against an empty dataset (0/0 would be NaN).
            let avg_loss = if dataset.is_empty() {
                0.0
            } else {
                total_loss / dataset.len() as f64
            };
            losses.push(avg_loss);

            // Log the first epoch and then every 100th.
            if (epoch + 1) % 100 == 0 || epoch == 0 {
                println!("Epoch {}: Loss = {:.6}", epoch + 1, avg_loss);
            }
        }

        losses
    }

    /// Run inference: a forward pass with no weight updates.
    pub fn predict(&mut self, inputs: &[f64]) -> Vec<f64> {
        self.forward(inputs)
    }

    /// Number of layers in the network.
    pub fn layer_count(&self) -> usize {
        self.layers.len()
    }
}

impl Default for NeuralNetwork {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for NeuralNetwork {
    /// Delegates to each layer's compact `Debug` representation.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = formatter.debug_struct("NeuralNetwork");
        builder.field("layers", &self.layers);
        builder.finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_activation_functions() {
        // Sigmoid is exactly 0.5 at the origin.
        assert!((Activation::Sigmoid.forward(0.0) - 0.5).abs() < 1e-6);

        // ReLU clamps negatives to zero and passes positives through.
        assert_eq!(Activation::ReLU.forward(-1.0), 0.0);
        assert_eq!(Activation::ReLU.forward(1.0), 1.0);
    }

    #[test]
    fn test_neural_network() {
        // Two-layer network: 2 -> 3 (ReLU) -> 1 (Sigmoid).
        let mut net = NeuralNetwork::new();
        net.add_layer(Layer::new(2, 3, Activation::ReLU));
        net.add_layer(Layer::new(3, 1, Activation::Sigmoid));

        let prediction = net.predict(&[0.5, 0.8]);
        assert_eq!(prediction.len(), 1);
    }
}

