#include <iostream>
#include <vector>
#include <cmath>
#include <ctime>
#include <cstdlib>

// Logistic (sigmoid) activation: maps any real x into the open interval (0, 1).
double sigmoid(double x) {
    const double e = std::exp(-x);
    return 1.0 / (1.0 + e);
}

// Fully-connected feed-forward neural network with a single hidden layer,
// sigmoid activations on both layers, and online gradient-descent training.
class NeuralNetwork {
private:
    int input_size;       // number of input neurons
    int hidden_size;      // number of hidden neurons
    int output_size;      // number of output neurons
    double learning_rate; // step size applied to every gradient update

    // Weight matrix from input to hidden layer (hidden_size x input_size).
    std::vector<std::vector<double>> weights_ih;
    // Weight matrix from hidden to output layer (output_size x hidden_size).
    std::vector<std::vector<double>> weights_ho;

    // Bias vector of the hidden layer.
    std::vector<double> bias_h;
    // Bias vector of the output layer.
    std::vector<double> bias_o;

    // Logistic activation used by both layers.
    static double activate(double x) {
        return 1.0 / (1.0 + std::exp(-x));
    }

    // Uniform random value in [-1, 1] for weight/bias initialization.
    static double random_weight() {
        return ((double)rand() / RAND_MAX) * 2 - 1;
    }

    // Fills a rows x cols matrix with random weights in [-1, 1].
    static void init_matrix(std::vector<std::vector<double>>& m, int rows, int cols) {
        m.assign(rows, std::vector<double>(cols));
        for (auto& row : m) {
            for (auto& w : row) {
                w = random_weight();
            }
        }
    }

    // Fills a bias vector of the given size with random values in [-1, 1].
    static void init_vector(std::vector<double>& v, int size) {
        v.assign(size, 0.0);
        for (auto& b : v) {
            b = random_weight();
        }
    }

    // Computes one dense layer: out[i] = activate(sum_j w[i][j] * in[j] + b[i]).
    static std::vector<double> layer_forward(const std::vector<std::vector<double>>& weights,
                                             const std::vector<double>& bias,
                                             const std::vector<double>& in) {
        std::vector<double> out(bias.size());
        for (std::size_t i = 0; i < bias.size(); ++i) {
            double sum = bias[i];
            for (std::size_t j = 0; j < in.size(); ++j) {
                sum += weights[i][j] * in[j];
            }
            out[i] = activate(sum);
        }
        return out;
    }

public:
    // Builds a network with the given layer sizes; all weights and biases are
    // initialized uniformly in [-1, 1] via rand().  The new `lr` parameter
    // defaults to 1.0, which matches the implicit step size the original
    // three-argument callers were getting, so existing calls are unchanged.
    NeuralNetwork(int input_n, int hidden_n, int output_n, double lr = 1.0) :
        input_size(input_n), hidden_size(hidden_n), output_size(output_n),
        learning_rate(lr) {
        init_matrix(weights_ih, hidden_size, input_size);
        init_matrix(weights_ho, output_size, hidden_size);
        init_vector(bias_h, hidden_size);
        init_vector(bias_o, output_size);
    }

    // Forward pass: returns the output-layer activations for `input`
    // (which must contain input_size elements).
    std::vector<double> forward_propagation(const std::vector<double>& input) {
        std::vector<double> hidden = layer_forward(weights_ih, bias_h, input);
        return layer_forward(weights_ho, bias_o, hidden);
    }

    // One step of online gradient descent on the squared error between the
    // network's output for `input` and the desired `target` vector.
    //
    // Bug fixes vs. the original implementation:
    //  * the hidden-layer activations are recomputed here instead of being
    //    (incorrectly) copied out of the network's *final* output vector,
    //    which has output_size elements and entirely different values;
    //  * the sigmoid derivative is applied exactly once per layer — the
    //    original multiplied the hidden delta by h*(1-h) a second time when
    //    forming the input->hidden weight and bias updates.
    void back_propagation(const std::vector<double>& input, const std::vector<double>& target) {
        // Forward pass, keeping the hidden activations for the gradient.
        std::vector<double> hidden_output = layer_forward(weights_ih, bias_h, input);
        std::vector<double> output = layer_forward(weights_ho, bias_o, hidden_output);

        // Output-layer delta: (target - out) * out * (1 - out).
        std::vector<double> output_delta(output_size);
        for (int i = 0; i < output_size; ++i) {
            output_delta[i] = (target[i] - output[i]) * output[i] * (1 - output[i]);
        }

        // Hidden-layer delta: error backpropagated through weights_ho,
        // times the sigmoid derivative of the hidden activation.
        std::vector<double> hidden_delta(hidden_size);
        for (int i = 0; i < hidden_size; ++i) {
            double sum = 0;
            for (int j = 0; j < output_size; ++j) {
                sum += output_delta[j] * weights_ho[j][i];
            }
            hidden_delta[i] = sum * hidden_output[i] * (1 - hidden_output[i]);
        }

        // Update hidden->output weights and output biases.
        for (int i = 0; i < output_size; ++i) {
            for (int j = 0; j < hidden_size; ++j) {
                weights_ho[i][j] += learning_rate * output_delta[i] * hidden_output[j];
            }
            bias_o[i] += learning_rate * output_delta[i];
        }

        // Update input->hidden weights and hidden biases.
        for (int i = 0; i < hidden_size; ++i) {
            for (int j = 0; j < input_size; ++j) {
                weights_ih[i][j] += learning_rate * hidden_delta[i] * input[j];
            }
            bias_h[i] += learning_rate * hidden_delta[i];
        }
    }
};

// Produces a random 2-D point with each coordinate drawn uniformly from [-5, 5]
// (x first, then y, preserving the rand() call order).
std::vector<double> generateRandomPoint() {
    const double span = 10.0;   // width of each coordinate's range
    const double offset = -5.0; // lower bound of each coordinate
    std::vector<double> point;
    point.reserve(2);
    for (int axis = 0; axis < 2; ++axis) {
        point.push_back(((double)rand() / RAND_MAX) * span + offset);
    }
    return point;
}

// Labels a 2-D point by its Euclidean distance from the origin:
// class 0 if strictly inside the circle of radius 3, class 1 otherwise.
//
// Bug fix: the original computed sqrt(x*2 + y*2) — doubling the coordinates
// instead of squaring them — which mislabels most points and yields NaN
// whenever 2x + 2y < 0.  The distance now uses std::hypot(x, y), which is
// exactly sqrt(x*x + y*y) without intermediate overflow.
int classifyPoint(const std::vector<double>& point) {
    const double distance = std::hypot(point[0], point[1]);
    return distance < 3 ? 0 : 1;
}

// Runs `num_epochs` training epochs; each epoch draws `batch_size` freshly
// generated labeled points and applies one backpropagation step per point.
void trainNeuralNetwork(NeuralNetwork& nn, int num_epochs, int batch_size) {
    for (int epoch = 0; epoch < num_epochs; ++epoch) {
        for (int sample = 0; sample < batch_size; ++sample) {
            // Draw a random point and build its one-hot target vector.
            const std::vector<double> point = generateRandomPoint();
            const int label = classifyPoint(point);
            std::vector<double> target(2, 0.0);
            target[label] = 1;

            // One gradient step on this sample.
            nn.back_propagation(point, target);
        }

        // Progress report every 100 epochs.
        if (epoch % 100 == 0) {
            std::cout << "Epoch " << epoch << " completed." << std::endl;
        }
    }
}

// Evaluates the network on `num_tests` freshly generated random points and
// prints how many argmax predictions matched the true class.
void testNeuralNetwork(NeuralNetwork& nn, int num_tests) {
    int correct_predictions = 0;
    for (int trial = 0; trial < num_tests; ++trial) {
        // Random labeled sample (the one-hot target vector itself is unused
        // here, but is built to mirror the training loop exactly).
        const std::vector<double> point = generateRandomPoint();
        const int target_class = classifyPoint(point);
        std::vector<double> target(2);
        target[target_class] = 1;

        // Predicted class = argmax over the two output activations.
        const std::vector<double> prediction = nn.forward_propagation(point);
        const int predicted_class = prediction[0] > prediction[1] ? 0 : 1;

        if (predicted_class == target_class) {
            ++correct_predictions;
        }
    }

    // Report accuracy.
    std::cout << "Test Results: " << correct_predictions << " out of " << num_tests << " predictions were correct." << std::endl;
}

// Entry point: builds, trains, and evaluates the point classifier.
int main() {
    // Seed the C PRNG so each run starts from different weights and samples.
    std::srand(static_cast<unsigned int>(std::time(nullptr)));

    // Network: 2 inputs (the point's x and y), 20 hidden neurons,
    // 2 output neurons (one per class).
    NeuralNetwork nn(2, 20, 2);

    // Train for 5000 epochs of 1000 random samples each.
    trainNeuralNetwork(nn, 5000, 1000);

    // Evaluate on 10000 fresh random points.
    testNeuralNetwork(nn, 10000);

    return 0;
}
