/*
这是一个DeepSeek生成的基于最简单的全连接神经网络DNN技术的程序示例。它的功能是：训练一个全连接神经网络DNN，用它来实现逻辑与功能。
逻辑与是一个硬件电路天然具备的，C语言算法中最基础的逻辑运算，我们在这里用神经网络去实现它，可谓杀鸡用牛刀。但是其中揭示出来的神经网络
训练的一般性技巧，对于C语言的中高级使用者来说，却非常有好处，它可以从最底层的视角来揭示神经网络的工作原理。
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define INPUT_SIZE 2
#define HIDDEN_SIZE 4
#define OUTPUT_SIZE 1
#define LEARNING_RATE 0.15
#define EPOCHS 10000

// Sigmoid激活函数
double sigmoid(double x) {
    return 1 / (1 + exp(-x));
}

/*
 * Derivative of the sigmoid, expressed in terms of the sigmoid's OUTPUT.
 *
 * If s = sigmoid(x), then d(sigmoid)/dx = s * (1 - s).  Callers in this
 * file (train()) pass the already-activated value, so the parameter is
 * named `s` rather than `x` to make that contract explicit — the old
 * name suggested a pre-activation input, which is not what is expected.
 */
double sigmoid_derivative(double s) {
    return s * (1 - s);
}

// Fully-connected INPUT_SIZE-HIDDEN_SIZE-OUTPUT_SIZE network (2-4-1 here),
// one hidden layer, sigmoid activations throughout.
typedef struct {
    double weights_ih[INPUT_SIZE][HIDDEN_SIZE];   // input -> hidden weights
    double weights_ho[HIDDEN_SIZE][OUTPUT_SIZE];  // hidden -> output weights
    double bias_h[HIDDEN_SIZE];                   // hidden-layer biases
    double bias_o[OUTPUT_SIZE];                   // output-layer biases
    double hidden_output[HIDDEN_SIZE];  // hidden activations cached by forward() so train() can backprop
} NeuralNetwork;

/* Uniform random double in [-1, 1]; shared by all the init loops below. */
static double random_weight(void) {
    return ((double)rand() / RAND_MAX) * 2 - 1;
}

/*
 * Randomly initialize every weight and bias to a uniform value in [-1, 1].
 * Seeds the C PRNG with the current time, so each run trains differently.
 * NOTE(review): because srand() lives here, calling init_network() twice
 * within the same second reproduces identical weights.
 */
void init_network(NeuralNetwork *net) {
    srand(time(NULL));

    // Input -> hidden weights.
    for (int in = 0; in < INPUT_SIZE; in++) {
        for (int h = 0; h < HIDDEN_SIZE; h++) {
            net->weights_ih[in][h] = random_weight();
        }
    }

    // Hidden -> output weights.
    for (int h = 0; h < HIDDEN_SIZE; h++) {
        for (int out = 0; out < OUTPUT_SIZE; out++) {
            net->weights_ho[h][out] = random_weight();
        }
    }

    // Hidden-layer biases, then output-layer biases.
    for (int h = 0; h < HIDDEN_SIZE; h++) {
        net->bias_h[h] = random_weight();
    }
    for (int out = 0; out < OUTPUT_SIZE; out++) {
        net->bias_o[out] = random_weight();
    }
}

/*
 * Forward pass: compute the network's output for one input vector.
 *
 * Hidden-layer activations are cached in net->hidden_output so that
 * train() can reuse them during backpropagation.
 *
 * Returns a pointer to a static buffer: each call overwrites the
 * previous result, and the function is not reentrant.
 */
double* forward(NeuralNetwork *net, double inputs[INPUT_SIZE]) {
    static double output[OUTPUT_SIZE];

    // Hidden layer: weighted sum of inputs plus bias, squashed by sigmoid.
    for (int h = 0; h < HIDDEN_SIZE; h++) {
        double sum = 0;
        for (int in = 0; in < INPUT_SIZE; in++) {
            sum += inputs[in] * net->weights_ih[in][h];
        }
        sum += net->bias_h[h];
        net->hidden_output[h] = sigmoid(sum);
    }

    // Output layer: weighted sum of cached hidden activations plus bias.
    for (int out = 0; out < OUTPUT_SIZE; out++) {
        double sum = 0;
        for (int h = 0; h < HIDDEN_SIZE; h++) {
            sum += net->hidden_output[h] * net->weights_ho[h][out];
        }
        sum += net->bias_o[out];
        output[out] = sigmoid(sum);
    }

    return output;
}

/*
 * Train the network with plain per-sample (stochastic) gradient descent
 * for EPOCHS passes over the data.
 *
 *   net         - network to train (weights updated in place)
 *   inputs      - num_samples rows of INPUT_SIZE features
 *   targets     - num_samples rows of OUTPUT_SIZE expected outputs
 *   num_samples - number of training rows
 *
 * Statement order matters below: hidden_delta must be computed from the
 * hidden->output weights BEFORE those weights are updated.
 */
void train(NeuralNetwork *net, double inputs[][INPUT_SIZE], double targets[][OUTPUT_SIZE], int num_samples) {
    for (int epoch = 0; epoch < EPOCHS; epoch++) {
        double total_error = 0;
        
        for (int s = 0; s < num_samples; s++) {
            // Forward pass; as a side effect this fills net->hidden_output
            // with the hidden-layer activations needed for backprop.
            double *output = forward(net, inputs[s]);
            
            // Hidden activations cached by forward().
            double* hidden_output = net->hidden_output;
            
            // Output-layer delta: (target - output) * sigmoid'(output).
            double output_delta[OUTPUT_SIZE];
            for (int k = 0; k < OUTPUT_SIZE; k++) {
                double error = targets[s][k] - output[k];
                output_delta[k] = error * sigmoid_derivative(output[k]) * 2.0;  // extra factor 2 effectively doubles the learning rate to speed convergence
                total_error += error * error;  // accumulate squared error for progress reporting
            }
            
            // Hidden-layer delta, back-propagated through the (still
            // un-updated) hidden->output weights.
            double hidden_delta[HIDDEN_SIZE] = {0};
            for (int j = 0; j < HIDDEN_SIZE; j++) {
                double error = 0.0;
                for (int k = 0; k < OUTPUT_SIZE; k++) {
                    error += output_delta[k] * net->weights_ho[j][k];
                }
                hidden_delta[j] = error * sigmoid_derivative(hidden_output[j]);
            }
            
            // Update output-layer weights and biases.
            // NOTE(review): bias_o[k] sits inside the j loop, so each output
            // bias is nudged HIDDEN_SIZE times per sample — this just scales
            // its effective learning rate, but looks unintentional; confirm.
            for (int j = 0; j < HIDDEN_SIZE; j++) {
                for (int k = 0; k < OUTPUT_SIZE; k++) {
                    net->weights_ho[j][k] += LEARNING_RATE * output_delta[k] * hidden_output[j];
                    net->bias_o[k] += LEARNING_RATE * output_delta[k];
                }
            }
            
            // Update hidden-layer weights and biases.
            // NOTE(review): likewise, bias_h[j] is updated INPUT_SIZE times
            // per sample because it sits inside the i loop.
            for (int i = 0; i < INPUT_SIZE; i++) {
                for (int j = 0; j < HIDDEN_SIZE; j++) {
                    net->weights_ih[i][j] += LEARNING_RATE * hidden_delta[j] * inputs[s][i];
                    net->bias_h[j] += LEARNING_RATE * hidden_delta[j];
                }
            }
        }
        
        // Periodic progress report of the summed squared error.
        if (epoch % 1000 == 0) {
            printf("Epoch %d, Error: %f\n", epoch, total_error);
        }
    }
}

/*
 * Train a tiny DNN on the four AND truth-table rows, then print its
 * predictions for the training set and for a second hand-made test set.
 */
int main() {
    // Training data: logical AND truth table (output 1 only for 1 AND 1).
    double inputs[4][2] = {{0,0}, {0,1}, {1,0}, {1,1}};
    double inputs2[4][2] = {{1,1},{1,1},{1,1},{1,0}}; // extra hand-made test set
    double targets[4][1] = {{0}, {0}, {0}, {1}};
    
    NeuralNetwork net;
    init_network(&net);
    train(&net, inputs, targets, 4);
    
    // Evaluate on the training set itself.
    printf("\nTest results:\n");
    for (int i = 0; i < 4; i++) {
        double *output = forward(&net, inputs[i]);
        printf("%.1f AND %.1f = %.2f\n", inputs[i][0], inputs[i][1], output[0]);
    }

    // Evaluate on the second test set.
    printf("\nTest II  results:\n");
    for (int i = 0; i < 4; i++) {
        double *output = forward(&net, inputs2[i]);
        // BUG FIX: this previously printed inputs[i] while evaluating
        // inputs2[i], so the displayed operands did not match the sample
        // actually fed to the network.
        printf("%.1f AND %.1f = %.2f\n", inputs2[i][0], inputs2[i][1], output[0]);
    }    
    return 0;
}
