-- 定义 Sigmoid 激活函数及其导数
--- Logistic sigmoid activation: maps any real x into the open interval (0, 1).
-- @tparam number x pre-activation value
-- @treturn number 1 / (1 + e^-x)
local function sigmoid(x)
    local e = math.exp(-x)
    return 1 / (1 + e)
end

--- Derivative of the logistic sigmoid at x.
-- Uses the identity d/dx sigmoid(x) = s * (1 - s) where s = sigmoid(x).
-- @tparam number x pre-activation value
-- @treturn number slope of the sigmoid at x (peaks at 0.25 when x == 0)
local function sigmoid_derivative(x)
    local s = 1 / (1 + math.exp(-x))  -- sigmoid(x), inlined
    return s * (1 - s)
end

-- 初始化权重和偏置
--- Randomly initialise the parameters of a 3-layer network
-- (input -> sigmoid hidden -> single linear output).
-- Every parameter is drawn uniformly from [-0.1, 0.1).
--
-- Note: output_size is accepted for interface symmetry but the network is
-- hard-wired to one output unit: weights_hidden_output is a flat vector of
-- length hidden_size and bias_output has exactly one entry. (Keeping those
-- shapes preserves compatibility with forward/backward callers.)
--
-- @tparam number input_size  number of input features
-- @tparam number hidden_size number of hidden units
-- @tparam number output_size currently must be 1 (see note above)
-- @treturn table weights_input_hidden  matrix [input_size][hidden_size]
-- @treturn table weights_hidden_output vector [hidden_size]
-- @treturn table bias_hidden           vector [hidden_size]
-- @treturn table bias_output           one-element table
local function init_weights(input_size, hidden_size, output_size)
    local weights_input_hidden = {}
    local weights_hidden_output = {}
    local bias_hidden = {}
    local bias_output = {}

    -- Global multiplier for the init range. Kept at 1; applied uniformly to
    -- every parameter (the original skipped it for bias_output only).
    local scale = 1

    for i = 1, input_size do
        weights_input_hidden[i] = {}
        for j = 1, hidden_size do
            weights_input_hidden[i][j] = scale * math.random() * 0.2 - 0.1  -- small random value
        end
    end

    for j = 1, hidden_size do
        weights_hidden_output[j] = scale * math.random() * 0.2 - 0.1
        bias_hidden[j] = scale * math.random() * 0.2 - 0.1
    end

    -- Single output unit: same scale for consistency with the rest.
    bias_output[1] = scale * math.random() * 0.2 - 0.1

    return weights_input_hidden, weights_hidden_output, bias_hidden, bias_output
end

-- 前向传播
local function forward(input, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output)
    local hidden_layer_input = {}
    local hidden_layer_output = {}
    local output_layer_input = 0
    local output_layer_output = 0

    -- 计算隐藏层输入和输出
    for j = 1, #bias_hidden do
        hidden_layer_input[j] = bias_hidden[j]
        for i = 1, #input do
            hidden_layer_input[j] = hidden_layer_input[j] + input[i] * weights_input_hidden[i][j]
        end
        hidden_layer_output[j] = sigmoid(hidden_layer_input[j])  -- 隐藏层仍然使用 sigmoid
    end

    -- 计算输出层输入和输出
    output_layer_input = bias_output[1]
    for j = 1, #hidden_layer_output do
        output_layer_input = output_layer_input + hidden_layer_output[j] * weights_hidden_output[j]
    end
    output_layer_output = output_layer_input  -- 输出层不再使用 sigmoid，直接输出线性值

    return hidden_layer_input, hidden_layer_output, output_layer_input, output_layer_output
end

--- Backpropagation step: updates all weights and biases IN PLACE using
-- plain SGD on the squared error of a single sample.
-- The output unit is linear, so its delta is just (target - prediction).
-- @tparam table input               the sample's input vector
-- @tparam number target             the sample's target value
-- @tparam number learning_rate      SGD step size
-- @tparam table weights_input_hidden  mutated: input->hidden weights
-- @tparam table weights_hidden_output mutated: hidden->output weights
-- @tparam table bias_hidden           mutated: hidden biases
-- @tparam table bias_output           mutated: one-element output bias
-- @tparam table hidden_layer_input   hidden pre-activations from forward()
-- @tparam table hidden_layer_output  hidden activations from forward()
-- @tparam number output_layer_input  unused here; kept for call symmetry
-- @tparam number output_layer_output network prediction from forward()
local function backward(input, target, learning_rate, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output, hidden_layer_input, hidden_layer_output, output_layer_input, output_layer_output)
    -- Linear output unit: activation derivative is 1, so delta == raw error.
    local output_delta = target - output_layer_output

    -- Hidden deltas must be computed from the *pre-update* output weights,
    -- so this loop runs before any weight is modified.
    local hidden_deltas = {}
    for h = 1, #hidden_layer_output do
        local back_err = output_delta * weights_hidden_output[h]
        local s = 1 / (1 + math.exp(-hidden_layer_input[h]))  -- sigmoid, inlined
        hidden_deltas[h] = back_err * s * (1 - s)             -- times sigmoid'
    end

    -- Update hidden -> output weights and the output bias.
    for h = 1, #hidden_layer_output do
        weights_hidden_output[h] = weights_hidden_output[h] + learning_rate * output_delta * hidden_layer_output[h]
    end
    bias_output[1] = bias_output[1] + learning_rate * output_delta

    -- Update input -> hidden weights and the hidden biases.
    for k = 1, #input do
        local row = weights_input_hidden[k]
        for h = 1, #hidden_layer_output do
            row[h] = row[h] + learning_rate * hidden_deltas[h] * input[k]
        end
    end
    for h = 1, #hidden_layer_output do
        bias_hidden[h] = bias_hidden[h] + learning_rate * hidden_deltas[h]
    end
end

--- Train the network with per-sample SGD for a fixed number of epochs.
-- Prints the mean squared error after each epoch.
-- @tparam table inputs         array of input vectors
-- @tparam table targets        array of scalar targets (parallel to inputs)
-- @tparam number epochs        number of full passes over the data
-- @tparam number learning_rate SGD step size
-- @tparam number input_size    number of input features
-- @tparam number hidden_size   number of hidden units
-- @tparam number output_size   forwarded to init_weights (fixed to 1)
-- @treturn table,table,table,table the trained parameters, in the same
--   order init_weights returns them
local function train(inputs, targets, epochs, learning_rate, input_size, hidden_size, output_size)
    local wih, who, bh, bo = init_weights(input_size, hidden_size, output_size)

    for epoch = 1, epochs do
        local sum_sq_error = 0

        for sample = 1, #inputs do
            local x, y = inputs[sample], targets[sample]

            -- Forward pass, then update the parameters in place.
            local hli, hlo, oli, olo = forward(x, wih, who, bh, bo)
            backward(x, y, learning_rate, wih, who, bh, bo, hli, hlo, oli, olo)

            -- Accumulate squared error (pre-update prediction).
            local err = y - olo
            sum_sq_error = sum_sq_error + err * err
        end

        -- Report the epoch's mean squared error.
        print(string.format("Epoch %d, Error: %.6f", epoch, sum_sq_error / #inputs))
    end

    return wih, who, bh, bo
end

--- Run one forward pass and return only the network's prediction.
-- @tparam table input the input vector
-- @tparam table weights_input_hidden  input->hidden weight matrix
-- @tparam table weights_hidden_output hidden->output weight vector
-- @tparam table bias_hidden           hidden biases
-- @tparam table bias_output           one-element output bias
-- @treturn number the predicted output value
local function test(input, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output)
    -- forward() returns 4 values; only the final prediction is needed here.
    local prediction = select(4, forward(input, weights_input_hidden, weights_hidden_output, bias_hidden, bias_output))
    return prediction
end

--- Build num_samples random training pairs.
-- Each input is a triple (x1, x2, x3) drawn uniformly from [0, 1);
-- the target is y = 10*x1 + x2^3 + sin(x3).
-- @tparam number num_samples how many pairs to generate
-- @treturn table array of {x1, x2, x3} input vectors
-- @treturn table parallel array of scalar targets
local function generate_data(num_samples)
    local inputs, targets = {}, {}

    for sample = 1, num_samples do
        local x1, x2, x3 = math.random(), math.random(), math.random()
        inputs[sample] = {x1, x2, x3}
        targets[sample] = x1 * 10 + x2^3 + math.sin(x3)
    end

    return inputs, targets
end

--- Entry point: generate random data, train on the first 80%, then print
-- predictions versus targets for the remaining 20%.
local function main()
    local num_samples = 100
    local inputs, targets = generate_data(num_samples)

    -- Split into training (first 80%) and test (last 20%) sets.
    local train_inputs, train_targets = {}, {}
    local test_inputs, test_targets = {}, {}
    local split = num_samples * 0.8
    for i = 1, num_samples do
        if i <= split then
            train_inputs[#train_inputs + 1] = inputs[i]
            train_targets[#train_targets + 1] = targets[i]
        else
            test_inputs[#test_inputs + 1] = inputs[i]
            test_targets[#test_targets + 1] = targets[i]
        end
    end

    -- Network hyper-parameters.
    local input_size, hidden_size, output_size = 3, 6, 1
    local epochs, learning_rate = 5000, 0.3

    local wih, who, bh, bo = train(train_inputs, train_targets, epochs, learning_rate, input_size, hidden_size, output_size)

    -- Evaluate on the held-out samples.
    print("\nTesting the neural network:")
    for i = 1, #test_inputs do
        local x = test_inputs[i]
        local prediction = test(x, wih, who, bh, bo)
        print(string.format("Input: (%.2f, %.2f, %.2f), Predicted Output: %.6f, Target: %.6f", 
            x[1], x[2], x[3], prediction, test_targets[i]))
    end
end

-- Run the main program
main()
