local Layer = require("Layer");

-- Module table for the NeuralNetwork "class".
-- Prototype-based OO: instances created by NeuralNetwork:new delegate
-- method lookups back to this table through __index.
local NeuralNetwork = {}
NeuralNetwork.__index = NeuralNetwork

--- Construct a fully connected network with two hidden layers.
-- @tparam number input_size    number of input features
-- @tparam number hidden_size1  neurons in the first hidden layer
-- @tparam number hidden_size2  neurons in the second hidden layer
-- @tparam number output_size   neurons in the output layer
-- @treturn table a new NeuralNetwork instance
function NeuralNetwork:new(input_size, hidden_size1, hidden_size2, output_size)
    local net = setmetatable({}, self)
    -- Hidden layers use an activation function (3rd arg true);
    -- the output layer is linear (3rd arg false).
    net.hidden_layer1 = Layer:new(input_size, hidden_size1, true)
    net.hidden_layer2 = Layer:new(hidden_size1, hidden_size2, true)
    net.output_layer = Layer:new(hidden_size2, output_size, false)
    return net
end

--- Forward pass: propagate `inputs` through both hidden layers and the
-- output layer, returning a single scalar prediction.
-- @tparam table inputs array of input feature values
-- @treturn number the first (assumed only) output neuron's value
function NeuralNetwork:forward(inputs)
    local h1 = self.hidden_layer1:forward(inputs)
    local h2 = self.hidden_layer2:forward(h1)
    -- The output layer is assumed to contain exactly one neuron.
    return self.output_layer:forward(h2)[1]
end

--- Backward pass: run one forward pass, backpropagate the prediction
-- error, and update every layer's weights and biases.
-- @tparam table inputs array of input feature values
-- @tparam number target desired scalar output for this sample
-- @tparam number learning_rate step size passed to each Layer:backward
function NeuralNetwork:backward(inputs, target, learning_rate)
    -- Single forward pass, keeping each layer's output for the updates.
    -- (The original code called self:forward() and then re-ran both
    -- hidden layers, tripling the forward work per training step;
    -- Layer:backward receives its inputs explicitly, so one pass suffices.)
    local hidden_outputs1 = self.hidden_layer1:forward(inputs)
    local hidden_outputs2 = self.hidden_layer2:forward(hidden_outputs1)
    local output = self.output_layer:forward(hidden_outputs2)[1]

    -- Output layer has no activation function, so its derivative is 1
    -- and the delta equals the raw error.
    local output_delta = target - output

    -- Backpropagate the error to the second hidden layer.
    -- NOTE(review): no activation derivative is applied here; presumably
    -- Layer:backward applies it internally — confirm against Layer.
    local hidden_errors2 = {}
    for j = 1, #self.hidden_layer2.neurons do
        local err = 0
        for _, neuron in ipairs(self.output_layer.neurons) do
            err = err + output_delta * neuron.weights[j]
        end
        hidden_errors2[j] = err
    end

    -- Backpropagate further to the first hidden layer.
    local hidden_errors1 = {}
    for j = 1, #self.hidden_layer1.neurons do
        local err = 0
        for k = 1, #self.hidden_layer2.neurons do
            err = err + hidden_errors2[k] * self.hidden_layer2.neurons[k].weights[j]
        end
        hidden_errors1[j] = err
    end

    -- Update weights and biases, output layer first.
    self.output_layer:backward(hidden_outputs2, {output_delta}, learning_rate)
    self.hidden_layer2:backward(hidden_outputs1, hidden_errors2, learning_rate)
    self.hidden_layer1:backward(inputs, hidden_errors1, learning_rate)
end

--- Train the network with plain per-sample gradient descent.
-- Prints the mean squared error every 100 epochs.
-- @tparam table inputs  array of input vectors
-- @tparam table targets array of scalar targets, parallel to `inputs`
-- @tparam number epochs number of passes over the data set
-- @tparam number learning_rate step size for the weight updates
function NeuralNetwork:train(inputs, targets, epochs, learning_rate)
    local sample_count = #inputs
    for epoch = 1, epochs do
        local sum_sq_error = 0
        for i = 1, sample_count do
            -- Update weights for this sample, then measure the residual
            -- error with a fresh forward pass.
            self:backward(inputs[i], targets[i], learning_rate)
            local predicted = self:forward(inputs[i])
            sum_sq_error = sum_sq_error + (targets[i] - predicted)^2
        end
        if epoch % 100 == 0 then
            print(string.format("Epoch %d, Error: %.6f", epoch, sum_sq_error / sample_count))
        end
    end
end

--- Print the network's prediction next to the target for every sample.
-- Assumes each input vector has exactly three features.
-- @tparam table inputs  array of 3-element input vectors
-- @tparam table targets array of scalar targets, parallel to `inputs`
function NeuralNetwork:test(inputs, targets)
    for i, sample in ipairs(inputs) do
        local predicted = self:forward(sample)
        print(string.format("Input: (%.2f, %.2f, %.2f), Predicted Output: %.6f, Target: %.6f",
            sample[1], sample[2], sample[3], predicted, targets[i]))
    end
end

-- Export the module table (cached by require in package.loaded).
return NeuralNetwork;