#include <algorithm>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <vector>

#include "loss_function.h"
#include "matrix_operations_cpu.h"
#include "relu.h"
#include "sigmoid.h"
#include "simple_fnn.h"

// Trains a small feed-forward network to approximate the linear function
// f(x) = 2x + 1 on [-100, 100] with mini-batch gradient descent, prints the
// mean loss per epoch, spot-checks a few predictions, and reports wall-clock
// time for the whole run.
int main() {
    const auto start = std::chrono::high_resolution_clock::now();

    auto matrixOperations = std::make_shared<MatrixOperationsCPU>();
    auto lossFunction = std::make_shared<MeanSquaredErrorLoss>();

    const size_t inputSize = 1;        // univariate target -> single input neuron
    const size_t hiddenLayerSize = 1024;
    const size_t numHiddenLayers = 3;
    const size_t outputSize = 1;       // single predicted value
    const float learningRate = 0.001f; // 'f' suffix avoids double->float narrowing

    FNN myFNN(inputSize, hiddenLayerSize, numHiddenLayers, outputSize, learningRate, nullptr, matrixOperations, lossFunction);

    // Build the training set for f(x) = 2x + 1: x = -100, -99.5, ..., 100.
    // Integer-indexed loop instead of a float accumulator (`x += 0.5`) so the
    // sample count never depends on floating-point rounding.
    const int numSamples = 401;
    std::vector<std::vector<float>> inputs;
    std::vector<std::vector<float>> expectedOutputs;
    inputs.reserve(numSamples);
    expectedOutputs.reserve(numSamples);
    for (int i = 0; i < numSamples; ++i) {
        const float x = -100.0f + 0.5f * static_cast<float>(i);
        inputs.push_back({x});
        expectedOutputs.push_back({2.0f * x + 1.0f}); // target: f(x) = 2x + 1
    }

    const size_t batchSize = 128; // size_t: no signed/unsigned mixing in the index math below
    const int epochs = 10;
    for (int epoch = 0; epoch < epochs; ++epoch) {
        float totalLoss = 0.0f;
        // Ceiling division: number of batches needed to cover every sample.
        const size_t numBatches = (inputs.size() + batchSize - 1) / batchSize;

        for (size_t batchIndex = 0; batchIndex < numBatches; ++batchIndex) {
            const size_t startIndex = batchIndex * batchSize;
            // Clamp the last (possibly partial) batch to the data size.
            const size_t endIndex = std::min(inputs.size(), startIndex + batchSize);

            std::vector<std::vector<float>> batchInputs(inputs.begin() + startIndex, inputs.begin() + endIndex);
            std::vector<std::vector<float>> batchOutputs(expectedOutputs.begin() + startIndex, expectedOutputs.begin() + endIndex);

            myFNN.train(batchInputs, batchOutputs);
            auto predictions = myFNN.predict(batchInputs);

            totalLoss += myFNN.lossFunction->loss(predictions, batchOutputs);
        }
        // NOTE(review): this divides the sum of per-batch losses by the sample
        // count; exact scaling depends on whether loss() already averages over
        // the batch — confirm against MeanSquaredErrorLoss.
        totalLoss /= static_cast<float>(inputs.size());
        std::cout << "Epoch " << (epoch + 1) << "/" << epochs << ", Loss: " << totalLoss << std::endl;
    }

    // Spot-check the trained network on the first 21 samples. The original
    // loop ran predict() but discarded every result (the print was commented
    // out); restore the output so the loop does useful work.
    const size_t numToShow = std::min<size_t>(inputs.size(), 21);
    for (size_t i = 0; i < numToShow; ++i) {
        auto output = myFNN.predict({inputs[i]});
        std::cout << std::fixed << std::setprecision(3)
                  << "Input: " << inputs[i][0]
                  << " - Expected Output: " << expectedOutputs[i][0]
                  << " - Predicted Output: " << output[0][0] << std::endl;
    }

    const auto stop = std::chrono::high_resolution_clock::now();
    const auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
    std::cout << "Time taken by function: " << duration.count() << " milliseconds" << std::endl;

    return 0;
}