// Standard library
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <utility>
#include <vector>

// Project sources (note: including .cu translation units directly — unusual,
// but kept as the project's existing build convention)
#include "matrix_operations_gpu.cu"
#include "matrix_operations_cpu.h"
#include "matrix_operations_wmma.cu"
#include "matrix_operations_wmma_1.cu"
#include "simple_fnn.h"
#include "relu.h"
#include "sigmoid.h"
#include "loss_function.cu"





// 函数定义，接受数据集大小和x值的范围
// Generates a training dataset for the linear function f(x) = 2x + 1.
//
// x values are drawn uniformly from [xMin, xMax] with exactly one decimal
// place (the range is scaled by 10, sampled as integers, then divided back).
//
// Parameters:
//   datasetSize - number of samples to generate (values <= 0 yield empty vectors)
//   xMin, xMax  - inclusive range for the x values
// Returns:
//   pair of (inputs, expectedOutputs); each sample is a 1-element vector so the
//   shapes match the FNN's batch-of-rows interface.
std::pair<std::vector<std::vector<float>>, std::vector<std::vector<float>>> generateLinearDataset(int datasetSize, float xMin, float xMax) {
    std::vector<std::vector<float>> inputs;
    std::vector<std::vector<float>> expectedOutputs;
    if (datasetSize > 0) {
        // Reserve up front to avoid repeated reallocation; guarded so a
        // negative size does not convert to a huge size_t.
        inputs.reserve(static_cast<size_t>(datasetSize));
        expectedOutputs.reserve(static_cast<size_t>(datasetSize));
    }

    // Random generator seeded from hardware entropy (non-reproducible runs).
    std::random_device rd;
    std::mt19937 gen(rd());
    // Scale the range by 10 so integer sampling gives one decimal place.
    int xMinInt = static_cast<int>(xMin * 10.0f);
    int xMaxInt = static_cast<int>(xMax * 10.0f);
    std::uniform_int_distribution<> dis(xMinInt, xMaxInt);

    for (int i = 0; i < datasetSize; ++i) {
        // Float literal avoids a silent round-trip through double.
        float x = dis(gen) / 10.0f;
        inputs.push_back({x});
        expectedOutputs.push_back({2.0f * x + 1.0f}); // f(x) = 2x + 1
    }

    return std::make_pair(inputs, expectedOutputs);
}

// Trains a small FNN to approximate f(x) = 2x + 1, using a matrix-operation
// backend selected by the first CLI argument:
//   0 = CPU, 1 = GPU, 2 = WMMA, 3 = WMMA1
// Prints per-epoch loss/time, a 10-sample prediction check, and total runtime.
int main(int argc, char *argv[]) {
    if (argc < 2) {
        // Errors go to stderr; give the user an actionable usage line.
        std::cerr << "Usage: " << argv[0] << " <mode>  (0=CPU, 1=GPU, 2=WMMA, 3=WMMA1)" << std::endl;
        return 1;
    }

    // Select the matrix-operations implementation from the CLI mode.
    std::shared_ptr<MatrixOperations> matrixOperations;
    int mode = std::atoi(argv[1]);
    switch (mode) {
        case 0: matrixOperations = std::make_shared<MatrixOperationsCPU>();   break; // CPU
        case 1: matrixOperations = std::make_shared<MatrixOperationsGPU>();   break; // GPU
        case 2: matrixOperations = std::make_shared<MatrixOperationsWMMA>();  break; // WMMA (tensor cores)
        case 3: matrixOperations = std::make_shared<MatrixOperationsWMMA1>(); break; // WMMA variant 1
        default:
            std::cerr << "unknown mode: " << mode << std::endl;
            return 1;
    }

    // Wall-clock timer for the whole run.
    auto start = std::chrono::high_resolution_clock::now();

    // Loss function; swappable alongside the backend if variants exist.
    auto lossFunction = std::make_shared<MeanSquaredErrorLoss>();

    // Network hyperparameters.
    size_t inputSize = 1;        // univariate function -> one input
    size_t hiddenLayerSize = 512;
    size_t numHiddenLayers = 3;
    size_t outputSize = 1;
    float learningRate = 0.001f; // base rate; presumably adapted by Adam inside FNN — TODO confirm
    FNN myFNN(inputSize, hiddenLayerSize, numHiddenLayers, outputSize, learningRate, nullptr, matrixOperations, lossFunction);

    const int batchSize = 64;       // samples pushed through train() at once
    const int epochs = 100;
    const int samplesPerEpoch = 64; // fresh dataset drawn each epoch

    for (int epoch = 0; epoch < epochs; epoch++) {
        // Draw a fresh linear dataset every epoch.
        auto list = generateLinearDataset(samplesPerEpoch, -10, 10);
        std::vector<std::vector<float>> inputs = list.first;
        std::vector<std::vector<float>> expectedOutputs = list.second;

        auto startTime = std::chrono::high_resolution_clock::now();
        float totalLoss = 0.0f;
        // Ceil-division so a partial trailing batch is still processed.
        size_t numBatches = (inputs.size() + batchSize - 1) / batchSize;

        for (size_t batchIndex = 0; batchIndex < numBatches; batchIndex++) {
            size_t startIndex = batchIndex * batchSize;
            size_t endIndex = std::min(inputs.size(), startIndex + batchSize);

            std::vector<std::vector<float>> batchInputs(inputs.begin() + startIndex, inputs.begin() + endIndex);
            std::vector<std::vector<float>> batchOutputs(expectedOutputs.begin() + startIndex, expectedOutputs.begin() + endIndex);

            myFNN.train(batchInputs, batchOutputs);
            auto predictions = myFNN.predict(batchInputs);

            totalLoss += myFNN.lossFunction->loss(predictions, batchOutputs);
        }
        // NOTE(review): this divides the sum of per-batch losses by the sample
        // count; if loss() already returns a per-batch mean this double-averages
        // — confirm against loss_function.cu.
        totalLoss /= inputs.size();

        auto endTime = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(endTime - startTime).count();

        std::cout << "Epoch " << (epoch + 1) << "/" << epochs
                << ", Loss: " << totalLoss
                << ", Time: " << duration << " ms" << std::endl;
    }

    // Generate 10 held-out samples and show predictions next to targets.
    auto test = generateLinearDataset(10, -10, 10);
    std::vector<std::vector<float>> testInputs = test.first;
    std::vector<std::vector<float>> testExpectedOutputs = test.second;

    for (size_t i = 0; i < testInputs.size(); i++) {
        auto output = myFNN.predict({testInputs[i]});
        std::cout << std::fixed << std::setprecision(1) // fixed-point, one decimal
                << "Input: " << testInputs[i][0]
                << " - Expected Output: " << testExpectedOutputs[i][0]
                << " - Predicted Output: " << output[0][0] << std::endl;
    }

    auto stop = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
    std::cout << "Time taken by function: " << duration.count() << " milliseconds" << std::endl;

    return 0;
}

