#include "NeuralNetwork.h"
// #include "load_mnist.h"
#include "loadcsv.h"
#include <cassert>
#include <cmath>   // std::round
#include <iostream>
#include <random>  // needed to generate the random labels

//g++  main.cpp NeuralNetwork.cpp -o NN -I/opt/homebrew/Cellar/eigen/3.4.0_1/include/eigen3 -std=c++17
// Demo driver: trains a small fully-connected network on synthetic random
// data (CSV and MNIST loading paths are kept below, commented out, as
// ready-to-enable alternatives).
int main(){

//--------------LOAD CSV FILES------------------------

    // Data data {load_csv("filename.csv", "label_column", 0.8)}; //--> change training data ratio from 
                                                                   // 0.8 to something else if you want

    // MatrixXd X_train {toMatrixXd(data.X_train)};
    // MatrixXd X_test {toMatrixXd(data.X_test)};
    // VectorXd Y_train {toVectorXd(data.Y_train)};
    // VectorXd Y_test {toVectorXd(data.Y_test)};

//you can use toOneHot() to one hot encode the labels

//--------------LOAD MNIST----------------------------

    // Out-parameters for the MNIST loaders below; only used when that code
    // is uncommented, hence [[maybe_unused]] to silence compiler warnings.
    [[maybe_unused]] int num_images_train, rows_train, cols_train, num_labels_train;
    [[maybe_unused]] int num_images_test, rows_test, cols_test, num_labels_test;

    // //change path to where your MNIST data is 
    // MatrixXd train_images = load_mnist_images("MNIST/train-images.idx3-ubyte", num_images_train, rows_train, cols_train);
    // MatrixXd test_images = load_mnist_images("MNIST/t10k-images.idx3-ubyte", num_images_test, rows_test, cols_test);
    // VectorXd train_labels = load_mnist_labels("MNIST/train-labels.idx1-ubyte", num_labels_train);
    // VectorXd test_labels = load_mnist_labels("MNIST/t10k-labels.idx1-ubyte", num_labels_test);

    // MatrixXd train_labels_oh {toOneHot(train_labels, 10)};
    // MatrixXd test_labels_oh {toOneHot(test_labels, 10)};

    //--------------Random data----------------------------
    const int num_features = 784; // matches the network's input layer
    const int num_classes = 10;   // matches the network's output layer
    const int num_train_samples = 1000; // adjustable number of training samples
    const int num_test_samples = 200;   // adjustable number of test samples

    // Generate random image data: Eigen's Random() yields values in [-1, 1],
    // so shift and scale to land in [0, 1].
    MatrixXd train_images = (MatrixXd::Random(num_train_samples, num_features) + MatrixXd::Ones(num_train_samples, num_features)) / 2.0;
    MatrixXd test_images = (MatrixXd::Random(num_test_samples, num_features) + MatrixXd::Ones(num_test_samples, num_features)) / 2.0;

    // Print dataset dimensions
    std::cout << "训练图像大小: " << train_images.rows() << "x" << train_images.cols() << std::endl;
    std::cout << "测试图像大小: " << test_images.rows() << "x" << test_images.cols() << std::endl;

    // Generate random labels (integers in [0, num_classes-1])
    VectorXd train_labels_raw(num_train_samples);
    VectorXd test_labels_raw(num_test_samples);

    const int seed = 42; // fixed seed for reproducible labels
    std::mt19937 gen(seed);
    std::uniform_int_distribution<> distrib(0, num_classes - 1);

    for (int i = 0; i < num_train_samples; ++i) {
        train_labels_raw(i) = distrib(gen);
    }
    for (int i = 0; i < num_test_samples; ++i) {
        test_labels_raw(i) = distrib(gen);
    }

    // One-hot encode the labels
    MatrixXd train_labels_oh = toOneHot(train_labels_raw, num_classes);
    MatrixXd test_labels_oh = toOneHot(test_labels_raw, num_classes);
    // --- end of random data generation ---

    // Print the first 5 training labels (for debugging).
    // NOTE: the loop previously ran to 3, contradicting the "first 5" header.
    std::cout << "\n=== 前5个训练标签 ===" << std::endl;
    for(int i=0; i<5; ++i) {
        std::cout << "样本" << i << ": 原始标签=" << train_labels_raw(i) 
                  << ", 独热编码=" << train_labels_oh.row(i) << std::endl;
    }

    // 784 -> 16 -> 16 -> 10 network; ReLU hidden layers, softmax output.
    NeuralNetwork nn {{784, 16, 16, 10}, {"relu", "relu", "softmax"}};
    // 30 epochs, learning rate 0.01, batch size 32, cross-entropy loss, verbose.
    nn.learn(train_images, train_labels_oh, 30, 0.01, 32, "cross_entropy_loss", true);

    MatrixXd pred {nn.predict(test_images)};

    // Show actual one-hot labels next to rounded predictions per test sample.
    for (int i {}; i<test_labels_oh.rows(); ++i){
        std::cout<<"Actual: "<<test_labels_oh.row(i)<<" Predicted: ";
        for (int j {}; j<10; ++j){
            std::cout<<std::round(pred(i,j))<<" ";
        }
        std::cout<<std::endl;
    }

    // Trailing newline added so the accuracy line is terminated properly.
    std::cout<<"Accuracy: "<<accuracy(test_labels_oh, pred)<<std::endl;

    return 0;
}
