#include "eigen/fashion_mnist_classification.h"

#include <chrono>
#include <cstdint>  // for int64_t
#include <iomanip>  // for std::setw, std::left
#include <iostream>
#include <memory>   // for std::make_shared
#include <string>

#include "eigen/classification_accuracy.h"
#include "eigen/conv.h"
#include "eigen/cross_entropy_loss.h"
#include "eigen/fashion_mnist_load.h"
#include "eigen/iterator.h"
#include "eigen/linear.h"
#include "eigen/net.h"
#include "eigen/normal.h"
#include "eigen/position_decay_linear.h"
#include "eigen/relu.h"
#include "eigen/softmax.h"
#include "log.h"
#include "tools/random_sampling.h"

namespace  ldl_eigen
{
// Render the shape of a matrix as "(rows, cols)" for log output.
std::string MatrixXf_shape(const Eigen::MatrixXf &data)
{
    std::string shape{"("};
    shape += std::to_string(data.rows());
    shape += ", ";
    shape += std::to_string(data.cols());
    shape += ")";
    return shape;
}

// Train a multi-layer net (PositionDecayLinear/ReLU blocks + Linear/SoftMax
// head with cross-entropy loss) on Fashion-MNIST, augmented with random
// noise images, and log per-epoch loss/accuracy/timing as a table.
void FashionMnistClassification::net_train()
{
    // Conv-related constants are kept for the commented-out Conv experiment.
    const int64_t img_height{28};
    const int64_t img_width{28};
    const int64_t kernel_size{3};
    const int64_t stride{1};
    const int64_t padding{2};
    // const int64_t d{((img_height+2*padding-kernel_size)/stride + 1)*((img_width+2*padding-kernel_size)/stride + 1)};
    int64_t d = 28 * 28;    // dimension of a single flattened sample
    int64_t hidden1 = 256;
    int64_t h = 10;         // number of output classes

    Net net{};
    // net.add_hidden_layer(std::make_shared<Conv>(img_height, img_width, kernel_size, stride, padding));

    // Two PositionDecayLinear + ReLU blocks.
    for(auto linear_index = 0; linear_index < 2; linear_index++)
    {
        net.add_hidden_layer(std::make_shared<PositionDecayLinear>(d,d, 28, 28));
        net.add_hidden_layer(std::make_shared<ReLU>());

        // net.add_hidden_layer(std::make_shared<Linear>(d,hidden1));
        // net.add_hidden_layer(std::make_shared<ReLU>());
    }

    net.add_hidden_layer(std::make_shared<Linear>(d,hidden1));
    net.add_hidden_layer(std::make_shared<ReLU>());
    net.add_hidden_layer(std::make_shared<Linear>(hidden1,h));
    net.add_hidden_layer(std::make_shared<SoftMax>());
    net.set_loss(std::make_shared<CrossEntropyLoss>());

    const std::string resource_dir{"data/FashionMnist/"};
    FashionMnistLoad fashion_mnist_load(resource_dir);
    fashion_mnist_load.load();
    const auto train_images =  fashion_mnist_load.get_normalized_train_images();
    const auto train_labels = fashion_mnist_load.get_one_hot_train_labels();

    // Augment the training set with 10x random images; their labels are a
    // zero matrix (Normal::matrix with mean 0, stddev 0), i.e. "no class".
    auto random_train_images = Normal::matrix(train_images.rows()*10, train_images.cols());
    auto random_train_labels = Normal::matrix(train_labels.rows()*10, train_labels.cols(), 0.0, 0.0);

    Eigen::MatrixXf train_images_all(random_train_images.rows() + train_images.rows(), train_images.cols());
    train_images_all << train_images, random_train_images;

    Eigen::MatrixXf train_labels_all(train_labels.rows() + random_train_labels.rows(), random_train_labels.cols());
    train_labels_all << train_labels, random_train_labels;

    const auto test_labels = fashion_mnist_load.get_one_hot_test_labels();
    Iterator iterator(train_images_all.rows());
    LogInfo() << "train_images_all.rows()" << train_images_all.rows();

    int64_t batch_size = 32;   // samples per batch
    int64_t epochs = 100;      // number of training epochs
    Eigen::MatrixXf train_images_batch;
    Eigen::MatrixXf train_labels_batch;
    // Initialized so the per-epoch log is well-defined even if no batch runs
    // (the original left this uninitialized).
    float loss{0.0F};

    // Print the table header.
    LogInfo() << std::right
            << std::setw(10) << "Loss"
            << std::setw(10) << "TrainAcc"
            << std::setw(10) << "TestAcc"
            << std::setw(12) << "TrainTime"
            << std::setw(12) << "Epoch";
    for(int64_t epoch = 1; epoch <= epochs; epoch++)
    {
        // NOTE(review): the batch count is derived from the ORIGINAL set size
        // while batches are drawn from the 11x larger augmented set — confirm
        // this is intentional (each epoch only visits ~1/11 of the data).
        int64_t loop_per_epoch = train_images.rows() / batch_size;
        // steady_clock: monotonic, the right clock for measuring durations.
        auto start = std::chrono::steady_clock::now();
        for(int64_t loop = 0; loop < loop_per_epoch; loop++)
        {
            // Draw a random batch and run one optimization step.
            iterator.get_rand_batch(train_images_all, train_labels_all, train_images_batch, train_labels_batch, batch_size);
            loss = net.train(train_images_batch, train_labels_batch);
        }
        auto end = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
        auto predicts = net.predict(fashion_mnist_load.get_normalized_test_images());
        auto accuracy = ClassificationAccuracy::accuracy(predicts, test_labels);

        predicts = net.predict(train_images);
        auto train_accuracy = ClassificationAccuracy::accuracy(predicts, train_labels);

        // Print one data row; widths chosen to line up with the header
        // (setw(9) + "ms " == 12 columns for TrainTime).
        LogInfo() << std::right
                << std::setw(10) << loss/batch_size
                << std::setw(10) << train_accuracy
                << std::setw(10) << accuracy
                << std::setw(9) << duration.count() << "ms "
                << std::setw(7) << epoch << "/" << epochs;
    }
}

void FashionMnistClassification::train()
{
    // 加载训练集（前10张）
    const std::string resource_dir{"../../data/FashionMnist/"};
    FashionMnistLoad fashion_mnist_load(resource_dir);
    fashion_mnist_load.load();
    const auto& train_images =  fashion_mnist_load.get_normalized_train_images();
    const auto& train_labels = fashion_mnist_load.get_one_hot_train_labels();

    int64_t d = 28 * 28;    // 单个样本的维度
    int64_t h = 10; // 输出的个数
    Linear linear(d, h);
    SoftMax softmax{};
    CrossEntropyLoss cross_entropy_loss{};

    int64_t batch_size = 128;   // 每个批次的样本个数
    int64_t epochs = 10;    // 训练次数
    Iterator iterator(train_images.rows());
    Eigen::MatrixXf linear_output{};
    Eigen::MatrixXf relu_output{};
    Eigen::MatrixXf softmax_output{};
    float loss{};
    Eigen::MatrixXf cross_entropy_derivative{};
    const Eigen::MatrixXf *softmax_devivative{};
    const Eigen::MatrixXf relu_devivative{};
    const Eigen::MatrixXf* linear_derivative{};

    Eigen::MatrixXf train_images_batch;
    Eigen::MatrixXf train_labels_batch;
    for(int64_t epoch = 0; epoch < epochs; epoch++)
    {
        int64_t loop_per_epoch = train_images.rows() / batch_size;
        auto start = std::chrono::high_resolution_clock::now();
        for(int64_t loop = 0;loop < loop_per_epoch;loop++)
        {
            // 获取批次数据
            iterator.get_rand_batch(train_images, train_labels, train_images_batch, train_labels_batch, batch_size);

            // 将数据输入到全连接层，得到全连接的输出
            linear.forward(train_images_batch, linear_output);
            // LogInfo() << "linear_output[64]: " << linear_output[64];

            // 将全连接层的输出，作为ReLU层的输入
            // relu.forward(linear_output, relu_output);
            // LogInfo() << "relu_output[64]: " << relu_output[64];

            // 将ReLU层的输出，输入到softmax层，得到softmax层的输出
            softmax.forward(linear_output, softmax_output);
            // LogInfo() << "softmax_output[64]: " << softmax_output[64];

            // 将softmax层的输出和对应的标签数据，输入到交叉熵层，得到损失值（误差）
            loss = cross_entropy_loss.forward(softmax_output, train_labels_batch);
            // LogInfo() << "CrossEntropyLoss loss: " << loss/batch_size;

            // 计算交叉熵层的损失对输入的偏导数
            cross_entropy_derivative = cross_entropy_loss.backward();
            // LogInfo() << "cross_entropy_derivative[0]: " << cross_entropy_derivative.row(0);

            // 计算softmax层的损失对输入的偏导数
            softmax_devivative = &(softmax.backward(cross_entropy_derivative));

            // 计算ReLU层的损失对输入的偏导数
            // relu_devivative = relu.backward(softmax_devivative);

            // 计算全连接层中的权重对loss的导数，偏执对loss的导数和输入对loss的导数
            linear_derivative = &(linear.backward(*softmax_devivative));
            linear.update();
            // std::cout << std::endl;
        }
        auto end = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start);
        LogInfo() << "Train time: " << duration.count() << " ms, epoch: " << epoch << ", epochs: " << epochs;
        LogInfo() << "linear output.shape: " << MatrixXf_shape(linear_output);

        // LogInfo() << "ReLU output.shape: " << MatrixXf_shape(relu_output);
        // LogInfo() << "ReLU output: " << relu_output;

        LogInfo() << "SoftMax output.shape: " << MatrixXf_shape(softmax_output);
        // LogInfo() << "SoftMax output: " << softmax_output;

        LogInfo() << "CrossEntropyLoss loss: " << loss/batch_size;

        LogInfo() << "CrossEntropyLoss derivative.shape: " << MatrixXf_shape(cross_entropy_derivative);
        // LogInfo() << "CrossEntropyLoss derivative: " << cross_entropy_derivative;

        LogInfo() << "Softmax devivative.shape: " << MatrixXf_shape(*softmax_devivative);
        // LogInfo() << "Softmax derivative: " << softmax_devivative;

        // LogInfo() << "ReLU devivative.shape: " << MatrixXf_shape(*relu_devivative);
        // LogInfo() << "ReLU derivative: " << relu_devivative;

        LogInfo() << "Linear devivative.shape: " << MatrixXf_shape(*linear_derivative);
        // LogInfo() << "Linear derivative: " << linear_derivative;

        // LogInfo() << "train_labels_batch: " << train_labels_batch;
        std::cout << std::endl;
    }
}
}