#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>

#include "multi-cls-data.h"  // 为实现方便，将数据直接放进数组中

/**
 * Parameters of a multinomial (softmax) logistic regression model.
 * Allocated in one contiguous chunk: header fields followed by the
 * weight matrix, so the whole model can be fwrite/fread as raw bytes.
 */
typedef struct 
{
    int paramCount;  // total parameter count: n_classes * (n_features + 1)
    int n_classes;   // number of output classes
    int n_features;  // number of input features per sample
    // C99 flexible array member (was the GNU zero-length-array extension
    // `beta[0]`). Layout: n_classes rows of (n_features + 1) doubles each;
    // the last entry of every row is that class's bias term.
    double beta[];
} LogisticsRegression;


void softmax(const double z[],double p[],  int k)
{
    double denominator = 0;
    for (int i = 0; i < k; i++) {
        p[i] = exp(z[i]);
        denominator += p[i];
    }

    for (int i = 0; i < k; i++) {
        p[i] /= denominator;
    }
}

/**
 * @brief Index of the largest element of prob[0..n_cls-1].
 * Ties resolve to the earliest index.
 * @param prob  probability (or score) array
 * @param n_cls number of entries in prob
 * @return index of the maximum element
 */
int argmax(double prob[], int n_cls)
{
    int best = 0;
    for (int i = 1; i < n_cls; i++) {
        best = (prob[i] > prob[best]) ? i : best;
    }
    return best;
}

/**
 * @brief 计算交叉熵
 * @param y_true: 基于位图表示one-hot编码
 * @param y_pred: 预测的类别概率
 * @param n_cls:类别个数
 */
double cross_entrophy(uint8_t y_true, double y_pred[], int n_cls)
{
    // printf("%f %f %f %hhu\n", y_pred[0], y_pred[1], y_pred[2], y_true);
    double loss = 0, epsilon = 1e-7;
    for (int k = 0; k < n_cls; k++) {
        loss += - ((y_true >> k) & 0x01) * log(y_pred[k] + epsilon);
    }
    return loss;
}

/**
 * @brief Encode an integer class id as a single-bit one-hot bitmap.
 * @param label_id 0-based class index; must be < 8 to fit in uint8_t
 * @return bitmap with only bit label_id set
 */
uint8_t one_hot(uint8_t label_id)
{
    uint8_t bitmap = (uint8_t)(1u << label_id);
    return bitmap;
}

/**
 * @brief Predict class probabilities for a single sample.
 * @param model trained softmax regression model
 * @param x     feature vector of length model->n_features
 * @param p     output probabilities, length model->n_classes
 *
 * @return none; results are written into p
 */
void predict(const LogisticsRegression *model, const double x[], double p[])
{
    double *z = malloc(model->n_classes * sizeof(double));
    assert(z != NULL);
    for (int j = 0; j < model->n_classes; j++) {
        // Row j of beta: n_features weights followed by the bias term.
        const double *theta_j = model->beta + (model->n_features + 1) * j;
        // Bug fix: this previously indexed with the file-global `n_features`
        // from the data header instead of the model's own field, so it only
        // worked when the two happened to match.
        z[j] = theta_j[model->n_features]; // bias
        for (int m = 0; m < model->n_features; m++) {
            z[j] += x[m] * theta_j[m];
        }
    }

    softmax(z, p, model->n_classes);
    free(z);
}

/**
 * @brief Stochastic gradient descent (SGD): update the model from one sample.
 * @param model  softmax regression model (weights updated in place)
 * @param x      feature vector of one sample
 * @param y_true one-hot target for x, encoded as a bitmap
 * @param lr     learning rate
 *
 * @return cross-entropy loss BEFORE the parameter update
 */
double stochastic_gradient_descent(LogisticsRegression *model, 
    const double x[], const uint8_t y_true, double lr)
{
    double *p = malloc(sizeof(double) * model->n_classes);
    assert(p != NULL);  // allocation was previously unchecked
    predict(model, x, p);

    double loss = cross_entrophy(y_true, p, model->n_classes);

    // Gradient step: for softmax + cross-entropy, d(loss)/d(z_k) = p_k - y_k.
    for (int k = 0; k < model->n_classes; k++) {
        double *theta_k = model->beta + (model->n_features + 1) * k;
        double err = p[k] - ((y_true >> k) & 0x01);
        for (int m = 0; m < model->n_features; m++) {
            theta_k[m] -= lr * err * x[m];
        }
        // Bug fix: bias index previously used the file-global `n_features`
        // instead of the model's own field.
        theta_k[model->n_features] -= lr * err;
    }

    free(p);  // bug fix: p leaked on every call (once per sample per epoch)
    return loss;
}

/**
 * @brief Train the model with per-sample stochastic gradient descent.
 * @param model  softmax regression model (updated in place)
 * @param x      all training features; the feature dimension is fixed at 4
 *               by the dataset header — assumes model->n_features == 4
 * @param y      integer class labels for each sample
 * @param n      number of samples
 * @param epochs number of passes over the data
 * @param lr     learning rate
 *
 * @return average cross-entropy loss over the final epoch
 *         (NOT mean squared error — the old doc/log label was wrong)
 */
double train(LogisticsRegression *model, 
    const double x[][4], const uint8_t y[], int n, 
    int epochs, double lr)
{
    double loss_val = 0;
    for (int epoch = 0; epoch < epochs; epoch++) {
        loss_val = 0;
        for (int i = 0; i < n; i++) {
            loss_val += stochastic_gradient_descent(model, x[i], one_hot(y[i]), lr);
        }
        // The objective is cross-entropy; the log label used to say "mse".
        printf("epoch = %d, ce loss = %.4f\n", epoch + 1, loss_val / n);
    }

    return loss_val / n;
}


/**
 * @brief Serialize the model (header + weights) to a binary file.
 * @param model    model to save
 * @param filename destination path (overwritten if it exists)
 */
void save_model(const LogisticsRegression *model, const char *filename)
{
    FILE *file = fopen(filename, "wb");
    assert(file != NULL);
    size_t nbytes = sizeof(LogisticsRegression) + sizeof(double) * model->paramCount;
    // fwrite was previously unchecked: a short write would silently
    // produce a truncated, unloadable model file.
    size_t written = fwrite(model, nbytes, 1, file);
    assert(written == 1);
    // fclose flushes buffered data; for a write stream its result matters.
    int rc = fclose(file);
    assert(rc == 0);
}


/**
 * @brief Load a model previously written by save_model.
 * @param pmodel   out-parameter; *pmodel receives a malloc'd model.
 *                 Ownership transfers to the caller, who must free() it.
 * @param filename path of the binary model file
 */
void load_model(LogisticsRegression **pmodel, const char *filename) 
{
    FILE *file = fopen(filename, "rb");
    assert(file != NULL);
    fseek(file, 0, SEEK_END);
    long fsize = ftell(file);
    assert(fsize > 0);  // ftell returns -1 on error; was unchecked
    fseek(file, 0, SEEK_SET);

    size_t nbytes = (size_t)fsize;
    // Sanity check: the file must at least contain the fixed header.
    assert(nbytes >= sizeof(LogisticsRegression));

    *pmodel = malloc(nbytes);
    assert(*pmodel != NULL);

    // fread was previously unchecked: a short read would leave the
    // model partially uninitialized.
    size_t nread = fread(*pmodel, nbytes, 1, file);
    assert(nread == 1);
    fclose(file);
}

/**
 * @brief Allocate and initialize a softmax regression model.
 * @param n_features number of input features
 * @param n_classes  number of output classes
 * @return malloc'd model (caller frees); every weight and bias is set to
 *         a tiny non-zero constant.
 */
LogisticsRegression *init_model(size_t n_features, size_t n_classes)
{
    size_t n_params = (n_features + 1) * n_classes;
    LogisticsRegression *model = malloc(sizeof(LogisticsRegression) + n_params * sizeof(double));
    assert(model != NULL);

    model->n_features = n_features;
    model->n_classes = n_classes;
    model->paramCount = n_params;

    // Near-zero initialization for all weights and biases.
    for (int j = 0; j < model->paramCount; j++) {
        model->beta[j] = 1e-7;
    }
    return model;
}

/**
 * Entry point. Usage: <prog> train | <prog> test
 *  - "train": fit on the header-supplied training set and save the model.
 *  - "test":  load the saved model and report loss/accuracy on the test set.
 */
int main(int argc, const char *argv[]) {
    // Graceful usage message instead of assert-abort: assert(argc == 2)
    // would abort in debug builds and be compiled out under NDEBUG,
    // leaving an out-of-bounds argv[1] read.
    if (argc != 2) {
        fprintf(stderr, "usage: %s train|test\n", argv[0]);
        return 1;
    }

    if (strcmp(argv[1], "train") == 0) {
        printf("train model\n");
        // n_features / n_classes / x_train / y_train / n_train_samples
        // come from multi-cls-data.h.
        LogisticsRegression *pmodel = init_model(n_features, n_classes);
        double loss = train(pmodel, x_train, y_train, n_train_samples, 2000, 1e-3);
        printf("final loss = %.7f\n", loss);
        save_model(pmodel, "multi-cls-logistic_model.bin");
        free(pmodel);
    }

    if (strcmp(argv[1], "test") == 0) {
        printf("test model\n");
        LogisticsRegression *pmodel = NULL;
        load_model(&pmodel, "multi-cls-logistic_model.bin");
        double loss = 0;
        int count_same = 0;
        for (int i = 0; i < n_test_samples; i++) {
            double p[3];  // NOTE(review): assumes n_classes == 3 — matches the data header
            predict(pmodel, x_test[i], p);
            // cross_entrophy expects a one-hot bitmap; y_test holds integer
            // class ids, so encode before scoring.
            loss += cross_entrophy(one_hot(y_test[i]), p, n_classes);
            count_same += (argmax(p, n_classes) == y_test[i]);
            printf("第%d条测试数据, 预测类别:[%.2f %.2f %.2f]-> %d, 实际类别:%hhu\n", i+1, p[0], p[1], p[2], 
                argmax(p, n_classes), y_test[i]);
        }
        printf("BCE = %.4f 准确率(ACC) = %.2f%%\n", loss/ n_test_samples, 
            count_same/(n_test_samples + 0.0) * 100);
        free(pmodel);
    }

    return 0;
}

