//
// Created by hamlet on 25-2-7.
//

#include "gpt2.h"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <format>
#include <fstream>
#include <iostream>
#include <numeric>
#include <ranges>
#include <vector>

#include "function.h"

// Allocate one contiguous float arena large enough for `num` tensors and
// point each output tensor pointer at its slice.
//   sizes: per-tensor element counts (num entries)
//   num:   number of tensors
//   ptrs:  ptrs[i] is the address of the i-th tensor pointer to fill in
// Returns {arena base, total element count}; the caller owns the arena and
// must release it with delete[].
std::pair<float*, size_t> set_pointer(const size_t* sizes, const int num, float*** ptrs) {
    // Sum the element counts in size_t. The previous fold used an int
    // initializer, so the running total was an int and could overflow for
    // large models.
    const size_t total = std::accumulate(sizes, sizes + num, size_t{0});
    // malloc all parameters all at once
    auto* mem = new float[total];
    // carve the arena: each *ptrs[i] gets a slice of sizes[i] floats
    float* it = mem;
    for (int i = 0; i < num; i++) {
        *ptrs[i] = it;
        it += sizes[i];
    }
    return {mem, total};
}

void ParameterTensors::fill_in_sizes(size_t* sizes, const GPT2Config& config) {
    const size_t Vp = config.padded_vocab_size;
    const size_t C = config.channels;
    const size_t maxT = config.max_seq_len;
    const size_t L = config.num_layers;
    sizes[0] = Vp * C;           // wte
    sizes[1] = maxT * C;         // wpe
    sizes[2] = L * C;            // ln1w
    sizes[3] = L * C;            // ln1b
    sizes[4] = L * (3 * C) * C;  // qkvw
    sizes[5] = L * (3 * C);      // qkvb
    sizes[6] = L * C * C;        // attprojw
    sizes[7] = L * C;            // attprojb
    sizes[8] = L * C;            // ln2w
    sizes[9] = L * C;            // ln2b
    sizes[10] = L * (4 * C) * C; // fcw
    sizes[11] = L * (4 * C);     // fcb
    sizes[12] = L * C * (4 * C); // fcprojw
    sizes[13] = L * C;           // fcprojb
    sizes[14] = C;               // lnfw
    sizes[15] = C;               // lnfb
}

// Build all parameter tensors as views into one contiguous allocation.
ParameterTensors::ParameterTensors(const GPT2Config& config) {
    // Per-tensor element counts. Must be a plain local: the previous
    // `static` shared one array across all instances.
    size_t sizes[NUM_TENSORS]; // 参数大小 (parameter sizes)
    fill_in_sizes(sizes, config);
    // Member-pointer table, in the same order as fill_in_sizes.
    // BUG FIX: this array was `static`, so it captured `&this->...` of the
    // FIRST instance ever constructed and every later instance wrote its
    // slice pointers into that first object instead of itself.
    float** ptrs[NUM_TENSORS] = {
        &this->wte, &this->wpe, &this->ln1w, &this->ln1b, &this->qkvw, &this->qkvb,
        &this->attprojw, &this->attprojb, &this->ln2w, &this->ln2b, &this->fcw, &this->fcb,
        &this->fcprojw, &this->fcprojb, &this->lnfw, &this->lnfb
    };
    // One arena allocation; each member now points into it.
    auto [mem, size] = set_pointer(sizes, NUM_TENSORS, ptrs);
    this->memory = mem;
    this->size = size;
}

// Release the single arena that backs every parameter tensor; the individual
// tensor pointers are views into it and need no separate cleanup.
ParameterTensors::~ParameterTensors() {
    delete[] memory;
}

void ActivationTensors::fill_in_sizes(size_t* sizes, const GPT2Config& config, const int B, const int T) {
    const size_t C = config.channels;
    const size_t NH = config.num_heads;
    const size_t L = config.num_layers;
    const size_t Vp = config.padded_vocab_size;
    sizes[0] = B * T * C;          // encoded
    sizes[1] = L * B * T * C;      // ln1
    sizes[2] = L * B * T;          // ln1_mean
    sizes[3] = L * B * T;          // ln1_rstd
    sizes[4] = L * B * T * 3 * C;  // qkv
    sizes[5] = L * B * T * C;      // atty
    sizes[6] = L * B * NH * T * T; // preatt
    sizes[7] = L * B * NH * T * T; // att
    sizes[8] = L * B * T * C;      // attproj
    sizes[9] = L * B * T * C;      // residual2
    sizes[10] = L * B * T * C;     // ln2
    sizes[11] = L * B * T;         // ln2_mean
    sizes[12] = L * B * T;         // ln2_rstd
    sizes[13] = L * B * T * 4 * C; // fch
    sizes[14] = L * B * T * 4 * C; // fch_gelu
    sizes[15] = L * B * T * C;     // fcproj
    sizes[16] = L * B * T * C;     // residual3
    sizes[17] = B * T * C;         // lnf
    sizes[18] = B * T;             // lnf_mean
    sizes[19] = B * T;             // lnf_rstd
    sizes[20] = B * T * Vp;        // logits
    sizes[21] = B * T * Vp;        // probs
    sizes[22] = B * T;             // losses
}


// Build all activation tensors for a (B, T) batch as views into one
// contiguous allocation.
ActivationTensors::ActivationTensors(const GPT2Config& config, const int B, const int T) {
    // Per-tensor element counts. Must be a plain local: the previous
    // `static` shared one array across all instances, even though the
    // counts depend on this instance's B and T.
    size_t sizes[NUM_TENSORS]; // 激活大小 (activation sizes)
    fill_in_sizes(sizes, config, B, T);
    // Member-pointer table, in the same order as fill_in_sizes.
    // BUG FIX: this array was `static`, so it captured `&this->...` of the
    // FIRST instance ever constructed and every later instance wrote its
    // slice pointers into that first object instead of itself.
    float** ptrs[NUM_TENSORS] = {
        &this->encoded, &this->ln1, &this->ln1_mean, &this->ln1_rstd, &this->qkv, &this->atty,
        &this->preatt, &this->att, &this->attproj, &this->residual2, &this->ln2, &this->ln2_mean,
        &this->ln2_rstd, &this->fch, &this->fch_gelu, &this->fcproj, &this->residual3, &this->lnf,
        &this->lnf_mean, &this->lnf_rstd, &this->logits, &this->probs, &this->losses
    };
    // One arena allocation; the total element count is not stored here
    // (unlike ParameterTensors, this class keeps only the base pointer).
    this->memory = set_pointer(sizes, NUM_TENSORS, ptrs).first;
}

// Release the single arena that backs every activation tensor; the individual
// tensor pointers are views into it and need no separate cleanup.
ActivationTensors::~ActivationTensors() {
    delete[] memory;
}

// Load a GPT-2 model from a checkpoint file: a 256-int32 header (magic,
// version, hyperparameters) followed by all parameters as raw float32.
// Terminates the process on any open/validation/read failure.
GPT2::GPT2(const char* checkpoint_path) {
    // Open in binary mode: the checkpoint is raw ints/floats, and text-mode
    // newline translation would corrupt the data on some platforms.
    std::ifstream model_file{checkpoint_path, std::ios::binary};
    if (!model_file.is_open()) {
        std::cerr << std::format("Error opening model file path: {}", checkpoint_path) << std::endl;
        std::terminate();
    }
    int model_header[256];
    model_file.read(reinterpret_cast<char*>(model_header), sizeof(model_header));
    if (!model_file) {
        std::cerr << std::format("Error reading model header from: {}", checkpoint_path) << std::endl;
        std::terminate();
    }
    constexpr int EXPECTED_MAGIC = 20240326, EXPECTED_VERSION = 3;
    if (model_header[0] != EXPECTED_MAGIC) {
        std::cerr << std::format("Bad magic model file: {}, expected {}", model_header[0], EXPECTED_MAGIC) << std::endl;
        std::terminate();
    }
    if (model_header[1] != EXPECTED_VERSION) {
        std::cerr << std::format("Bad version in model file (version = {}, expected {})", model_header[1],
                                 EXPECTED_VERSION) << std::endl;
        std::terminate();
    }

    // read in hyperparameters
    this->config.max_seq_len = model_header[2];
    this->config.vocab_size = model_header[3];
    this->config.num_layers = model_header[4];
    this->config.num_heads = model_header[5];
    this->config.channels = model_header[6];
    this->config.padded_vocab_size = model_header[7];

    // read in all the parameters from file; fail loudly on a truncated file
    // rather than silently running with garbage weights
    this->params = new ParameterTensors{this->config};
    model_file.read(reinterpret_cast<char*>(this->params->memory), sizeof(float) * this->params->size);
    if (!model_file) {
        std::cerr << std::format("Error reading model parameters from: {}", checkpoint_path) << std::endl;
        std::terminate();
    }
    model_file.close();

    // other inits; activations are allocated lazily on the first forward()
    this->acts = nullptr;
    this->inputs = nullptr;
    this->batch_size = 0;
    this->seq_len = 0;
}

// Free owned resources; each member is either a live allocation or nullptr
// (delete on nullptr is a no-op), so no guards are needed.
GPT2::~GPT2() {
    delete[] inputs;
    delete acts;
    delete params;
}

// Run the full GPT-2 forward pass over a batch of token ids.
//   inputs: B*T token indices, each in [0, vocab_size)
//   B, T:   batch size and sequence length
// Results are left in this->acts; in particular acts->probs holds the final
// softmax probabilities (B x T x padded_vocab_size). Activation buffers are
// (re)allocated lazily whenever B or T changes.
void GPT2::forward(const int* inputs, const int B, const int T) {
    // ensure the model was initialized or error out
    if (this->params == nullptr) {
        std::cerr << "Error: model was not initialized properly." << std::endl;
        std::terminate();
    }

    // convenience parameters
    const int V = this->config.vocab_size;
    const int Vp = this->config.padded_vocab_size;
    const int L = this->config.num_layers;
    const int NH = this->config.num_heads;
    const int C = this->config.channels;

    // validate inputs, all indices must be in the range [0, V)
    for (int i = 0; i < B * T; i++) {
        assert(0 <= inputs[i] && inputs[i] < V);
    }

    // allocate space for all the activations if needed (done here, lazily,
    // because the buffer sizes depend on B and T)
    if (this->acts == nullptr || B != this->batch_size || T != this->seq_len) {
        // first delete the old memory if it exists
        delete this->acts;
        delete[] this->inputs;
        // record the current B,T as well
        this->batch_size = B;
        this->seq_len = T;
        // and now allocate the space
        this->acts = new ActivationTensors{this->config, B, T};
        // also create memory for caching inputs and targets
        this->inputs = new int[B * T];
    }

    // cache the inputs/targets
    std::memcpy(this->inputs, inputs, B * T * sizeof(int));

    // forward pass
    const auto& params = *this->params; // for brevity
    const auto& acts = *this->acts;
    float* residual; // input to each transformer block: previous block's residual3
    encoder_forward(acts.encoded, inputs, params.wte, params.wpe, B, T, C); // encoding goes into residual[0]
    for (int l = 0; l < L; l++) {
        // layer 0 reads the embedding output; layer l reads layer l-1's residual3
        residual = l == 0 ? acts.encoded : acts.residual3 + (l - 1) * B * T * C;

        // get the pointers of the weights for this layer
        // (each parameter tensor packs all L layers contiguously, so the
        // per-layer slice starts at l * <per-layer element count>)
        const float* l_ln1w = params.ln1w + l * C;
        const float* l_ln1b = params.ln1b + l * C;
        const float* l_qkvw = params.qkvw + l * 3 * C * C;
        const float* l_qkvb = params.qkvb + l * 3 * C;
        const float* l_attprojw = params.attprojw + l * C * C;
        const float* l_attprojb = params.attprojb + l * C;
        const float* l_ln2w = params.ln2w + l * C;
        const float* l_ln2b = params.ln2b + l * C;
        const float* l_fcw = params.fcw + l * 4 * C * C;
        const float* l_fcb = params.fcb + l * 4 * C;
        const float* l_fcprojw = params.fcprojw + l * C * 4 * C;
        const float* l_fcprojb = params.fcprojb + l * C;

        // get the pointers of the activations for this layer
        float* l_ln1 = acts.ln1 + l * B * T * C;
        float* l_ln1_mean = acts.ln1_mean + l * B * T;
        float* l_ln1_rstd = acts.ln1_rstd + l * B * T;
        float* l_qkv = acts.qkv + l * B * T * 3 * C;
        float* l_atty = acts.atty + l * B * T * C;
        float* l_preatt = acts.preatt + l * B * NH * T * T;
        float* l_att = acts.att + l * B * NH * T * T;
        float* l_attproj = acts.attproj + l * B * T * C;
        float* l_residual2 = acts.residual2 + l * B * T * C;
        float* l_ln2 = acts.ln2 + l * B * T * C;
        float* l_ln2_mean = acts.ln2_mean + l * B * T;
        float* l_ln2_rstd = acts.ln2_rstd + l * B * T;
        float* l_fch = acts.fch + l * B * T * 4 * C;
        float* l_fch_gelu = acts.fch_gelu + l * B * T * 4 * C;
        float* l_fcproj = acts.fcproj + l * B * T * C;
        float* l_residual3 = acts.residual3 + l * B * T * C;

        // now do the forward pass for one transformer block:
        // ln1 -> qkv matmul -> attention -> attn projection -> residual add
        // -> ln2 -> fc matmul -> gelu -> fc projection -> residual add
        layernorm_forward(l_ln1, l_ln1_mean, l_ln1_rstd, residual, l_ln1w, l_ln1b, B, T, C);
        matmul_forward(l_qkv, l_ln1, l_qkvw, l_qkvb, B, T, C, 3 * C);
        attention_forward(l_atty, l_preatt, l_att, l_qkv, B, T, C, NH);
        matmul_forward(l_attproj, l_atty, l_attprojw, l_attprojb, B, T, C, C);
        residual_forward(l_residual2, residual, l_attproj, B * T * C);
        layernorm_forward(l_ln2, l_ln2_mean, l_ln2_rstd, l_residual2, l_ln2w, l_ln2b, B, T, C);
        matmul_forward(l_fch, l_ln2, l_fcw, l_fcb, B, T, C, 4 * C);
        gelu_forward(l_fch_gelu, l_fch, B * T * 4 * C);
        matmul_forward(l_fcproj, l_fch_gelu, l_fcprojw, l_fcprojb, B, T, 4 * C, C);
        residual_forward(l_residual3, l_residual2, l_fcproj, B * T * C);
    }
    residual = acts.residual3 + (L - 1) * B * T * C; // last residual is in residual3
    // final layernorm, then project to the (padded) vocabulary with the
    // token-embedding matrix (weight tying: wte is reused, no bias)
    layernorm_forward(acts.lnf, acts.lnf_mean, acts.lnf_rstd, residual, params.lnfw, params.lnfb, B, T, C);
    matmul_forward(acts.logits, acts.lnf, params.wte, nullptr, B, T, C, Vp);
    // softmax over the first V entries of each Vp-wide logits row
    softmax_forward(acts.probs, acts.logits, B, T, V, Vp);
}

// Generate the next token given the previous `t` tokens in `inputs`.
// forward() is run with the sequence length rounded up via up8 (presumably
// to a multiple of 8, so activation buffers are reused across nearby
// lengths — confirm in function.h).
// BUG FIX: the original passed `inputs` directly to forward() with length
// up8(t), so forward() validated and memcpy'd up8(t) ints from a buffer
// that only holds t — an out-of-bounds read. Copy into a zero-padded local
// buffer instead (0 is a valid token id, and we only sample from position
// t-1, which padding after it does not feed into under causal attention).
int GPT2::generate_one(const int* inputs, const int t) {
    const int T = up8(t);
    std::vector<int> padded(T, 0);
    std::copy(inputs, inputs + t, padded.begin());
    this->forward(padded.data(), 1, T);
    // probabilities of the last real position; rows are padded_vocab_size wide
    const float* probs = this->acts->probs + (t - 1) * this->config.padded_vocab_size;
    // sample an index from the first vocab_size entries (the rest is padding)
    return sampler(probs, this->config.vocab_size);
}

// Seed the pseudo-random engine member `gen` once from the non-deterministic
// source member `rd` (both declared in gpt2.h), so each Sampler instance
// produces an independent stream.
Sampler::Sampler(): gen(rd()) {
}

// Draw one index in [0, size) with probability proportional to probs[i].
// Re-parameterizes the member distribution `dist` from the weight range
// [probs, probs + size) on every call — presumably a
// std::discrete_distribution, whose weights need not be normalized; confirm
// the member's declared type in gpt2.h.
int Sampler::operator()(const float* probs, const int size) {
    dist.param({probs, probs + size});
    return dist(gen);
}
