/*
 * SPDX-FileCopyrightText: 2025 kvcache-ai
 * SPDX-FileCopyrightText: 2025 Qingcheng.AI
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * This file has adaption of open-source code from the following sources:
 * - https://github.com/kvcache-ai/ktransformers, licensed under Apache 2.0.
 */

#ifndef CPUINFER_OPERATOR_LINEAR_H
#define CPUINFER_OPERATOR_LINEAR_H

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

#include "conversion.h"
#include "cpuinfer.h"
#include "llama.cpp/ggml-impl.h"
#include "llama.cpp/ggml-quants.h"
#include "llama.cpp/ggml.h"
#include "llamafile/sgemm.h"
#include "shared_mem_buffer.h"

// Configuration for a dense linear (projection) layer: y = W * x.
struct LinearConfig {
    int input_size{0};    // number of input features per token
    int output_size{0};   // number of output features per token
    // Block size used to partition the output dimension across worker
    // tasks -- presumably; confirm against the forward_many implementation.
    int stride{0};
    // Maximum number of tokens handled in one forward group; sizes the
    // intermediate buffers owned by Linear.
    int group_max_len{0};
    // Non-owning pointer to the (possibly quantized) weight matrix data.
    void *proj{nullptr};
    ggml_type proj_type{};   // ggml storage type of `proj` (may be quantized)
    ggml_type hidden_type{}; // ggml type of the activation (input/output) data

    // Value-initialize every member: the original user-provided default
    // constructor left all fields indeterminate, so reading a
    // default-constructed config was undefined behavior.
    LinearConfig() = default;

    LinearConfig(int input_size, int output_size, int stride, int group_max_len,
                 void *proj, ggml_type proj_type, ggml_type hidden_type)
        : input_size(input_size), output_size(output_size), stride(stride),
          group_max_len(group_max_len), proj(proj), proj_type(proj_type),
          hidden_type(hidden_type) {}
};

/**
 * Dense linear layer evaluated on CPU, with weights possibly stored in a
 * quantized ggml type (see LinearConfig::proj_type).
 *
 * Buffers below are raw pointers; per shared_mem_buffer.h they are
 * presumably carved out of a shared pool -- confirm ownership in the .cpp.
 * NOTE(review): with a user-declared destructor and raw buffer pointers,
 * the implicit copy operations would alias those buffers; confirm
 * instances are never copied, or delete copy ctor/assign in a follow-up.
 */
class Linear {
  public:
    Linear(LinearConfig);
    ~Linear();
    // Pre-runs the layer before real use (e.g. to initialize/fault-in
    // state) -- see the .cpp for what is actually touched.
    void warm_up(CPUInfer *cpu_infer);
    // Processes a batch of `qlen` tokens; `input`/`output` layouts are
    // governed by config_.hidden_type.
    // (Parameters renamed from `CPUInfer` -- a name that shadowed its own
    // type -- to `cpu_infer`; declaration-only rename, no ABI impact.)
    void forward_many(int qlen, const void *input, void *output,
                      CPUInfer *cpu_infer);
    // Public entry point for a forward pass of `qlen` tokens; presumably
    // chunks by config_.group_max_len and defers to forward_many --
    // confirm in the .cpp.
    void forward(int qlen, const void *input, void *output,
                 CPUInfer *cpu_infer);

  private:
    LinearConfig config_;
    void *proj_; // [output_size * input_size ( /32 if quantized)]

    float *input_fp32_; // [group_max_len * input_size]
    uint8_t *
        proj_input_; // [group_max_len * input_size *
                     // ggml_type_size(ggml_internal_get_type_traits(proj_type).vec_dot_type)
                     // /
                     // ggml_blck_size(ggml_internal_get_type_traits(proj_type).vec_dot_type)]
    float *proj_output_; // [group_max_len * output_size]
};

#endif