#pragma once

#include <algorithm>  // std::min, std::max, std::lower_bound
#include <cmath>      // std::log, std::pow
#include <cstdint>    // int32_t, uint8_t
#include <stdexcept>  // std::invalid_argument
#include <string>     // std::string (operator names)
#include <vector>

#include <omp.h>      // OpenMP parallelism

#include "kv.pb.h"
#include "doca_brpc_unit.h"

class CpuOperatorUnit {
public:
    static CpuOperatorUnit& Instance() {
        static CpuOperatorUnit instance;
        return instance;
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void fillnull(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;

        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int feature_stride = batch_size * ele_feature;

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    Tinput* input_ptr = reinterpret_cast<Tinput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    Toutput* output_ptr = reinterpret_cast<Toutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    Tinput x = *input_ptr;
                    if (x == 1) x = 2;
                    // std::cout << (*input_ptr) << ' ';
                    *output_ptr = static_cast<Toutput>(x);
                }
            }
        }
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void sigrid_hash(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int salt = SIGRID_ARG1;
        int num_embeddings = SIGRID_ARG2;
        (void)salt;
        (void)num_embeddings;

        int feature_stride = batch_size * ele_feature;

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    Tinput* input_ptr = reinterpret_cast<Tinput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    Toutput* output_ptr = reinterpret_cast<Toutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    Tinput x = *input_ptr;
                    uint64_t value = (static_cast<uint64_t>(x) ^ static_cast<uint64_t>(salt));
                    value = (value * 2654435761U) ^ (value >> 16);
                    value = value % static_cast<uint64_t>(num_embeddings);

                    // just for test
                    // value = static_cast<uint64_t>(x) % static_cast<uint64_t>(num_embeddings);
                    // for (int o = 0; o < 100000000; ++o) value = value * value;

                    *output_ptr = static_cast<Toutput>(value);
                }
            }
        }
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void clamp_list(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int feature_stride = batch_size * ele_feature;

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    Tinput* input_ptr = reinterpret_cast<Tinput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    Toutput* output_ptr = reinterpret_cast<Toutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    Tinput x = *input_ptr;
                    if (x < CLAMP_ARG1) x = CLAMP_ARG1;
                    else if (x > CLAMP_ARG2) x = CLAMP_ARG2;

                    *output_ptr = static_cast<Toutput>(x);
                }
            }
        }
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void ngram_list(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int32_t actual_n_of_gram = (NGRAM_ARG > ele_feature) ? ele_feature : NGRAM_ARG;
        int32_t final_length = (ele_feature - actual_n_of_gram + 1) * actual_n_of_gram;

        int input_feature_stride  = batch_size * ele_feature;
        int output_feature_stride = batch_size * final_length;

        for (int i = Start_feature; i < End_feature; ++i) {
            int input_feature_base  = i * input_feature_stride;
            int output_feature_base = i * output_feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int input_batch_base  = input_feature_base + j * ele_feature;
                int output_batch_base = output_feature_base + j * final_length;

                int cnt_output = 0;
                for (int k = 0; k < ele_feature - actual_n_of_gram + 1; ++k) {
                    for (int l = 0; l < actual_n_of_gram; ++l) {
                        int input_idx  = input_batch_base + (k + l);
                        int output_idx = output_batch_base + cnt_output;

                        Tinput* input_ptr = reinterpret_cast<Tinput*>(
                            reinterpret_cast<uint8_t*>(input_data) + input_idx * type_input_size
                        );
                        Toutput* output_ptr = reinterpret_cast<Toutput*>(
                            reinterpret_cast<uint8_t*>(output_data) + output_idx * type_output_size
                        );

                        *output_ptr = static_cast<Toutput>(*input_ptr);
                        ++cnt_output;
                    }
                }
            }
        }
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void firstx(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int32_t firstx_len = (FIRSTX_ARG > ele_feature) ? ele_feature : (int32_t)FIRSTX_ARG;

        int input_feature_stride  = batch_size * ele_feature;
        int output_feature_stride = batch_size * firstx_len;

        for (int i = Start_feature; i < End_feature; ++i) {
            int input_feature_base  = i * input_feature_stride;
            int output_feature_base = i * output_feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int input_batch_base  = input_feature_base + j * ele_feature;
                int output_batch_base = output_feature_base + j * firstx_len;

                for (int k = 0; k < firstx_len; ++k) {
                    int input_idx  = input_batch_base + k;
                    int output_idx = output_batch_base + k;

                    Tinput* input_ptr = reinterpret_cast<Tinput*>(
                        reinterpret_cast<uint8_t*>(input_data) + input_idx * type_input_size
                    );
                    Toutput* output_ptr = reinterpret_cast<Toutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + output_idx * type_output_size
                    );

                    // std::cout << (*input_ptr) << ' ';

                    *output_ptr = static_cast<Toutput>(*input_ptr);
                } //std::cout << std::endl;

                // for (int k = 0; k < ele_feature; ++k) {
                //     int input_idx  = input_batch_base + k;

                //     Tinput* input_ptr = reinterpret_cast<Tinput*>(
                //         reinterpret_cast<uint8_t*>(input_data) + input_idx * type_input_size
                //     );


                //     std::cout << (*input_ptr) << ' ';
                // } std::cout << std::endl;
            }
        }
    }

    template<typename TInput, typename TOutput, typename Tstruct>
    void onehot(TInput* input_data, TOutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(TInput);
        int type_output_size = sizeof(TOutput);

        int input_feature_stride  = batch_size * ele_feature;
        int output_feature_stride = batch_size * ele_feature * ONEHOT_CLASS;

        TInput lower = (TInput)ONEHOT_LOW;
        TInput upper = (TInput)ONEHOT_HIGH;
        int num_class = ONEHOT_CLASS;

        if (num_class <= 0) {
            throw std::invalid_argument("num_class must be greater than 0.");
        }
        if (upper < lower) {
            throw std::invalid_argument("upper bound should be larger than lower bound.");
        }

        for (int i = Start_feature; i < End_feature; ++i) {
            int input_feature_base  = i * input_feature_stride;
            int output_feature_base = i * output_feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int input_batch_base  = input_feature_base + j * ele_feature;
                int output_batch_base = output_feature_base + j * ele_feature * num_class;

                for (int k = 0; k < ele_feature; ++k) {
                    int input_idx  = input_batch_base + k;
                    int output_idx = output_batch_base + k * num_class;

                    TInput* input_ptr = reinterpret_cast<TInput*>(
                        reinterpret_cast<uint8_t*>(input_data) + input_idx * type_input_size
                    );
                    TOutput* output_ptr = reinterpret_cast<TOutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + output_idx * type_output_size
                    );

                    TInput val = static_cast<TInput>(*input_ptr);
                    TInput step = (upper - lower) / (TInput)(num_class);
                    int32_t class_idx = (int)((val - lower) / step);

                    if (class_idx < 0 || class_idx >= num_class) {
                        class_idx = 0;
                    }

                    for (int c = 0; c < num_class; ++c) {
                        output_ptr[c] = (c == class_idx) ? static_cast<TOutput>(1) : static_cast<TOutput>(0);
                    }
                }
            }
        }
    }

    template<typename TInput, typename TOutput, typename Tstruct>
    void logit(TInput* input_data, TOutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(TInput);
        int type_output_size = sizeof(TOutput);

        int feature_stride = batch_size * ele_feature;

        constexpr TInput one = TInput(1);
        TInput LOGIT_EPSILON = (TInput)(LOGIT_ARG);

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    TInput* input_ptr = reinterpret_cast<TInput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    TOutput* output_ptr = reinterpret_cast<TOutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    TInput x = *input_ptr;

                    // Clamp to [epsilon, 1 - epsilon]
                    if (x < LOGIT_EPSILON) {
                        x = LOGIT_EPSILON;
                    } else if (x > one - LOGIT_EPSILON) {
                        x = one - LOGIT_EPSILON;
                    }

                    // Compute logit
                    TInput p = x;
                    TInput logit_val = std::log(p / (one - p));

                    *output_ptr = static_cast<TOutput>(logit_val);
                }
            }
        }
    }

    template<typename TInput, typename TOutput, typename Tstruct>
    void bucketize(TInput* input_data, TOutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(TInput);
        int type_output_size = sizeof(TOutput);

        int feature_stride = batch_size * ele_feature;

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    TInput* input_ptr = reinterpret_cast<TInput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    TOutput* output_ptr = reinterpret_cast<TOutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    TInput value = *input_ptr;

                    // 查找 bucket index
                    auto it = std::lower_bound(bucket_vec.begin(), bucket_vec.end(), value);
                    int bucket_idx = static_cast<int>(std::distance(bucket_vec.begin(), it));

                    *output_ptr = static_cast<TOutput>(bucket_idx);
                }
            }
        }
    }

    template<typename TInput, typename TOutput, typename Tstruct>
    void boxcox(TInput* input_data, TOutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(TInput);
        int type_output_size = sizeof(TOutput);

        int feature_stride = batch_size * ele_feature;

        // 假设 BOXCOX_LAMBDA 是 TInput 类型或可以隐式转成 TInput
        TInput BOXCOX_LAMBDA = (TInput) (BOXCOX_ARG);

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    TInput* input_ptr = reinterpret_cast<TInput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    TOutput* output_ptr = reinterpret_cast<TOutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    TInput val = *input_ptr;
                    const TInput zero = static_cast<TInput>(0);
                    const TInput one = static_cast<TInput>(1);

                    if (val > zero) {
                        if (BOXCOX_LAMBDA == zero) {
                            val = std::log(val);
                        } else {
                            val = (std::pow(val, BOXCOX_LAMBDA) - one) / BOXCOX_LAMBDA;
                        }
                    }

                    *output_ptr = static_cast<TOutput>(val);
                }
            }
        }
    }

    template<typename Tinput, typename Toutput, typename Tstruct>
    void cast_type(Tinput* input_data, Toutput* output_data, Tstruct* arg) {
        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int batch_size    = End_batch - Start_batch;
        int ele_feature   = arg->element_per_feature;
        int type_input_size  = sizeof(Tinput);
        int type_output_size = sizeof(Toutput);

        int feature_stride = batch_size * ele_feature;

        for (int i = Start_feature; i < End_feature; ++i) {
            int feature_base_idx = i * feature_stride;

            #pragma omp parallel for num_threads(num_thread)
            for (int j = Start_batch; j < End_batch; ++j) {
                int batch_base_idx = feature_base_idx + j * ele_feature;

                for (int k = 0; k < ele_feature; ++k) {
                    int idx = batch_base_idx + k;

                    Tinput* input_ptr = reinterpret_cast<Tinput*>(
                        reinterpret_cast<uint8_t*>(input_data) + idx * type_input_size
                    );
                    Toutput* output_ptr = reinterpret_cast<Toutput*>(
                        reinterpret_cast<uint8_t*>(output_data) + idx * type_output_size
                    );

                    Tinput x = *input_ptr;
                    *output_ptr = static_cast<Toutput>(x);
                }
            }
        }
    }
    
    /* 增加一个网络算子 */
    template<typename Tinput, typename Toutput, typename Tstruct>
    void embedding_fetch(Tinput * input_data, Toutput* output_data, Tstruct * arg) {

        int Start_feature = arg->feature_start_id;
        int End_feature   = arg->feature_end_id;
        int Start_batch   = arg->batch_start_id;
        int End_batch     = arg->batch_end_id;
        int ele_feature   = arg->element_per_feature;

        BrpcKvstore & brpckvstore = BrpcKvstore::getInstance();
        std::string name = arg->name;
        size_t pos = name.find('#');
        if (pos != std::string::npos) {
            name = name.substr(0, pos);
        }

        int total_key_num = 
            (End_feature - Start_feature) * (End_batch - Start_batch) * ele_feature;
        brpckvstore.fetch_embedding_async(name, 500, input_data, output_data, total_key_num, EMBEDDING_DIM);
    }

    template<typename Tstruct1, typename Tstruct2>
    void cpu_operator_execute(Tstruct1 ExecuteNode_info, Tstruct2 MemNode_info) {
        switch (ExecuteNode_info->op_type) {
            case LOGIT:
                this->logit((double *) ExecuteNode_info->input_addr, (double *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case BUCKETIZE:
                this->bucketize( (double *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case SIGRID_HASH:
                this->sigrid_hash( (uint64_t *) ExecuteNode_info->input_addr, (uint64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case CLAMP_LIST:
                this->clamp_list( (int64_t *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case BOXCOX:
                this->boxcox((double *) ExecuteNode_info->input_addr, (double *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case ONEHOT:
                this->onehot( (double *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case NGRAM:
                this->ngram_list( (int64_t *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case FIRSTX:
                this->firstx( (int64_t *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case MAPID:
                break;
            case FILLNULL:
                this->fillnull((int64_t *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case CAST:
                this->cast_type( (int64_t *) ExecuteNode_info->input_addr, (int64_t *) ExecuteNode_info->output_addr, MemNode_info);
                break;
            case EMBEDDING_FETCH:
                this->embedding_fetch( (int64_t *) ExecuteNode_info->input_addr, (float *) ExecuteNode_info->output_addr, MemNode_info);
            default:
                break;
        }
    }

    void set_thread(int thread_num) {
        this->num_thread = thread_num;
        if (this->num_thread > 16) this->num_thread = 16;
        bucket_vec = std::vector<double>{0.5, 1.0, 1.5};
    }

public:
    int num_thread;
    std::vector<double> bucket_vec;
};