#include "DeepCMP_RT.h"
#include "Preprocess_CMP_RT.h"

// 4-D tensor container with an optimized contiguous memory layout
struct Tensor4D {
    int batch;    // batch dimension (fixed at 1 in this pipeline)
    int height;   // frequency axis (e.g. 129)
    int width;    // time axis (e.g. 16)
    int channel;  // channel dimension (fixed at 1)
    std::vector<float> data; // element storage, row-major order

    // Pre-allocate capacity for `total_batches` tensors of this shape.
    // BUG FIX: the original ignored `total_batches` and reserved room for a
    // single tensor only; the parameter now scales the reservation.
    // Widen to size_t before multiplying to avoid int overflow.
    void reserve_space(int total_batches) {
        const std::size_t per_tensor =
            static_cast<std::size_t>(batch) * height * width * channel;
        data.reserve(per_tensor * static_cast<std::size_t>(total_batches));
    }
};

// Constructor
DeepCMP::DeepCMP()
{
    // Start with no input buffer; LoadModel() allocates it later.
    input_tensor_.data = nullptr;
    // Acquire the backing RKNN inference engine.
    engine_ = CreateRKNNEngine();
}

// Destructor
DeepCMP::~DeepCMP()
{
    // Release every heap buffer allocated in LoadModel().
    // free(nullptr) is a defined no-op, so no explicit null check is needed;
    // pointers are reset afterwards so repeated cleanup stays harmless.
    free(input_tensor_.data);
    input_tensor_.data = nullptr;

    for (auto &out : output_tensors_)
    {
        free(out.data);
        out.data = nullptr;
    }
}


// Load the model and fetch input/output tensor attributes
nn_error_e DeepCMP::LoadModel(const char *model_path)
{
    auto ret = engine_->LoadModelFile(model_path);
    if (ret != NN_SUCCESS)
    {
        NN_LOG_ERROR("yolo load model file failed");
        return ret;
    }

    // --- input tensor -------------------------------------------------
    auto input_shapes = engine_->GetInputShapes();

    // This model is expected to expose exactly one input.
    if (input_shapes.size() != 1)
    {
        NN_LOG_ERROR("yolo input tensor number is not 1, but %ld", input_shapes.size());
        return NN_RKNN_INPUT_ATTR_ERROR;
    }
    nn_tensor_attr_to_cvimg_input_data(input_shapes[0], input_tensor_);
    // Size in bytes = element count * element size for the tensor's dtype.
    input_tensor_.attr.size =
        input_tensor_.attr.n_elems * nn_tensor_type_to_size(input_tensor_.attr.type);
    input_tensor_.data = malloc(input_tensor_.attr.size);
    if (input_tensor_.data == nullptr)
    {
        // FIX: the original never checked the allocation result.
        NN_LOG_ERROR("failed to allocate input tensor buffer");
        return NN_RKNN_INPUT_ATTR_ERROR;
    }

    // --- output tensors -----------------------------------------------
    auto output_shapes = engine_->GetOutputShapes();

    // FIX: size_t index — the original compared a signed int against size().
    for (size_t i = 0; i < output_shapes.size(); i++)
    {
        tensor_data_s tensor;
        tensor.attr.n_elems = output_shapes[i].n_elems;
        tensor.attr.n_dims = output_shapes[i].n_dims;
        for (int j = 0; j < output_shapes[i].n_dims; j++)
        {
            tensor.attr.dims[j] = output_shapes[i].dims[j];
        }
        // Output tensors must be FP16 (the original comment incorrectly
        // said float32 while the code checked NN_TENSOR_FLOAT16).
        if (output_shapes[i].type != NN_TENSOR_FLOAT16)
        {
            NN_LOG_ERROR("yolo output tensor type is not FP16, but %d", output_shapes[i].type);
            return NN_RKNN_OUTPUT_ATTR_ERROR;
        }
        tensor.attr.type = output_shapes[i].type;
        tensor.attr.index = static_cast<int>(i);
        tensor.attr.layout = output_shapes[i].layout;
        tensor.attr.zp = output_shapes[i].zp;
        tensor.attr.scale = output_shapes[i].scale;
        tensor.attr.size = output_shapes[i].n_elems * nn_tensor_type_to_size(tensor.attr.type);
        tensor.data = malloc(tensor.attr.size);
        if (tensor.data == nullptr)
        {
            // Earlier tensors already pushed are released by the destructor.
            NN_LOG_ERROR("failed to allocate output tensor buffer %zu", i);
            return NN_RKNN_OUTPUT_ATTR_ERROR;
        }
        output_tensors_.push_back(tensor);
        out_zps_.push_back(output_shapes[i].zp);
        out_scales_.push_back(output_shapes[i].scale);
    }
    return NN_SUCCESS;
}

// Run the model pipeline (preprocess + inference)
nn_error_e DeepCMP::Run(std::vector<float> &mono, std::vector<std::vector<float>> &mel_spectrogram,
                        std::vector<std::vector<float>> &mel_filter_bank, int &Env_Class)
{
    // FIX: the original discarded both stage results and always returned
    // NN_SUCCESS; propagate the first failing stage's error code instead.
    nn_error_e ret = Preprocess(mono, mel_spectrogram, mel_filter_bank);
    if (ret != NN_SUCCESS)
    {
        return ret;
    }
    // Inference consumes the mel spectrogram and writes the class index.
    return Inference(mel_spectrogram, input_tensor_, Env_Class);
}

// Audio preprocessing (mel spectrogram) — original comment said "image", but the pipeline processes a mono audio signal
nn_error_e DeepCMP::Preprocess(std::vector<float> &mono, std::vector<std::vector<float>> &mel_spectrogram,
                               std::vector<std::vector<float>> &mel_filter_bank)
{
    // Compute the mel-scaled STFT of the mono signal; the result is written
    // into mel_spectrogram for the Inference() stage to consume.
    compute_Melstft(mono, mel_spectrogram, mel_filter_bank);
    return NN_SUCCESS;
}

// Inference
nn_error_e DeepCMP::Inference(std::vector<std::vector<float>> &mel_spectrogram, tensor_data_s &tensor, int &Env_Class)
{
    // Pack the mel spectrogram into the model's FP16 input buffer, run the
    // RKNN engine, then arg-max the output logits into Env_Class.
    //
    // Fixes vs. the original implementation:
    //  * removed a `new float[...]` buffer that was allocated but never used
    //    nor freed (memory leak on every call);
    //  * samples are read as float and converted to FP16 exactly once (the
    //    original staged them in a __fp16 buffer and then "converted"
    //    __fp16 to __fp16, a no-op second pass);
    //  * the error paths now return instead of falling through and
    //    dereferencing a null output pointer;
    //  * input dimensions are validated before indexing;
    //  * dead locals (freq_bins, batch_size, total_batches) removed.
    const int num_time = 251; // expected number of time frames
    const int num_freq = 32;  // expected number of mel bins

    // Guard against out-of-bounds access on undersized input.
    if (mel_spectrogram.size() < static_cast<size_t>(num_time) ||
        mel_spectrogram[0].size() < static_cast<size_t>(num_freq))
    {
        NN_LOG_ERROR("mel spectrogram smaller than expected %dx%d", num_time, num_freq);
        Env_Class = -1;
        return NN_RKNN_INPUT_ATTR_ERROR;
    }

    // Transpose [time][freq] -> frequency-major [freq][time], converting each
    // sample to FP16 directly into the model input buffer (same layout the
    // original produced: f outer, t inner, consecutive linear index).
    __fp16 *dst = static_cast<__fp16 *>(tensor.data);
    size_t idx = 0;
    for (int f = 0; f < num_freq; ++f)
    {
        for (int t = 0; t < num_time; ++t)
        {
            dst[idx++] = static_cast<__fp16>(mel_spectrogram[t][f]);
        }
    }

    // FIX: validate outputs before touching output_tensors_[0] below.
    if (output_tensors_.empty())
    {
        Env_Class = -1;
        return NN_RKNN_OUTPUT_ATTR_ERROR;
    }

    std::vector<tensor_data_s> inputs;
    inputs.push_back(tensor);
    inputs[0].attr.zp = 0;
    inputs[0].attr.scale = 1;
    output_tensors_[0].attr.layout = NN_TENSOR_NHWC;
    engine_->Run(inputs, output_tensors_, false);

    if (output_tensors_[0].data == nullptr)
    {
        // FIX: the original set Env_Class = -1 here but then fell through
        // and dereferenced the null output buffer anyway.
        Env_Class = -1;
        return NN_RKNN_OUTPUT_ATTR_ERROR;
    }

    // Arg-max over the FP16 output logits to pick the environment class.
    const int out_count = output_tensors_[0].attr.n_elems;
    const __fp16 *logits = static_cast<const __fp16 *>(output_tensors_[0].data);
    int best = 0;
    for (int i = 1; i < out_count; ++i)
    {
        if (logits[i] > logits[best])
        {
            best = i;
        }
    }
    Env_Class = best;
    return NN_SUCCESS;
}