#include "task/retinaface.h"
#include "task/rknn_box_priors.h"

RetinafaceCustom::RetinafaceCustom()
{
    // Detector is unusable until LoadModel() succeeds.
    ready_ = false;
    // Outputs default to the model's native type; LoadModel() may switch
    // this to float32 for fp16 models.
    want_float_ = false;
    // Input buffer is allocated lazily in LoadModel().
    input_tensor_.data = nullptr;
    engine_ = CreateRKNNEngine();
}

RetinafaceCustom::~RetinafaceCustom()
{
    // Free the malloc'd buffers backing the input/output tensors; the
    // engine itself is torn down by its own destructor.
    NN_LOG_DEBUG("release input tensor");
    if (input_tensor_.data != nullptr)
    {
        free(input_tensor_.data);
        input_tensor_.data = nullptr;
    }
    NN_LOG_DEBUG("release output tensor");
    for (size_t i = 0; i < output_tensors_.size(); ++i)
    {
        void *&buf = output_tensors_[i].data;
        if (buf != nullptr)
        {
            free(buf);
            buf = nullptr;
        }
    }
}

/**
 * Load the RKNN model file and pre-allocate the input/output tensor buffers.
 * @param model_path path to the .rknn model file
 * @return NN_SUCCESS on success; engine error, NN_RKNN_INPUT_ATTR_ERROR for an
 *         unexpected input count, or NN_RKNN_RUNTIME_ERROR on bad outputs /
 *         allocation failure.
 */
nn_error_e RetinafaceCustom::LoadModel(const char *model_path)
{
    auto ret = engine_->LoadModelFile(model_path);
    if (ret != NN_SUCCESS)
    {
        NN_LOG_ERROR("retinaface load model file failed");
        return ret;
    }
    // get input tensor
    auto input_shapes = engine_->GetInputShapes();

    // check number of input and n_dims (%zu: size() returns size_t, not long)
    if (input_shapes.size() != 1)
    {
        NN_LOG_ERROR("retinaface input tensor number is not 1, but %zu", input_shapes.size());
        return NN_RKNN_INPUT_ATTR_ERROR;
    }
    nn_tensor_attr_to_cvimg_input_data(input_shapes[0], input_tensor_);
    input_tensor_.data = malloc(input_tensor_.attr.size);
    if (input_tensor_.data == nullptr)
    {
        NN_LOG_ERROR("retinaface input tensor malloc failed");
        return NN_RKNN_RUNTIME_ERROR;
    }

    auto output_shapes = engine_->GetOutputShapes();
    // guard before reading output_shapes[0] below
    if (output_shapes.empty())
    {
        NN_LOG_ERROR("retinaface model has no output tensor");
        return NN_RKNN_RUNTIME_ERROR;
    }

    if (output_shapes[0].type == NN_TENSOR_FLOAT16)
    {
        // NPU emits fp16; ask the engine to convert outputs to float32.
        want_float_ = true;
        NN_LOG_WARNING("retinaface output tensor type is float16, want type set to float32");
    }
    for (size_t i = 0; i < output_shapes.size(); i++)
    {
        tensor_data_s tensor;
        tensor.attr.n_elems = output_shapes[i].n_elems;
        tensor.attr.n_dims = output_shapes[i].n_dims;
        for (int j = 0; j < output_shapes[i].n_dims; j++)
        {
            tensor.attr.dims[j] = output_shapes[i].dims[j];
        }
        // output tensor needs to be float32 when want_float_ is set
        tensor.attr.type = want_float_ ? NN_TENSOR_FLOAT : output_shapes[i].type;
        tensor.attr.index = 0;
        // size follows the (possibly widened) element type, not the raw one
        tensor.attr.size = output_shapes[i].n_elems * nn_tensor_type_to_size(tensor.attr.type);
        tensor.data = malloc(tensor.attr.size);
        if (tensor.data == nullptr)
        {
            NN_LOG_ERROR("retinaface output tensor malloc failed");
            return NN_RKNN_RUNTIME_ERROR;
        }
        output_tensors_.push_back(tensor);
        // keep quantization params for potential int8 dequantization
        out_zps_.push_back(output_shapes[i].zp);
        out_scales_.push_back(output_shapes[i].scale);
    }

    ready_ = true;
    return NN_SUCCESS;
}

/**
 * Letterbox the input image to the model size and fill the input tensor.
 * Records the original image size and letterbox params for Postprocess.
 * @param img              original BGR image
 * @param image_letterbox  output: letterboxed image at model resolution
 * @return NN_SUCCESS
 */
nn_error_e RetinafaceCustom::Preprocess(const cv::Mat &img, cv::Mat &image_letterbox)
{
    // (removed dead local: the width/height ratio was computed but never used)
    img_width = img.cols;
    img_height = img.rows;
    // dims[2] is model input width, dims[1] is model input height.
    letterbox(img, image_letterbox, letter_box, input_tensor_.attr.dims[2], input_tensor_.attr.dims[1]);
    // BGR image -> model input tensor (conversion handled by the helper)
    cvimg2tensor(image_letterbox, input_tensor_.attr.dims[2], input_tensor_.attr.dims[1], input_tensor_);
    return NN_SUCCESS;
}


nn_error_e RetinafaceCustom::Inference()
{
    // Single-image batch; results land in output_tensors_ (converted to
    // float32 when want_float_ is set).
    std::vector<tensor_data_s> inputs{input_tensor_};
    return engine_->Run(inputs, output_tensors_, want_float_);
}

/**
 * Decode raw network outputs in place and collect detections above threshold.
 * Boxes are decoded SSD-style (center/size with variances) from the anchor
 * priors into corner form; the five landmark points are decoded the same way.
 * model_in_h / model_in_w are kept for signature compatibility (unused).
 * Returns the number of kept detections; their anchor indices go into
 * filter_indice[] and their scores into props[].
 */
static int filterValidResult(float *scores, float *loc, float *landms, const float boxPriors[][4], int model_in_h, int model_in_w,
                             int filter_indice[], float *props, float threshold, const int num_results) {
    const float VARIANCES[2] = {0.1, 0.2};
    int validCount = 0;

    for (int i = 0; i < num_results; ++i) {
        const float face_score = scores[i * 2 + 1];
        if (face_score <= threshold) {
            continue;
        }
        filter_indice[validCount] = i;
        props[validCount] = face_score;
        ++validCount;

        const float *prior = boxPriors[i];
        float *box = loc + i * 4;
        // center/size decoding, then convert to (xmin, ymin, xmax, ymax)
        const float cx = box[0] * VARIANCES[0] * prior[2] + prior[0];
        const float cy = box[1] * VARIANCES[0] * prior[3] + prior[1];
        const float bw = expf(box[2] * VARIANCES[1]) * prior[2];
        const float bh = expf(box[3] * VARIANCES[1]) * prior[3];
        box[0] = cx - bw * 0.5f;
        box[1] = cy - bh * 0.5f;
        box[2] = box[0] + bw;
        box[3] = box[1] + bh;

        float *pts = landms + i * 10;
        for (int j = 0; j < 5; ++j) {
            pts[2 * j] = pts[2 * j] * VARIANCES[0] * prior[2] + prior[0];
            pts[2 * j + 1] = pts[2 * j + 1] * VARIANCES[0] * prior[3] + prior[1];
        }
    }

    return validCount;
}

// IoU of two corner-form boxes. The +1 terms follow the original
// inclusive-pixel convention of the reference implementation.
static float CalculateOverlap(float xmin0, float ymin0, float xmax0, float ymax0, float xmin1, float ymin1, float xmax1, float ymax1) {
    const float inter_w = fmax(0.f, fmin(xmax0, xmax1) - fmax(xmin0, xmin1) + 1);
    const float inter_h = fmax(0.f, fmin(ymax0, ymax1) - fmax(ymin0, ymin1) + 1);
    const float inter = inter_w * inter_h;
    const float area0 = (xmax0 - xmin0 + 1) * (ymax0 - ymin0 + 1);
    const float area1 = (xmax1 - xmin1 + 1) * (ymax1 - ymin1 + 1);
    const float uni = area0 + area1 - inter;
    return uni <= 0.f ? 0.f : inter / uni;
}

// Greedy non-maximum suppression. order[] holds detection indices sorted by
// descending score; suppressed entries are marked -1 in place. Boxes in
// outputLocations are normalized, so they are scaled by width/height before
// the IoU test. Always returns 0.
static int nms(int validCount, float *outputLocations, int order[], float threshold, int width, int height) {
    for (int i = 0; i < validCount; ++i) {
        const int keep = order[i];
        if (keep == -1) {
            continue;
        }
        const float *kb = outputLocations + keep * 4;
        for (int j = i + 1; j < validCount; ++j) {
            const int cand = order[j];
            if (cand == -1) {
                continue;
            }
            const float *cb = outputLocations + cand * 4;
            const float iou = CalculateOverlap(kb[0] * width, kb[1] * height, kb[2] * width, kb[3] * height,
                                               cb[0] * width, cb[1] * height, cb[2] * width, cb[3] * height);
            if (iou > threshold) {
                order[j] = -1; // suppressed by the higher-scoring box
            }
        }
    }
    return 0;
}

/**
 * In-place quicksort of input[left..right] into DESCENDING order, keeping the
 * parallel indices[] array in sync so indices[k] still labels input[k].
 * Hoare-style partition around the leftmost element; returns the pivot's
 * final position (left when the range is empty or a single element).
 */
static int quick_sort_indice_inverse(float *input, int left, int right, int *indices) {
    int low = left;
    int high = right;
    if (left >= right) {
        return low;
    }
    const float pivot = input[left];
    const int pivot_index = indices[left];
    while (low < high) {
        // from the right, skip values that belong after the pivot (<= pivot)
        while (low < high && input[high] <= pivot) {
            high--;
        }
        input[low] = input[high];
        indices[low] = indices[high];
        // from the left, skip values that belong before the pivot (>= pivot)
        while (low < high && input[low] >= pivot) {
            low++;
        }
        input[high] = input[low];
        indices[high] = indices[low];
    }
    // drop the pivot into its final slot, then sort both sides
    input[low] = pivot;
    indices[low] = pivot_index;
    quick_sort_indice_inverse(input, left, low - 1, indices);
    quick_sort_indice_inverse(input, low + 1, right, indices);
    return low;
}

// Clamp x into the inclusive range [min, max].
static int clamp(int x, int min, int max) {
    return (x > max) ? max : ((x < min) ? min : x);
}

/**
 * Decode raw model outputs into face boxes + 5-point landmarks in original
 * image coordinates. Applies score filtering, descending-score sort and NMS,
 * then undoes the letterbox transform. At most 128 faces are reported.
 * @param img    letterboxed image (unused except for signature symmetry)
 * @param result output detection list; result->count is set
 * @return NN_SUCCESS, or NN_RKNN_RUNTIME_ERROR for an unsupported input size
 */
nn_error_e RetinafaceCustom::Postprocess(const cv::Mat &img, retinaface_result *result)
{
    // Raw outputs: box regressions, 2-class scores, landmark regressions.
    float *location = (float *)output_tensors_[0].data;
    float *scores = (float *)output_tensors_[1].data;
    float *landms = (float *)output_tensors_[2].data;

    // Pick the precomputed anchor table matching the model input width.
    const float (*prior_ptr)[4];
    int num_priors = 0;
    if (input_tensor_.attr.dims[2] == 320) {
        num_priors = 4200;//anchors box number
        prior_ptr = BOX_PRIORS_320;
    } else if(input_tensor_.attr.dims[2] == 640){
        num_priors = 16800;//anchors box number
        prior_ptr = BOX_PRIORS_640;
    }
    else
    {
        printf("model_shape error!!!\n");
        return NN_RKNN_RUNTIME_ERROR;
    }

    // std::vector instead of variable-length arrays: VLAs are not standard
    // C++, and 16800 elements is a lot of stack. Vectors zero-initialize,
    // so the memsets are no longer needed.
    std::vector<int> filter_indices(num_priors, 0);
    std::vector<float> props(num_priors, 0.0f);

    // Decode anchors and keep candidates above BOX_THRESH.
    int validCount = filterValidResult(scores, location, landms, prior_ptr, input_tensor_.attr.dims[1], input_tensor_.attr.dims[2],
                                    filter_indices.data(), props.data(), BOX_THRESH, num_priors);

    // Sort surviving candidates by descending score, then suppress overlaps.
    quick_sort_indice_inverse(props.data(), 0, validCount - 1, filter_indices.data());
    nms(validCount, location, filter_indices.data(), NMS_THRESH, img_width, img_height);

    int last_count = 0;
    result->count = 0;
    for (int i = 0; i < validCount; ++i) {
        if (last_count >= 128) {
            printf("Warning: detected more than 128 faces, can not handle that\n");
            break;
        }
        if (filter_indices[i] == -1 || props[i] < VIS_THRESHOLD) {
            continue;
        }

        int n = filter_indices[i];

        // Map normalized model-space coords back through the letterbox:
        // scale to input pixels, subtract the (scaled) padding, then the
        // division by letter_box.scale below restores original-image pixels.
        float x1 = location[n * 4 + 0] * input_tensor_.attr.dims[2] - letter_box.x_pad * letter_box.scale;
        float y1 = location[n * 4 + 1] * input_tensor_.attr.dims[1] - letter_box.y_pad * letter_box.scale;
        float x2 = location[n * 4 + 2] * input_tensor_.attr.dims[2] - letter_box.x_pad * letter_box.scale;
        float y2 = location[n * 4 + 3] * input_tensor_.attr.dims[1] - letter_box.y_pad * letter_box.scale;

        int model_in_w = input_tensor_.attr.dims[2];
        int model_in_h = input_tensor_.attr.dims[1];
        result->object[last_count].box.left   = (int)(clamp(x1, 0, model_in_w) / letter_box.scale); // Face box
        result->object[last_count].box.top    = (int)(clamp(y1, 0, model_in_h) / letter_box.scale);
        result->object[last_count].box.right  = (int)(clamp(x2, 0, model_in_w) / letter_box.scale);
        result->object[last_count].box.bottom = (int)(clamp(y2, 0, model_in_h) / letter_box.scale);
        result->object[last_count].score = props[i];  // Confidence

        for (int j = 0; j < 5; ++j) { // Facial feature points
            float ponit_x = landms[n * 10 + 2 * j] * input_tensor_.attr.dims[2] - letter_box.x_pad * letter_box.scale;
            float ponit_y = landms[n * 10 + 2 * j + 1] * input_tensor_.attr.dims[1] - letter_box.y_pad * letter_box.scale;
            result->object[last_count].ponit[j].x = (int)(clamp(ponit_x, 0, model_in_w) / letter_box.scale);
            result->object[last_count].ponit[j].y = (int)(clamp(ponit_y, 0, model_in_h) / letter_box.scale);
        }
        last_count++;
    }

    result->count = last_count;

    return NN_SUCCESS;
}


/**
 * Full detection pipeline: letterbox/convert, NPU inference, decode results.
 * @param img    original BGR image
 * @param result output: detected faces with boxes, scores and landmarks
 * @return first failing stage's error code, or NN_SUCCESS
 */
nn_error_e RetinafaceCustom::Run(const cv::Mat &img, retinaface_result *result)
{
    cv::Mat img_letterbox;
    // Propagate stage failures instead of silently ignoring return codes.
    nn_error_e ret = Preprocess(img, img_letterbox);
    if (ret != NN_SUCCESS)
    {
        return ret;
    }
    ret = Inference();
    if (ret != NN_SUCCESS)
    {
        return ret;
    }
    return Postprocess(img_letterbox, result);
}

/* Align the face to horizontal: rotate the whole image so the line through
 * landmarks 0 and 1 (the two eyes in the 5-point layout) becomes level. */
nn_error_e RetinafaceCustom::FaceAlignment(const cv::Mat &img, cv::Mat &align_img, retinaface_object_t *result)
{
    // For the 5-point annotation, points 0 and 1 are the eye centers.
    float x = result->ponit[1].x - result->ponit[0].x;
    float y = result->ponit[1].y - result->ponit[0].y;

    // Angle of the eye line in degrees; keep 0 when the eyes are vertically
    // stacked (x == 0), matching the original behavior.
    float angle = 0;
    if (x != 0) {
        angle = atan2(y, x) * 180.0 / CV_PI;  // atan2 handles all quadrants
    }

    // Rotate about the image center.
    cv::Point2f center(img.cols / 2.0f, img.rows / 2.0f);

    // Build the rotation matrix (no scaling).
    cv::Mat rotationMatrix = cv::getRotationMatrix2D(center, angle, 1.0);

    // Apply the rotation.
    cv::warpAffine(img, align_img, rotationMatrix, img.size());

    // BUG FIX: the function previously fell off the end without returning a
    // value, which is undefined behavior for a non-void function.
    return NN_SUCCESS;
}