#include "backward.h"
#include "auxiliary.h"

#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAException.h>
#include <torch/types.h>

#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sort.h>

// Backward pass of the point-to-ellipse log-probability.
//
// Launch layout: 1D grid, one thread per sample point; threads with
// sample_idx >= num_all_points return immediately, so any grid that covers
// num_all_points is valid.
//
// For each sample point, gradients flow only through the single ellipse
// selected in the forward pass (max_idx[sample_idx]); a sentinel of -1 means
// "no ellipse matched" and the point contributes no gradient.
//
// Per-point layout assumptions (dense, as produced by the host wrapper):
//   sxy:    [num_all_points, 2]   sample-point xy coordinates
//   oxy:    [num_elipses, 2]      ellipse centers
//   invcov: [num_elipses, 2, 2]   row-major inverse covariance per ellipse
//
// grad_sxy is written uniquely per thread (plain store into zero-initialized
// output); grad_oxy / grad_invcov / grad_logdet_invcov are accumulated with
// atomicAdd because many sample points can map to the same ellipse.
//
// logdet_invcov, fids and num_elipses are currently unused here; they are kept
// so the kernel signature matches the forward pass / host wrapper.
__global__ void p2e_logproba_backward_kernel(
    const float* __restrict__ grad_output,
    const float* __restrict__ sxy,
    const float* __restrict__ oxy,
    const float* __restrict__ invcov,
    const float* __restrict__ logdet_invcov,   // unused (kept for interface parity)
    const int* __restrict__ fids,              // unused (kept for interface parity)
    const int* __restrict__ max_idx,
    float* __restrict__ grad_sxy,
    float* __restrict__ grad_oxy,
    float* __restrict__ grad_invcov,
    float* __restrict__ grad_logdet_invcov,
    int num_elipses,                           // unused (kept for interface parity)
    int num_sample_points,                     // unused (kept for interface parity)
    int num_all_points
) {
    int sample_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (sample_idx >= num_all_points) return;

    int max_ellipse_idx = max_idx[sample_idx];
    // -1 marks sample points that matched no ellipse in the forward pass.
    if (max_ellipse_idx == -1) return;

    // Upstream gradient for this sample point, read once.
    float go = grad_output[sample_idx];

    // Offset from the selected ellipse center to the sample point.
    float d_sq_x = sxy[sample_idx * 2] - oxy[max_ellipse_idx * 2];
    float d_sq_y = sxy[sample_idx * 2 + 1] - oxy[max_ellipse_idx * 2 + 1];

    float invcov_00 = invcov[max_ellipse_idx * 4];
    float invcov_01 = invcov[max_ellipse_idx * 4 + 1];
    float invcov_10 = invcov[max_ellipse_idx * 4 + 2];
    float invcov_11 = invcov[max_ellipse_idx * 4 + 3];

    // d(-d^T C d)/dd, the quadratic-form gradient; grad_sxy gets -g, grad_oxy
    // gets +g (the offset enters with opposite signs for the two inputs).
    float gx = 2.0f * d_sq_x * invcov_00 + d_sq_y * (invcov_10 + invcov_01);
    float gy = d_sq_x * (invcov_01 + invcov_10) + 2.0f * d_sq_y * invcov_11;

    // grad_sxy: unique slot per thread, no atomics needed.
    grad_sxy[sample_idx * 2] = go * (-gx);
    grad_sxy[sample_idx * 2 + 1] = go * (-gy);

    // grad_oxy: many points may share one ellipse, so accumulate atomically.
    atomicAdd(&grad_oxy[max_ellipse_idx * 2], go * gx);
    atomicAdd(&grad_oxy[max_ellipse_idx * 2 + 1], go * gy);

    // grad_invcov: d(-d^T C d)/dC = -d d^T.
    atomicAdd(&grad_invcov[max_ellipse_idx * 4], go * (-d_sq_x * d_sq_x));     // 00
    atomicAdd(&grad_invcov[max_ellipse_idx * 4 + 1], go * (-d_sq_x * d_sq_y)); // 01
    atomicAdd(&grad_invcov[max_ellipse_idx * 4 + 2], go * (-d_sq_y * d_sq_x)); // 10
    atomicAdd(&grad_invcov[max_ellipse_idx * 4 + 3], go * (-d_sq_y * d_sq_y)); // 11

    // grad_logdet_invcov: the log-det term enters the output linearly.
    atomicAdd(&grad_logdet_invcov[max_ellipse_idx], go);
}

// Host wrapper for p2e_logproba_backward_kernel.
//
// Inputs (all CUDA tensors; float tensors must be contiguous since the kernel
// indexes raw data pointers with a dense layout):
//   grad_output:   [num_faces, num_sample_points] upstream gradient
//   sxy:           [num_faces, num_sample_points, 2] sample-point coordinates
//   oxy:           [num_elipses, 2] ellipse centers
//   invcov:        [num_elipses, 2, 2] inverse covariances
//   logdet_invcov: [num_elipses] log-determinants
//   fids:          int32 face ids (forwarded to the kernel, unused there)
//   max_idx:       int32 per-sample selected ellipse index, -1 = none
//
// Returns zero-initialized gradients (grad_sxy, grad_oxy, grad_invcov,
// grad_logdet_invcov); entries not touched by the kernel stay zero.
std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor> p2e_logproba_backward(
    torch::Tensor grad_output,
    torch::Tensor sxy,
    torch::Tensor oxy,
    torch::Tensor invcov,
    torch::Tensor logdet_invcov,
    torch::Tensor fids,
    torch::Tensor max_idx
) {
    // Validate dtypes.
    TORCH_CHECK(grad_output.dtype() == torch::kFloat32, "grad_output must be a float32 tensor");
    TORCH_CHECK(sxy.dtype() == torch::kFloat32, "sxy must be a float32 tensor");
    TORCH_CHECK(oxy.dtype() == torch::kFloat32, "oxy must be a float32 tensor");
    TORCH_CHECK(invcov.dtype() == torch::kFloat32, "invcov must be a float32 tensor");
    TORCH_CHECK(logdet_invcov.dtype() == torch::kFloat32, "logdet_invcov must be a float32 tensor");
    TORCH_CHECK(fids.dtype() == torch::kInt32, "fids must be a int32 tensor");
    TORCH_CHECK(max_idx.dtype() == torch::kInt32, "max_idx must be a int32 tensor");

    // The kernel dereferences raw pointers, so inputs must live on the GPU and
    // be densely laid out.
    TORCH_CHECK(sxy.is_cuda(), "sxy must be a CUDA tensor");
    TORCH_CHECK(grad_output.is_contiguous(), "grad_output must be contiguous");
    TORCH_CHECK(sxy.is_contiguous(), "sxy must be contiguous");
    TORCH_CHECK(oxy.is_contiguous(), "oxy must be contiguous");
    TORCH_CHECK(invcov.is_contiguous(), "invcov must be contiguous");
    TORCH_CHECK(max_idx.is_contiguous(), "max_idx must be contiguous");

    int num_faces = sxy.size(0);
    int num_sample_points = sxy.size(1);
    int num_all_points = num_faces * num_sample_points;
    int num_elipses = oxy.size(0);

    // Zero-initialized outputs; the kernel only writes where gradients exist.
    torch::Tensor grad_sxy = torch::zeros({num_faces, num_sample_points, 2}, sxy.options());
    torch::Tensor grad_oxy = torch::zeros({num_elipses, 2}, oxy.options());
    torch::Tensor grad_invcov = torch::zeros({num_elipses, 2, 2}, invcov.options());
    torch::Tensor grad_logdet_invcov = torch::zeros({num_elipses}, logdet_invcov.options());

    // Skip the launch entirely for empty input: <<<0, t>>> would be an
    // invalid launch configuration, not a no-op.
    if (num_all_points > 0) {
        int threads_per_block = getOptimalThreadsPerBlock();
        int blocks_per_grid = (num_all_points + threads_per_block - 1) / threads_per_block;

        // Launch on PyTorch's current stream so this kernel is ordered with
        // surrounding ATen ops instead of the legacy default stream.
        cudaStream_t stream = at::cuda::getCurrentCUDAStream();
        p2e_logproba_backward_kernel<<<blocks_per_grid, threads_per_block, 0, stream>>>(
            // ---inputs---
            grad_output.data_ptr<float>(),
            sxy.data_ptr<float>(),
            oxy.data_ptr<float>(),
            invcov.data_ptr<float>(),
            logdet_invcov.data_ptr<float>(),
            fids.data_ptr<int>(),
            max_idx.data_ptr<int>(),
            // ---outputs---
            grad_sxy.data_ptr<float>(),
            grad_oxy.data_ptr<float>(),
            grad_invcov.data_ptr<float>(),
            grad_logdet_invcov.data_ptr<float>(),
            num_elipses,
            num_sample_points,
            num_all_points
        );
        // Surface bad-configuration / async launch errors instead of letting
        // them silently poison later CUDA calls.
        C10_CUDA_KERNEL_LAUNCH_CHECK();
    }

    return std::make_tuple(grad_sxy, grad_oxy, grad_invcov, grad_logdet_invcov);
}