#include <gtest/gtest.h>
#include <torch/torch.h>
#include <torch_npu/csrc/libs/torch_npu.h>
#include <acl/acl.h>
#include <experiment/runtime/runtime/rt.h>

#include "kernel_loader.h"
#include "../src/torch_api/npu_triton_add_kernel.h"
#include "test_utils.h"

namespace {

constexpr int64_t kNumElements = 98432;  // total element count per input tensor
constexpr int32_t kBlockSize = 1024;     // block size handed to the kernel launch (see commented test body)
constexpr float kTolerance = 1e-5f;      // max allowed elementwise |triton - torch| difference
constexpr int32_t kDeviceId = 0;         // NPU device used by aclrtSetDevice and cleanup

// Test parameters: kernel binary filename and kernel name
// NOTE: initialized positionally (TestParams{binary, name}), so member order
// is part of the interface — do not reorder.
struct TestParams {
    std::string binary_filename;  // file name resolved via test_utils::GetKernelBinaryPath
    std::string kernel_name;      // name used for KernelLoader lookup/registration
};

// Global ACL initialization flag
// Guards one-time aclInit/aclrtSetDevice; reset by CleanupACL().
bool g_acl_initialized = false;

/**
 * @brief Check if NPU device is available
 */
// In IsNpuAvailable():
// bool IsNpuAvailable() {
//     try {
//         auto tensor = torch::zeros({1}, torch::TensorOptions().device("npu:0"));
//         (void)tensor;
//         return true;
//     } catch (...) {
//         return false;
//     }
// }

/**
 * @brief Initialize ACL and device (called once for all tests)
 */
void InitializeACL() {
    if (!g_acl_initialized) {
        aclError acl_ret = aclInit(nullptr);
        ASSERT_EQ(acl_ret, ACL_SUCCESS) << "aclInit failed: " << acl_ret;

        aclError rt_ret = aclrtSetDevice(kDeviceId);
        ASSERT_EQ(rt_ret, ACL_SUCCESS) << "rtSetDevice failed: " << rt_ret;
        g_acl_initialized = true;
    }
}

/**
 * @brief Cleanup ACL and device (called after all tests)
 */
void CleanupACL() {
    if (g_acl_initialized) {
        rtDeviceReset(kDeviceId);
        aclFinalize();
        g_acl_initialized = false;
    }
}

} // namespace

/**
 * @brief Test fixture for NPU Triton kernel tests
 * 
 * This fixture handles:
 * - NPU availability check
 * - ACL initialization
 * - Kernel loading
 * - Test parameter management
 */
class NpuTritonKernelTest : public ::testing::TestWithParam<TestParams> {
protected:
    /**
     * @brief Per-test setup: initialize ACL, resolve and load the kernel.
     *
     * Fails the test (fatal assertion) if the kernel binary is missing or
     * the kernel cannot be loaded.
     */
    void SetUp() override {
        // NPU availability probe is currently disabled:
        // if (!IsNpuAvailable()) {
        //     GTEST_SKIP() << "NPU device not available";
        // }

        // Initialize ACL (only once per process). InitializeACL() uses fatal
        // assertions internally; without ASSERT_NO_FATAL_FAILURE a failure in
        // the helper would only return from the helper and SetUp() would keep
        // running with an uninitialized ACL state.
        ASSERT_NO_FATAL_FAILURE(InitializeACL());

        // Get test parameters
        const auto& params = GetParam();
        kernel_name_ = params.kernel_name;
        binary_filename_ = params.binary_filename;

        // Build full binary path
        binary_path_ = test_utils::GetKernelBinaryPath(binary_filename_);

        // Verify kernel binary exists
        ASSERT_TRUE(test_utils::FileExists(binary_path_))
            << "Kernel binary not found: " << binary_path_;

        // Load kernel, reusing a cached handle when one exists.
        auto handle = KernelLoader::getKernel(kernel_name_);
        if (!handle.isValid()) {
            handle = KernelLoader::loadKernel(kernel_name_, binary_path_);
        }
        ASSERT_TRUE(handle.isValid())
            << "Failed to load kernel: " << kernel_name_ << " from " << binary_path_;

        // Set random seed for reproducibility
        torch::manual_seed(0);
    }

    // void TearDown() override {
    //     if (IsNpuAvailable()) {
    //         aclrtSynchronizeDevice();
    //     }
    // }

    std::string kernel_name_;      // kernel symbol name from the test params
    std::string binary_filename_;  // bare binary file name from the test params
    std::string binary_path_;      // resolved absolute/relative path to the binary
};

/**
 * @brief Test case: Add kernel matches torch reference
 * 
 * This test:
 * 1. Creates random input tensors on NPU
 * 2. Computes reference result using PyTorch
 * 3. Computes actual result using Triton kernel
 * 4. Compares results with tolerance check
 */
/**
 * @brief Test case: Add kernel matches torch reference
 *
 * Intended flow (currently disabled, see below):
 * 1. Creates random input tensors on NPU
 * 2. Computes reference result using PyTorch
 * 3. Computes actual result using Triton kernel
 * 4. Compares results with tolerance check
 */
TEST_P(NpuTritonKernelTest, MatchesTorchReference) {
    // The entire comparison body below is commented out; without an explicit
    // skip this test would pass vacuously and falsely report coverage.
    // Report it as skipped until the kernel-launch path is re-enabled.
    GTEST_SKIP() << "Kernel comparison body is disabled; re-enable once the "
                    "NPU launch path is available";

    // aclrtStream stream;
    // aclError ret = aclrtCreateStream(&stream);
    // ASSERT_EQ(ret, ACL_SUCCESS) << "aclrtCreateStream failed: " << ret;

    // // c10::Device npu_device(c10::DeviceType::PrivateUse1, kDeviceId);
    // std::string npu_device = "npu:" + std::to_string(kDeviceId);
    // auto options = torch::TensorOptions().dtype(torch::kFloat32).device(npu_device.c_str());

    // // Create random input tensors
    // torch::Tensor x = torch::rand({kNumElements}, options);
    // torch::Tensor y = torch::rand({kNumElements}, options);

    // // Compute reference result using PyTorch
    // auto torch_output = x + y;

    // // Compute actual result using Triton kernel
    // int32_t gridX = 97, gridY = 1, gridZ = 1;
    // auto triton_output = TritonTorch::launch_add_kernel(x, y, kNumElements, kBlockSize, gridX);

    // ret = aclrtSynchronizeStream(stream);
    // ASSERT_EQ(ret, ACL_SUCCESS) << "aclrtSynchronizeStream failed: " << ret;
    // // Compare results
    // auto diff = torch::abs(torch_output - triton_output);
    // float max_diff = torch::max(diff).item<float>();

    // EXPECT_LT(max_diff, kTolerance)
    //     << "Maximum difference (" << max_diff << ") exceeds tolerance (" << kTolerance << ")";

    // ret = aclrtDestroyStream(stream);
    // ASSERT_EQ(ret, ACL_SUCCESS) << "aclrtDestroyStream failed: " << ret;
}

// Instantiate test cases with different kernel configurations
// Register the parameterized kernel configurations. The final argument is a
// name generator: it derives each test's display suffix from the kernel name,
// so a failing case is immediately identifiable in the test report.
INSTANTIATE_TEST_SUITE_P(
    AddKernelTests,
    NpuTritonKernelTest,
    ::testing::Values(
        TestParams{"add_kernel.npubin", "add_kernel"}
        // Register additional kernels here as they become available, e.g.:
        // TestParams{"other_kernel.npubin", "other_kernel"}
    ),
    [](const ::testing::TestParamInfo<TestParams>& param_info) {
        return param_info.param.kernel_name;
    }
);

// Global test environment for ACL initialization/cleanup
class NpuTestEnvironment : public ::testing::Environment {
public:
    // Intentionally empty: ACL setup happens lazily in the fixture's SetUp().
    void SetUp() override {
        // ACL initialization is done per-test in SetUp()
    }

    // Runs once after all tests: release device/ACL state, then drop any
    // kernel handles cached by the loader.
    void TearDown() override {
        CleanupACL();
        KernelLoader::cleanup();
    }
};

/**
 * @brief Test entry point: wires up gtest and the NPU global environment.
 */
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);

    // gtest takes ownership of the environment and deletes it after TearDown().
    auto* npu_environment = new NpuTestEnvironment();
    ::testing::AddGlobalTestEnvironment(npu_environment);

    return RUN_ALL_TESTS();
}
