#include <stdio.h>
#include <iostream>
#include <tuple>
#include <vector>
#include <torch/extension.h>


// Experiment log: alternative signatures tried earlier and abandoned.
//   std::tuple<torch::IntArrayRef, torch::Tensor> lzs_test()
//     -> returning torch::IntArrayRef is broken: IntArrayRef is a non-owning
//        view, so however it was initialized the returned sizes were garbage.
//   std::vector<torch::Tensor> lzs_test()
//     -> worked, but a single-tensor return is all this smoke test needs.
//
// Other findings kept for reference (all verified to compile and run):
//   - torch::randn({23, 66}, torch::kFloat32) and
//     torch::randn({23, 66}, {torch::Dtype(torch::kFloat64)}) are both OK.
//   - torch::randn({23, 66}, torch::kCUDA) and
//     torch::randn({23, 66}, {torch::Device(torch::kCUDA, 2)}) are both OK.
//   - Tensor.to(torch::Dtype(...)) works (Tensor Creation API vs Tensor
//     Indexing API both exist in the C++ bindings).
//   - TensorOptions().dtype(...).device(...).layout(...).requires_grad(...)
//     composes fine with randn/zeros, permute(...).contiguous(),
//     index_put_({...}, v), and torch::mm.

/// @brief Minimal PyTorch C++ extension smoke test.
///
/// Builds and returns a 233-element float32 zero tensor. The tensor is
/// placed on CUDA device 0 when a CUDA runtime is available; otherwise it
/// falls back to the CPU so the call does not throw on CPU-only hosts.
///
/// @return A strided, non-grad torch::Tensor of shape {233} filled with 0.
torch::Tensor lzs_test()
{
    auto opts = torch::TensorOptions()
                    .dtype(torch::kFloat32)
                    .layout(torch::kStrided)
                    .requires_grad(false);

    // Only pin to CUDA:0 when a device actually exists; hard-coding the
    // device made this smoke test crash on CPU-only builds.
    if (torch::cuda::is_available()) {
        opts = opts.device(torch::kCUDA, 0);
    }

    return torch::zeros({233}, opts);
}

// Python bindings. TORCH_EXTENSION_NAME is injected by PyTorch's extension
// build system and must match the module name used at import time.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
{
    // Expose the smoke-test entry point; the docstring is what Python's
    // help() will show for this function.
    m.def("lzs_test", &lzs_test, "LZS test...");
}
