// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

#include <ktest/ktest.h>

#include <nebula/array/array_base.h>
#include <nebula/c/dlpack.h>
#include <nebula/c/dlpack_abi.h>
#include <nebula/core/memory_pool.h>
#include <nebula/testing/ktest_util.h>
#include <nebula/ipc/json_simple.h>

namespace nebula::dlpack {
    using nebula::ipc::json::array_from_json;

    // Fixture for DLPack export tests; currently needs no shared state.
    class TestExportArray : public ::testing::Test {
    public:
        // `override` makes the compiler verify this matches the base-class
        // virtual (the original omitted it, so a signature typo would have
        // silently declared a new, never-called function).
        void SetUp() override {
        }
    };

    // Exports `arr` via ExportArray and validates every field of the resulting
    // DLTensor for a 1-D, CPU-resident primitive array:
    //   - data points at the first logical element (slice offset folded in),
    //   - byte_offset is 0 and strides is nullptr (compact layout),
    //   - shape[0]/ndim/dtype/device match `length`, `arrow_type`, `dlpack_type`.
    // Also checks that ExportDevice reports the CPU device, then releases the
    // managed tensor through its own deleter so the check does not leak.
    void CheckDLTensor(const std::shared_ptr<Array> &arr,
                       const std::shared_ptr<DataType> &arrow_type,
                       DLDataTypeCode dlpack_type, int64_t length) {
        ASSERT_OK_AND_ASSIGN(auto dlmtensor, nebula::dlpack::ExportArray(arr));
        auto dltensor = dlmtensor->dl_tensor;

        // The exported data pointer must account for the array's slice offset,
        // expressed in bytes of the fixed-width value buffer (buffers[1]).
        const auto byte_width = arr->type()->byte_width();
        const auto start = arr->offset() * byte_width;
        ASSERT_OK_AND_ASSIGN(auto sliced_buffer,
                             SliceBufferSafe(arr->data()->buffers[1], start));
        ASSERT_EQ(sliced_buffer->data(), dltensor.data);

        ASSERT_EQ(0, dltensor.byte_offset);
        // nullptr strides signals a dense (compact) layout in DLPack.
        ASSERT_EQ(nullptr, dltensor.strides);
        ASSERT_EQ(length, dltensor.shape[0]);
        ASSERT_EQ(1, dltensor.ndim);

        ASSERT_EQ(dlpack_type, dltensor.dtype.code);

        ASSERT_EQ(arrow_type->bit_width(), dltensor.dtype.bits);
        ASSERT_EQ(1, dltensor.dtype.lanes);
        ASSERT_EQ(DLDeviceType::kDLCPU, dltensor.device.device_type);
        ASSERT_EQ(0, dltensor.device.device_id);

        ASSERT_OK_AND_ASSIGN(auto device, nebula::dlpack::ExportDevice(arr));
        ASSERT_EQ(DLDeviceType::kDLCPU, device.device_type);
        ASSERT_EQ(0, device.device_id);

        // Managed tensors must be released via the deleter they carry.
        dlmtensor->deleter(dlmtensor);
    }

    // Exercises every primitive type the DLPack bridge supports, including
    // slices with zero and nonzero offsets, and verifies the export is
    // zero-copy by comparing pool allocation counts before and after.
    TEST_F(TestExportArray, TestSupportedArray) {
        // Each Arrow type paired with the DLPack type code ExportArray is
        // expected to produce for it.
        const std::vector<std::pair<std::shared_ptr<DataType>, DLDataTypeCode> > cases = {
            {int8(), DLDataTypeCode::kDLInt},
            {uint8(), DLDataTypeCode::kDLUInt},
            {int16(), DLDataTypeCode::kDLInt},
            {uint16(), DLDataTypeCode::kDLUInt},
            {int32(), DLDataTypeCode::kDLInt},
            {uint32(), DLDataTypeCode::kDLUInt},
            {int64(), DLDataTypeCode::kDLInt},
            {uint64(), DLDataTypeCode::kDLUInt},
            {float16(), DLDataTypeCode::kDLFloat},
            {float32(), DLDataTypeCode::kDLFloat},
            {float64(), DLDataTypeCode::kDLFloat}
        };

        const auto allocated_bytes = nebula::default_memory_pool()->bytes_allocated();

        // Bind by const reference: a plain `auto` binding would copy the pair
        // — and its shared_ptr (an atomic refcount bump) — every iteration.
        for (const auto &[arrow_type, dlpack_type]: cases) {
            const std::shared_ptr<Array> array =
                    array_from_json(arrow_type, "[1, 0, 10, 0, 2, 1, 3, 5, 1, 0]").value_or_die();
            // Full array, then slices with and without a nonzero offset, to
            // exercise the byte-offset handling in CheckDLTensor.
            CheckDLTensor(array, arrow_type, dlpack_type, 10);
            ASSERT_OK_AND_ASSIGN(auto sliced_1, array->slice_safe(1, 5));
            CheckDLTensor(sliced_1, arrow_type, dlpack_type, 5);
            ASSERT_OK_AND_ASSIGN(auto sliced_2, array->slice_safe(0, 5));
            CheckDLTensor(sliced_2, arrow_type, dlpack_type, 5);
            ASSERT_OK_AND_ASSIGN(auto sliced_3, array->slice_safe(3));
            CheckDLTensor(sliced_3, arrow_type, dlpack_type, 7);
        }

        // Zero-copy invariant: no bytes may remain allocated by the exports.
        ASSERT_EQ(allocated_bytes, nebula::default_memory_pool()->bytes_allocated());
    }

    // Arrays that cannot be represented through DLPack must be rejected with
    // a FailedPrecondition status instead of being exported.
    TEST_F(TestExportArray, TestErrors) {
        // The null type is rejected with a message naming the offending type.
        const std::shared_ptr<Array> null_typed = array_from_json(null(), "[]").value_or_die();
        ASSERT_RAISES_WITH_MESSAGE(turbo::StatusCode::kFailedPrecondition,
                                   "DataType is not compatible with DLPack spec: " +
                                   null_typed->type()->to_string(),
                                   nebula::dlpack::ExportArray(null_typed));

        // Arrays containing nulls are refused with an explicit message.
        const std::shared_ptr<Array> has_nulls = array_from_json(int8(), "[1, 100, null]").value_or_die();
        ASSERT_RAISES_WITH_MESSAGE(turbo::StatusCode::kFailedPrecondition,
                                   "Can only use DLPack on arrays with no nulls.",
                                   nebula::dlpack::ExportArray(has_nulls));

        // Variable-length string arrays are rejected (message not asserted).
        const std::shared_ptr<Array> strings =
                array_from_json(utf8(), R"(["itsy", "bitsy", "spider"])").value_or_die();
        ASSERT_RAISES(turbo::StatusCode::kFailedPrecondition,
                      nebula::dlpack::ExportArray(strings));

        // Boolean arrays are rejected even for a device-only export.
        const std::shared_ptr<Array> booleans = array_from_json(boolean(), "[true, false]").value_or_die();
        ASSERT_RAISES(turbo::StatusCode::kFailedPrecondition,
                      nebula::dlpack::ExportDevice(booleans));
    }
} // namespace nebula::dlpack
