// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <nebula/compute/kernels/scalar_cast_internal.h>
#include <nebula/compute/cast_internal.h>
#include <nebula/compute/kernels/common_internal.h>
#include <nebula/core/extension_type.h>
#include <nebula/types/type_traits.h>
#include <turbo/base/checked_cast.h>
#include <turbo/numeric/float16.h>

namespace nebula {

using internal::PrimitiveScalarBase;

namespace compute {
namespace internal {

// ----------------------------------------------------------------------

namespace {

template <typename OutType, typename InType, typename Enable = void>
struct CastPrimitive {
  // Generic element-wise numeric cast: static_cast each value from InType's
  // C type to OutType's C type. This is the deliberately "unsafe" path, so
  // out-of-range float conversions are allowed (hence the UBSan suppression).
  TURBO_DISABLE_UBSAN("float-cast-overflow")
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    using OutT = typename OutType::c_type;
    using InT = typename InType::c_type;
    const InT* src = arr.get_values<InT>(1);
    OutT* dst = out->get_values<OutT>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      dst[i] = static_cast<OutT>(src[i]);
    }
  }
};

// float/double -> half-float: round-trip through turbo::Float16 and store the
// raw 16-bit representation.
template <typename InType>
struct CastPrimitive<Fp16Type, InType, enable_if_physical_floating_point<InType>> {
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    using InT = typename InType::c_type;
    const InT* src = arr.get_values<InT>(1);
    uint16_t* dst = out->get_values<uint16_t>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      dst[i] = turbo::Float16(src[i]).bits();
    }
  }
};

// half-float -> float: decode the stored 16-bit pattern via turbo::Float16.
template <>
struct CastPrimitive<Fp32Type, Fp16Type, enable_if_t<true>> {
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    const uint16_t* src = arr.get_values<uint16_t>(1);
    float* dst = out->get_values<float>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      dst[i] = turbo::Float16::from_bits(src[i]).to_float();
    }
  }
};

// half-float -> double: decode the stored 16-bit pattern via turbo::Float16.
template <>
struct CastPrimitive<Fp64Type, Fp16Type, enable_if_t<true>> {
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    const uint16_t* src = arr.get_values<uint16_t>(1);
    double* dst = out->get_values<double>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      dst[i] = turbo::Float16::from_bits(src[i]).to_double();
    }
  }
};

// Identity cast: source and destination share the same physical
// representation, so a single bulk memcpy of the value buffer suffices.
template <typename OutType, typename InType>
struct CastPrimitive<OutType, InType, enable_if_t<std::is_same<OutType, InType>::value>> {
  // memcpy output
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    using T = typename InType::c_type;
    // Guard the zero-length case: passing a null pointer to std::memcpy is
    // undefined behavior even when the byte count is 0, and an empty span may
    // have no backing buffer.
    if (arr.length > 0) {
      std::memcpy(out->get_values<T>(1), arr.get_values<T>(1),
                  static_cast<size_t>(arr.length) * sizeof(T));
    }
  }
};

// integer -> half-float: widen each value to float first, then narrow to half.
template <typename InType>
struct CastPrimitive<Fp16Type, InType, enable_if_integer<InType>> {
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    using InT = typename InType::c_type;
    const InT* src = arr.get_values<InT>(1);
    uint16_t* dst = out->get_values<uint16_t>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      const float widened = static_cast<float>(src[i]);
      dst[i] = turbo::Float16(widened).bits();
    }
  }
};

// half-float -> integer: decode to float, then static_cast to the target
// integer type (unsafe truncation, like the generic numeric path).
template <typename OutType>
struct CastPrimitive<OutType, Fp16Type, enable_if_integer<OutType>> {
  static void Exec(const ArraySpan& arr, ArraySpan* out) {
    using OutT = typename OutType::c_type;
    const uint16_t* src = arr.get_values<uint16_t>(1);
    OutT* dst = out->get_values<OutT>(1);
    for (int64_t i = 0; i < arr.length; ++i) {
      dst[i] = static_cast<OutT>(turbo::Float16::from_bits(src[i]).to_float());
    }
  }
};

// Dispatches on the destination type id and runs the matching CastPrimitive
// specialization. Destination ids outside the numeric set are a silent no-op;
// callers are expected to have validated the cast beforehand.
template <typename InType>
void CastNumberImpl(Type::type out_type, const ArraySpan& input, ArraySpan* out) {
  switch (out_type) {
    case Type::UINT8:  return CastPrimitive<UInt8Type, InType>::Exec(input, out);
    case Type::UINT16: return CastPrimitive<UInt16Type, InType>::Exec(input, out);
    case Type::UINT32: return CastPrimitive<UInt32Type, InType>::Exec(input, out);
    case Type::UINT64: return CastPrimitive<UInt64Type, InType>::Exec(input, out);
    case Type::INT8:   return CastPrimitive<Int8Type, InType>::Exec(input, out);
    case Type::INT16:  return CastPrimitive<Int16Type, InType>::Exec(input, out);
    case Type::INT32:  return CastPrimitive<Int32Type, InType>::Exec(input, out);
    case Type::INT64:  return CastPrimitive<Int64Type, InType>::Exec(input, out);
    case Type::FP16:   return CastPrimitive<Fp16Type, InType>::Exec(input, out);
    case Type::FP32:   return CastPrimitive<Fp32Type, InType>::Exec(input, out);
    case Type::FP64:   return CastPrimitive<Fp64Type, InType>::Exec(input, out);
    default:
      break;
  }
}

}  // namespace

// Performs an unchecked numeric cast between any two primitive numeric types.
// First dispatches on the source type id; CastNumberImpl then dispatches on
// the destination. Non-numeric source ids are a programming error (DKCHECK).
void CastNumberToNumberUnsafe(Type::type in_type, Type::type out_type,
                              const ArraySpan& input, ArraySpan* out) {
  switch (in_type) {
    case Type::UINT8:  return CastNumberImpl<UInt8Type>(out_type, input, out);
    case Type::UINT16: return CastNumberImpl<UInt16Type>(out_type, input, out);
    case Type::UINT32: return CastNumberImpl<UInt32Type>(out_type, input, out);
    case Type::UINT64: return CastNumberImpl<UInt64Type>(out_type, input, out);
    case Type::INT8:   return CastNumberImpl<Int8Type>(out_type, input, out);
    case Type::INT16:  return CastNumberImpl<Int16Type>(out_type, input, out);
    case Type::INT32:  return CastNumberImpl<Int32Type>(out_type, input, out);
    case Type::INT64:  return CastNumberImpl<Int64Type>(out_type, input, out);
    case Type::FP16:   return CastNumberImpl<Fp16Type>(out_type, input, out);
    case Type::FP32:   return CastNumberImpl<Fp32Type>(out_type, input, out);
    case Type::FP64:   return CastNumberImpl<Fp64Type>(out_type, input, out);
    default:
      DKCHECK(false);
      break;
  }
}

// ----------------------------------------------------------------------

// Casts a dictionary-encoded input by materializing it: dictionary values are
// gathered through the index array with Take, then (when the dictionary value
// type differs from the requested output type) cast to the target type.
turbo::Status UnpackDictionary(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
  // TODO: is there an implementation more friendly to the "span" data structures?

  DictionaryArray dict_arr(batch[0].array.to_array_data());
  const CastOptions& options = turbo::checked_cast<const CastState&>(*ctx->state()).options;

  // Reject early when the requested cast is neither the identity nor
  // registered as castable from the dictionary value type.
  const auto& dict_type = *dict_arr.dictionary()->type();
  const DataType& to_type = *options.to_type;
  if (!to_type.equals(dict_type) && !CanCast(dict_type, to_type)) {
    return turbo::invalid_argument_error("Cast type ", to_type.to_string(),
                           " incompatible with dictionary type ", dict_type.to_string());
  }

  // Materialize: gather the dictionary values selected by each index.
  TURBO_MOVE_OR_RAISE(Datum unpacked,
                        Take(dict_arr.dictionary(), dict_arr.indices(),
                             TakeOptions::defaults(), ctx->exec_context()));
  // Second pass only when the value type still needs converting.
  if (!dict_type.equals(to_type)) {
    TURBO_MOVE_OR_RAISE(unpacked, Cast(unpacked, options));
  }
  out->value = std::move(unpacked.array());
  return turbo::OkStatus();
}

// Fills the output with all-null metadata: no validity buffer and a null
// count equal to the batch length.
turbo::Status OutputAllNull(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
  // TODO(wesm): there is no good reason to have to use ArrayData here, so we
  // should clean this up later. This is used in the dict<null>->null cast
  ArrayData& output = *out->array_data();
  output.buffers = {nullptr};
  output.null_count = batch.length;
  return turbo::OkStatus();
}

// Casts an extension-typed array by unwrapping it and casting its underlying
// storage array to the requested output type.
turbo::Status CastFromExtension(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
  DKCHECK(batch[0].is_array());
  const CastOptions& options = turbo::checked_cast<const CastState&>(*ctx->state()).options;

  ExtensionArray extension(batch[0].array.to_array_data());
  std::shared_ptr<Array> result;
  TURBO_RETURN_NOT_OK(
      Cast(*extension.storage(), out->type()->get_shared_ptr(), options,
           ctx->exec_context())
          .try_value(&result));
  out->value = std::move(result->data());
  return turbo::OkStatus();
}

// Casting from the null type: every slot is null, so simply build an all-null
// array of the target type.
turbo::Status CastFromNull(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
  // TODO(wesm): handle this case more gracefully
  TURBO_MOVE_OR_RAISE(std::shared_ptr<Array> nulls,
                        MakeArrayOfNull(out->type()->get_shared_ptr(), batch.length));
  out->value = nulls->data();
  return turbo::OkStatus();
}

// Output-type resolver that ignores the input types entirely and reports the
// target type recorded in the kernel's CastOptions.
turbo::Result<TypeHolder> ResolveOutputFromOptions(KernelContext* ctx,
                                            const std::vector<TypeHolder>&) {
  const auto& state = turbo::checked_cast<const CastState&>(*ctx->state());
  return state.options.to_type;
}

/// Some kernels use
///
/// kOutputTargetType
///
/// for their output type resolution. This is somewhat of an eyesore, but the
/// easiest initial way to get the requested cast type (including the TimeUnit)
/// to the kernel — which needs it to compute the output — was through
/// CastOptions.

OutputType kOutputTargetType(ResolveOutputFromOptions);

// Metadata-only cast: the output ArrayData adopts the input's buffers and
// child arrays without touching any values. Intended for casts where source
// and destination share the same physical layout.
turbo::Status ZeroCopyCastExec(KernelContext* ctx, const ExecSpan& batch, ExecResult* out) {
  // TODO(wesm): alternative strategy for zero copy casts after ARROW-16576
  std::shared_ptr<ArrayData> input = batch[0].array.to_array_data();
  ArrayData* output = out->array_data().get();
  output->length = input->length;
  output->offset = input->offset;
  output->SetNullCount(input->null_count);
  // `input` is a locally owned ArrayData handle produced by to_array_data(),
  // so moving its buffer/child vectors out of it is safe here.
  output->buffers = std::move(input->buffers);
  output->child_data = std::move(input->child_data);
  return turbo::OkStatus();
}

// Registers a kernel on `func` that performs the `in_type` -> `out_type`
// cast via ZeroCopyCastExec (no preallocation, computed null handling).
void AddZeroCopyCast(Type::type in_type_id, InputType in_type, OutputType out_type,
                     CastFunction* func) {
  ScalarKernel kernel;
  kernel.signature = KernelSignature::create({in_type}, out_type);
  kernel.exec = ZeroCopyCastExec;
  kernel.null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
  kernel.mem_allocation = MemAllocation::NO_PREALLOCATE;
  KCHECK_OK(func->add_kernel(in_type_id, std::move(kernel)));
}

// True when dictionary unpacking is implemented for values of `type_id`
// (primitive, binary-like, or fixed-size-binary value types).
static bool CanCastFromDictionary(Type::type type_id) {
  if (is_primitive(type_id)) {
    return true;
  }
  return is_base_binary_like(type_id) || is_fixed_size_binary(type_id);
}

// Registers the cast kernels every destination type supports:
//   * null      -> out_type  (all-null output)
//   * dictionary -> out_type (unpack, where implemented for the value type)
//   * extension -> out_type  (cast the storage array)
void AddCommonCasts(Type::type out_type_id, OutputType out_ty, CastFunction* func) {
  // From null to this type
  ScalarKernel kernel;
  kernel.exec = CastFromNull;
  kernel.signature = KernelSignature::create({null()}, out_ty);
  kernel.null_handling = NullHandling::COMPUTED_NO_PREALLOCATE;
  kernel.mem_allocation = MemAllocation::NO_PREALLOCATE;
  KCHECK_OK(func->add_kernel(Type::NA, std::move(kernel)));

  // From dictionary to this type
  if (CanCastFromDictionary(out_type_id)) {
    // Dictionary unpacking not implemented for boolean or nested types.
    //
    // XXX: Uses Take and does its own memory allocation for the moment. We can
    // fix this later.
    KCHECK_OK(func->add_kernel(Type::DICTIONARY, {InputType(Type::DICTIONARY)}, out_ty,
                              UnpackDictionary, NullHandling::COMPUTED_NO_PREALLOCATE,
                              MemAllocation::NO_PREALLOCATE));
  }

  // From extension type to this type
  KCHECK_OK(func->add_kernel(Type::EXTENSION, {InputType(Type::EXTENSION)}, out_ty,
                            CastFromExtension, NullHandling::COMPUTED_NO_PREALLOCATE,
                            MemAllocation::NO_PREALLOCATE));
}

}  // namespace internal
}  // namespace compute
}  // namespace nebula
