// Copyright (C) Kumo inc. and its affiliates.
// Author: Jeff.li lijippy@163.com
// All rights reserved.
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <https://www.gnu.org/licenses/>.
//

#include <benchmark/benchmark.h>

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <memory>
#include <random>
#include <string>
#include <vector>


#include <nebula/compression/compression.h>
#include <nebula/version.h>
#include <turbo/log/logging.h>
#include <turbo/base/macros.h>

namespace nebula::util {

#ifdef NEBULA_WITH_BENCHMARKS_REFERENCE

// Build a `data_size`-byte buffer that compresses well: a repeated text
// pattern with pseudo-random single-byte mutations sprinkled in at
// exponentially distributed gaps (fixed seed, so the corpus is deterministic).
std::vector<uint8_t> MakeCompressibleData(int data_size) {
  // XXX This isn't a real-world corpus so doesn't really represent the
  // comparative qualities of the algorithms

  // Start from a highly compressible repeated text pattern.
  const std::string pattern =
      "Apache Nebula is a cross-language development platform for in-memory data";
  const int repeat_count = static_cast<int>(1 + data_size / pattern.size());

  std::vector<uint8_t> buffer(pattern.size() * repeat_count);
  for (int rep = 0; rep < repeat_count; ++rep) {
    std::copy(pattern.begin(), pattern.end(), buffer.begin() + rep * pattern.size());
  }
  buffer.resize(data_size);

  // Randomly mutate some bytes so compression has to work harder.
  std::mt19937 rng(42);
  std::exponential_distribution<> gap_dist(0.05);
  std::uniform_int_distribution<> byte_dist(0, 255);

  // Note: a gap that truncates to 0 simply re-mutates the same position.
  for (int64_t cursor = 0; cursor < data_size;) {
    buffer[cursor] = static_cast<uint8_t>(byte_dist(rng));
    cursor += static_cast<int64_t>(gap_dist(rng));
  }

  return buffer;
}

// Compress `data` through `codec`'s streaming compressor and return the total
// number of compressed bytes produced.  If `compressed_data` is non-null it is
// reset and receives the full compressed stream (including the End() trailer).
int64_t StreamingCompress(Codec* codec, const std::vector<uint8_t>& data,
                          std::vector<uint8_t>* compressed_data = nullptr) {
  if (compressed_data != nullptr) {
    compressed_data->clear();
    compressed_data->shrink_to_fit();
  }
  auto compressor = *codec->MakeCompressor();

  const uint8_t* remaining = data.data();
  int64_t remaining_len = data.size();
  int64_t total_compressed = 0;

  std::vector<uint8_t> scratch(1 << 20);  // 1 MB working buffer

  // Account for `nbytes` freshly written to `scratch` and, if the caller
  // wants the stream, append them to its output vector.
  const auto collect = [&](int64_t nbytes) {
    total_compressed += nbytes;
    if (compressed_data != nullptr && nbytes > 0) {
      compressed_data->insert(compressed_data->end(), scratch.data(),
                              scratch.data() + nbytes);
    }
  };

  while (remaining_len > 0) {
    auto result =
        *compressor->Compress(remaining_len, remaining, scratch.size(), scratch.data());
    remaining += result.bytes_read;
    remaining_len -= result.bytes_read;
    collect(result.bytes_written);
    if (result.bytes_read == 0) {
      // No input consumed: the output buffer is too small, double it.
      scratch.resize(scratch.size() * 2);
    }
  }
  for (;;) {
    auto result = *compressor->End(scratch.size(), scratch.data());
    collect(result.bytes_written);
    if (!result.should_retry) {
      break;
    }
    // End() needs a larger output buffer before it can finish the stream.
    scratch.resize(scratch.size() * 2);
  }
  return total_compressed;
}

// Benchmark driver: streaming-compress `data` once per iteration and report
// the achieved compression ratio (uncompressed / compressed).
static void StreamingCompression(CompressionType compression,
                                 const std::vector<uint8_t>& data,
                                 benchmark::State& state) {  // NOLINT non-const reference
  auto codec = *Codec::create(compression);

  while (state.KeepRunning()) {
    const int64_t compressed_size = StreamingCompress(codec.get(), data);
    const double ratio =
        static_cast<double>(data.size()) / static_cast<double>(compressed_size);
    state.counters["ratio"] = ratio;
  }
  state.SetBytesProcessed(state.iterations() * data.size());
}

// Registered entry point: streaming compression over an 8 MB synthetic corpus.
template <CompressionType COMPRESSION>
static void ReferenceStreamingCompression(
    benchmark::State& state) {  // NOLINT non-const reference
  constexpr int kInputSize = 8 * 1024 * 1024;  // 8 MB
  StreamingCompression(COMPRESSION, MakeCompressibleData(kInputSize), state);
}

// One-shot compression of `data` into `compressed_data` (sized to the exact
// compressed length on return); returns the compressed size in bytes.
int64_t Compress(Codec* codec, const std::vector<uint8_t>& data,
                 std::vector<uint8_t>* compressed_data) {
  const uint8_t* raw = data.data();
  const int64_t raw_len = data.size();
  // Reserve the codec's worst-case output size up front.
  compressed_data->resize(codec->MaxCompressedLen(raw_len, raw));

  int64_t actual_size = 0;
  if (raw_len > 0) {
    actual_size = *codec->Compress(raw_len, raw, compressed_data->size(),
                                   compressed_data->data());
    compressed_data->resize(actual_size);
  }
  return actual_size;
}

// Registered entry point: one-shot compression over an 8 MB synthetic corpus,
// reporting the compression ratio per iteration.
template <CompressionType COMPRESSION>
static void ReferenceCompression(benchmark::State& state) {  // NOLINT non-const reference
  const auto data = MakeCompressibleData(8 * 1024 * 1024);  // 8 MB
  auto codec = *Codec::create(COMPRESSION);

  while (state.KeepRunning()) {
    std::vector<uint8_t> scratch;
    const auto compressed_size = Compress(codec.get(), data, &scratch);
    state.counters["ratio"] =
        static_cast<double>(data.size()) / static_cast<double>(compressed_size);
  }
  state.SetBytesProcessed(state.iterations() * data.size());
}

// Benchmark driver: compress `data` once up front, then time streaming
// decompression of the result on every iteration, verifying the round trip.
static void StreamingDecompression(
    CompressionType compression, const std::vector<uint8_t>& data,
    benchmark::State& state) {  // NOLINT non-const reference
  auto codec = *Codec::create(compression);

  std::vector<uint8_t> compressed;
  TURBO_UNUSED(StreamingCompress(codec.get(), data, &compressed));
  state.counters["ratio"] =
      static_cast<double>(data.size()) / static_cast<double>(compressed.size());

  while (state.KeepRunning()) {
    auto decompressor = *codec->MakeDecompressor();

    const uint8_t* cursor = compressed.data();
    int64_t remaining = compressed.size();
    int64_t total_out = 0;

    std::vector<uint8_t> out_buf(1 << 20);  // 1 MB
    while (!decompressor->IsFinished()) {
      auto result =
          *decompressor->Decompress(remaining, cursor, out_buf.size(), out_buf.data());
      cursor += result.bytes_read;
      remaining -= result.bytes_read;
      total_out += result.bytes_written;
      if (result.need_more_output) {
        // Output buffer too small for the next chunk: double it and retry.
        out_buf.resize(out_buf.size() * 2);
      }
    }
    // Round-trip sanity check: decompressed size must match the input.
    NEBULA_CHECK(total_out == static_cast<int64_t>(data.size()));
  }
  state.SetBytesProcessed(state.iterations() * data.size());
}

// Registered entry point: streaming decompression over an 8 MB synthetic corpus.
template <CompressionType COMPRESSION>
static void ReferenceStreamingDecompression(
    benchmark::State& state) {  // NOLINT non-const reference
  constexpr int kInputSize = 8 * 1024 * 1024;  // 8 MB
  StreamingDecompression(COMPRESSION, MakeCompressibleData(kInputSize), state);
}

// Registered entry point: one-shot decompression over an 8 MB synthetic
// corpus, verifying that the full original length is recovered each iteration.
template <CompressionType COMPRESSION>
static void ReferenceDecompression(
    benchmark::State& state) {  // NOLINT non-const reference
  const auto data = MakeCompressibleData(8 * 1024 * 1024);  // 8 MB
  auto codec = *Codec::create(COMPRESSION);

  std::vector<uint8_t> compressed;
  TURBO_UNUSED(Compress(codec.get(), data, &compressed));
  state.counters["ratio"] =
      static_cast<double>(data.size()) / static_cast<double>(compressed.size());

  // Output buffer sized (and pre-filled) from the original input.
  std::vector<uint8_t> decompressed(data);
  while (state.KeepRunning()) {
    auto result = codec->Decompress(compressed.size(), compressed.data(),
                                    decompressed.size(), decompressed.data());
    NEBULA_CHECK(result.ok());
    NEBULA_CHECK(*result == static_cast<int64_t>(decompressed.size()));
  }
  state.SetBytesProcessed(state.iterations() * data.size());
}

// Register the reference benchmarks per codec: streaming and one-shot
// variants for both compression and decompression.
BENCHMARK_TEMPLATE(ReferenceStreamingCompression, CompressionType::GZIP);
BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::GZIP);
BENCHMARK_TEMPLATE(ReferenceStreamingDecompression, CompressionType::GZIP);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::GZIP);

BENCHMARK_TEMPLATE(ReferenceStreamingCompression, CompressionType::BROTLI);
BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::BROTLI);
BENCHMARK_TEMPLATE(ReferenceStreamingDecompression, CompressionType::BROTLI);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::BROTLI);

BENCHMARK_TEMPLATE(ReferenceStreamingCompression, CompressionType::ZSTD);
BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::ZSTD);
BENCHMARK_TEMPLATE(ReferenceStreamingDecompression, CompressionType::ZSTD);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::ZSTD);

BENCHMARK_TEMPLATE(ReferenceStreamingCompression, CompressionType::LZ4_FRAME);
BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::LZ4_FRAME);
BENCHMARK_TEMPLATE(ReferenceStreamingDecompression, CompressionType::LZ4_FRAME);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::LZ4_FRAME);

// NOTE(review): only the one-shot benchmarks are registered for raw LZ4 and
// Snappy — presumably these codecs have no streaming interface here; confirm
// against the Codec implementation.
BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::LZ4);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::LZ4);

BENCHMARK_TEMPLATE(ReferenceCompression, CompressionType::SNAPPY);
BENCHMARK_TEMPLATE(ReferenceDecompression, CompressionType::SNAPPY);

#endif

}  // namespace nebula::util
