// Copyright 2022, Roman Gershman.  All rights reserved.
// See LICENSE for licensing terms.
//

#include "server/tiered_storage.h"

#include <absl/strings/str_cat.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "absl/flags/internal/flag.h"
#include "absl/flags/reflection.h"
#include "base/flags.h"
#include "base/logging.h"
#include "facade/facade_test.h"
#include "gtest/gtest.h"
#include "server/engine_shard_set.h"
#include "server/test_utils.h"
#include "util/fibers/fibers.h"

using namespace std;
using namespace testing;
using namespace util;

ABSL_DECLARE_FLAG(bool, force_epoll);
ABSL_DECLARE_FLAG(string, tiered_prefix);
ABSL_DECLARE_FLAG(float, tiered_offload_threshold);
ABSL_DECLARE_FLAG(float, tiered_upload_threshold);
ABSL_DECLARE_FLAG(unsigned, tiered_storage_write_depth);
ABSL_DECLARE_FLAG(bool, tiered_experimental_cooling);

namespace dfly {

using absl::GetFlag;
using absl::SetFlag;

// Returns a string consisting of `len` repetitions of character `c` ('A' by default).
string BuildString(size_t len, char c = 'A') {
  string result;
  result.assign(len, c);
  return result;
}

// Base fixture for tiered storage tests. Runs on a single proactor thread and
// stores offloaded values under a file prefix in /tmp.
class TieredStorageTest : public BaseFamilyTest {
 protected:
  TieredStorageTest() {
    num_threads_ = 1;  // single shard keeps stash/fetch metrics deterministic
  }

  void SetUp() override {
    // Tiered tests are not supported under epoll (see warning); skip the whole run.
    if (GetFlag(FLAGS_force_epoll)) {
      LOG(WARNING) << "Can't run tiered tests on EPOLL";
      exit(0);
    }

    // Use a large write depth so tests are not throttled by pending-stash limits
    // unless they exercise throttling explicitly (see ThrottleClients).
    SetFlag(&FLAGS_tiered_storage_write_depth, 15000);
    if (GetFlag(FLAGS_tiered_prefix).empty()) {
      SetFlag(&FLAGS_tiered_prefix, "/tmp/tiered_storage_test");
    }

    BaseFamilyTest::SetUp();
  }

  // Re-reads tiered-storage flags on the (only) shard thread, so tests that
  // change flags after SetUp() can have them take effect.
  void UpdateFromFlags() {
    pp_->at(0)->AwaitBrief([] { EngineShard::tlocal()->tiered_storage()->UpdateFromFlags(); });
  }
};

// Test that should run with both modes of "cooling"; the boolean parameter
// selects the value of the tiered_experimental_cooling flag.
class LatentCoolingTSTest : public TieredStorageTest, public testing::WithParamInterface<bool> {
  void SetUp() override {
    fs.emplace();  // snapshot flag state; restored when the fixture is destroyed
    SetFlag(&FLAGS_tiered_experimental_cooling, GetParam());
    TieredStorageTest::SetUp();
  }

  // Restores all absl flags modified by this fixture on destruction.
  optional<absl::FlagSaver> fs;
};

// Run every LatentCoolingTSTest case once with cooling enabled and once disabled.
INSTANTIATE_TEST_SUITE_P(TS, LatentCoolingTSTest, testing::Values(true, false));

// Disabled cooling and all values are offloaded: an offload threshold of 1.0
// pushes every eligible value to disk.
class PureDiskTSTest : public TieredStorageTest {
  void SetUp() override {
    fs.emplace();  // snapshot flag state; restored when the fixture is destroyed
    SetFlag(&FLAGS_tiered_offload_threshold, 1.0);
    SetFlag(&FLAGS_tiered_experimental_cooling, false);
    TieredStorageTest::SetUp();
  }

  // Restores all absl flags modified by this fixture on destruction.
  optional<absl::FlagSaver> fs;
};

// Perform simple series of SET, GETSET and GET
TEST_P(LatentCoolingTSTest, SimpleGetSet) {
  absl::FlagSaver saver;
  SetFlag(&FLAGS_tiered_offload_threshold, 0.0f);  // disable offloading
  UpdateFromFlags();

  // Value sizes range from kMin up to slightly beyond one page.
  const int kMin = 256;
  const int kMax = tiering::kPageSize + 10;

  // Perform SETs
  for (size_t i = kMin; i < kMax; i++) {
    Run({"SET", absl::StrCat("k", i), BuildString(i)});
  }

  // Make sure all entries were stashed, except the one not filling a small page
  size_t stashes = 0;
  ExpectConditionWithinTimeout([this, &stashes] {
    stashes = GetMetrics().tiered_stats.total_stashes;
    return stashes >= kMax - kMin - 1;
  });

  // All entries were accounted for except that one (see comment above)
  auto metrics = GetMetrics();
  EXPECT_EQ(metrics.db_stats[0].tiered_entries, kMax - kMin - 1);
  // Upper bound: arithmetic-series sum of all value lengths, minus 2047 —
  // presumably the entry left behind in the filling small page; TODO confirm.
  EXPECT_LE(metrics.db_stats[0].tiered_used_bytes, (kMax - 1 + kMin) * (kMax - kMin) / 2 - 2047);

  // Perform GETSETs: each returns the old (offloaded) value and replaces it.
  for (size_t i = kMin; i < kMax; i++) {
    auto resp = Run({"GETSET", absl::StrCat("k", i), string(i, 'B')});
    ASSERT_EQ(resp, BuildString(i)) << i;
  }

  // Perform GETs
  for (size_t i = kMin; i < kMax; i++) {
    auto resp = Run({"GET", absl::StrCat("k", i)});
    ASSERT_EQ(resp, string(i, 'B')) << i;
    Run({"GET", absl::StrCat("k", i)});  // To enforce uploads.
  }

  // After the uploads no entries should remain on disk.
  metrics = GetMetrics();
  EXPECT_EQ(metrics.db_stats[0].tiered_entries, 0);
  EXPECT_EQ(metrics.db_stats[0].tiered_used_bytes, 0);
}

// Use MGET to load multiple offloaded values
TEST_P(LatentCoolingTSTest, MGET) {
  // Populate keys "A".."Z" with 3000-byte values, large enough to be stashed.
  vector<string> cmd{"MGET"};
  vector<string> expected;
  for (char c = 'A'; c <= 'Z'; ++c) {
    cmd.push_back(string(1, c));
    expected.push_back(string(3000, c));
    Run({"SET", cmd.back(), expected.back()});
  }

  // Wait until every value has been written to disk.
  ExpectConditionWithinTimeout(
      [this, &expected] { return GetMetrics().tiered_stats.total_stashes >= expected.size(); });

  // A single MGET must return all values in key order.
  auto resp = Run(absl::MakeSpan(cmd));
  auto parts = resp.GetVec();
  for (size_t idx = 0; idx < parts.size(); ++idx)
    EXPECT_EQ(parts[idx], expected[idx]);
}

// Issue many APPEND commands to an offloaded value that are executed at once (with CLIENT PAUSE).
// They should all finish within the same io completion loop.
TEST_F(TieredStorageTest, AppendStorm) {
  const size_t kAppends = 20;

  // Offload everything immediately and never upload values back automatically.
  absl::FlagSaver saver;
  absl::SetFlag(&FLAGS_tiered_offload_threshold, 1.0);
  absl::SetFlag(&FLAGS_tiered_upload_threshold, 0.0);
  absl::SetFlag(&FLAGS_tiered_experimental_cooling, false);
  UpdateFromFlags();

  // Offload single value
  string base_value(4096, 'a');
  Run({"SET", "key", base_value});
  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes == 1; });

  // Accumulate APPENDs while clients are paused so they all dispatch together.
  Run({"CLIENT", "pause", "1000"});
  vector<Fiber> fibs;
  for (size_t i = 0; i < kAppends; i++) {
    fibs.emplace_back(pp_->at(0)->LaunchFiber([this, i] {
      Run(absl::StrCat(i), {"APPEND", "key", string(96, 'b')});
    }));
  }

  // Throw in a SETRANGE
  fibs.emplace_back(pp_->at(0)->LaunchFiber([this] {
    Run("range", {"SETRANGE", "key", "0", string(96, 'x')});
  }));

  // Throw in a GETRANGE to a range that keeps constant
  string get_range;
  fibs.emplace_back(pp_->at(0)->LaunchFiber([this, &get_range] {
    get_range = Run("get", {"GETRANGE", "key", "96", "191"}).GetString();
  }));

  // Unlock and wait
  Run({"CLIENT", "unpause"});
  for (auto& f : fibs)
    f.JoinIfNeeded();

  // Check partial result is right: bytes 96..191 were never modified.
  EXPECT_EQ(get_range, string(96, 'a'));

  // Get value and verify it: SETRANGE prefix + untouched middle + appended tail.
  auto value = Run({"GET", "key"});
  EXPECT_EQ(value, string(96, 'x') + string(4000, 'a') + string(kAppends * 96, 'b'));

  // Check value was read no more than once for APPENDs and once for GET
  auto metrics = GetMetrics();
  EXPECT_LE(metrics.tiered_stats.total_fetches, 2u);
  EXPECT_LE(metrics.tiered_stats.total_uploads, 2u);
}

// SETRANGE and GETRANGE
TEST_P(LatentCoolingTSTest, Ranges) {
  // Offload a single 3000-byte value.
  string initial(3000, 'a');
  Run({"SET", "key", initial});
  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes >= 1; });

  // Overwrite the middle third of the offloaded value and verify the result.
  Run({"SETRANGE", "key", "1000", string(1000, 'b')});
  auto resp = Run({"GET", "key"});
  string expected = string(1000, 'a') + string(1000, 'b') + string(1000, 'a');
  EXPECT_EQ(resp, expected);

  // Replace the key with a two-half value and wait for it to be stashed too.
  Run({"DEL", "key"});
  string halves = string(1500, 'c') + string(1500, 'd');
  Run({"SET", "key", halves});
  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes >= 2; });

  // Read a range spanning the boundary between the two halves.
  resp = Run({"GETRANGE", "key", "1000", "1999"});
  EXPECT_EQ(resp, string(500, 'c') + string(500, 'd'));
}

// Stash values from different databases and read them back
TEST_P(LatentCoolingTSTest, MultiDb) {
  // Write one large value into each of the first 10 logical databases.
  for (size_t db = 0; db < 10; db++) {
    Run({"SELECT", absl::StrCat(db)});
    Run({"SET", absl::StrCat("k", db), string(3000, char('A' + db))});
  }

  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes >= 10; });

  // Each database should hold exactly one tiered entry; reading it twice
  // brings it back to memory (second GET enforces the upload).
  for (size_t db = 0; db < 10; db++) {
    Run({"SELECT", absl::StrCat(db)});
    EXPECT_EQ(GetMetrics().db_stats[db].tiered_entries, 1);
    string key = absl::StrCat("k", db);
    EXPECT_EQ(Run({"GET", key}), string(3000, char('A' + db)));
    Run({"GET", key});
    EXPECT_EQ(GetMetrics().db_stats[db].tiered_entries, 0);
  }
}

// Trigger defragmentation
TEST_F(TieredStorageTest, Defrag) {
  // Store 8 small values that land in shared small bins.
  for (char k = 'a'; k < 'a' + 8; k++) {
    Run({"SET", string(1, k), string(600, k)});
  }

  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes >= 1; });

  // 7 out of 8 are in one bin; the last one presumably caused the flush and is
  // now in the currently-filling bin — TODO confirm against the bin logic.
  auto metrics = GetMetrics();
  ASSERT_EQ(metrics.tiered_stats.small_bins_cnt, 1u);
  ASSERT_EQ(metrics.tiered_stats.small_bins_entries_cnt, 7u);

  // Distorted due to encoded values.
  ASSERT_EQ(metrics.tiered_stats.small_bins_filling_bytes, 537);

  // Reading 3 values (twice each, to force their removal from the bin)
  // still leaves the bin more than half occupied.
  for (unsigned j = 0; j < 2; ++j) {
    Run({"GET", string(1, 'a')});
    Run({"GET", string(1, 'b')});
    Run({"GET", string(1, 'c')});
  }
  metrics = GetMetrics();
  EXPECT_EQ(metrics.tiered_stats.small_bins_cnt, 1u);
  EXPECT_EQ(metrics.tiered_stats.small_bins_entries_cnt, 4u);

  // This triggers defragmentation, as only 3 < 7/2 remain left
  Run({"GET", string(1, 'd')});

  // Wait until any reads caused by the defrag have finished.
  ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.pending_read_cnt == 0; });
  metrics = GetMetrics();
  EXPECT_EQ(metrics.tiered_stats.total_defrags, 3u);
  EXPECT_EQ(metrics.tiered_stats.small_bins_cnt, 0u);
  EXPECT_EQ(metrics.tiered_stats.allocated_bytes, 0u);
}

TEST_F(PureDiskTSTest, BackgroundOffloading) {
  absl::FlagSaver saver;
  SetFlag(&FLAGS_tiered_upload_threshold, 0.0f);  // upload all values
  UpdateFromFlags();

  const int kNum = 500;

  // Cap memory so the background offloader has a reason to re-stash values.
  max_memory_limit = kNum * 4096;

  // Stash all values
  string value = BuildString(3000);
  for (size_t i = 0; i < kNum; i++) {
    Run({"SETEX", absl::StrCat("k", i), "100", value});
  }

  ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == kNum; });
  ASSERT_EQ(GetMetrics().tiered_stats.total_stashes, kNum);
  ASSERT_EQ(GetMetrics().db_stats[0].tiered_entries, kNum);

  // Trigger re-fetch and test TTL is preserved.
  for (size_t i = 0; i < kNum; i++) {
    string key = absl::StrCat("k", i);
    auto resp = Run({"TTL", key});
    EXPECT_THAT(resp, IntArg(100));

    resp = Run({"GET", key});
    EXPECT_EQ(resp, value);
    resp = Run({"TTL", key});
    EXPECT_THAT(resp, IntArg(100));
    Run({"GET", key});  // enforce uploads
  }

  // Wait for background offloading to move everything back to disk.
  ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == kNum; });
  auto resp = Run({"INFO", "ALL"});
  VLOG(1) << "INFO " << resp.GetString();
  auto metrics = GetMetrics();

  // Not all values were necessarily uploaded during GET calls, but all that were uploaded
  // should be re-stashed again.
  EXPECT_EQ(metrics.tiered_stats.total_stashes, kNum + metrics.tiered_stats.total_uploads)
      << resp.GetString();
  EXPECT_EQ(metrics.tiered_stats.allocated_bytes, kNum * 4096);
}

// Verify correctness of our offloading strategy, offloading values only after second access.
TEST_F(PureDiskTSTest, OffloadingStrategy) {
  // Create value and wait for it to be offloaded
  string value = BuildString(3000);
  Run({"set", "key", value});
  ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == 1; });

  // Check base values
  auto metrics = GetMetrics();
  EXPECT_EQ(metrics.tiered_stats.total_fetches, 0);
  EXPECT_EQ(metrics.tiered_stats.total_uploads, 0);
  EXPECT_EQ(metrics.tiered_stats.total_stashes, 1);

  // Repeat a few times: fetch/upload/offload counters grow by one per cycle.
  for (size_t i = 1; i <= 3; i++) {
    // Value is not uploaded after the first read
    Run({"get", "key"});
    metrics = GetMetrics();
    EXPECT_EQ(metrics.tiered_stats.total_fetches, 2 * i - 1);
    EXPECT_EQ(metrics.tiered_stats.total_uploads, i - 1);

    // But on the second read the upload should happen at the end of the chain due to two touches
    Run({"get", "key"});
    ExpectConditionWithinTimeout([&] { return GetMetrics().tiered_stats.total_uploads == i; });
    metrics = GetMetrics();
    EXPECT_EQ(metrics.tiered_stats.total_fetches, 2 * i);

    // Wait for offloading again
    ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == 1; });
    metrics = GetMetrics();
    EXPECT_EQ(metrics.tiered_stats.total_offloading_stashes, i);
    EXPECT_EQ(metrics.tiered_stats.total_stashes, i + 1);
  }
}

// Test FLUSHALL while reading entries
TEST_F(PureDiskTSTest, FlushAll) {
  const int kNum = 500;
  for (size_t i = 0; i < kNum; i++) {
    Run({"SET", absl::StrCat("k", i), BuildString(3000)});
  }
  ExpectConditionWithinTimeout([&] { return GetMetrics().db_stats[0].tiered_entries == kNum; });

  // Start reading random entries on a separate client fiber.
  atomic_bool done = false;
  auto reader = pp_->at(0)->LaunchFiber([&] {
    while (!done) {
      Run("reader", {"GET", absl::StrCat("k", rand() % kNum)});
      util::ThisFiber::Yield();
    }
  });

  // Wait until the reader has performed a few disk fetches.
  Metrics metrics;
  ExpectConditionWithinTimeout([&] {
    metrics = GetMetrics();

    // Note that metrics.events.hits is not consistent with total_fetches
    // and it can happen that hits is greater than total_fetches due to in-progress reads.
    return metrics.tiered_stats.total_fetches > 2;
  });
  LOG(INFO) << FormatMetrics(metrics);

  // Flush while reads may still be in flight.
  Run({"FLUSHALL"});

  done = true;
  util::ThisFiber::SleepFor(100ms);  // give the reader a chance to observe `done`
  reader.Join();

  metrics = GetMetrics();
  LOG(INFO) << FormatMetrics(metrics);

  EXPECT_EQ(metrics.db_stats.front().tiered_entries, 0u);
}

// Check FLUSHALL clears filling bytes of small bins
TEST_F(TieredStorageTest, FlushPending) {
  absl::FlagSaver saver;
  SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);  // offload all values
  // Propagate the flag change to the shard thread; without this the flag set
  // above never takes effect (all sibling tests that change flags after SetUp
  // call UpdateFromFlags as well).
  UpdateFromFlags();

  // Small 256-byte values accumulate in a filling small bin before flushing.
  const int kNum = 10;
  for (size_t i = 0; i < kNum; i++) {
    Run({"SET", absl::StrCat("k", i), BuildString(256)});
  }
  ExpectConditionWithinTimeout(
      [&] { return GetMetrics().tiered_stats.small_bins_filling_bytes > 0; });
  // FLUSHALL must also drop the partially filled (not yet flushed) bin.
  Run({"FLUSHALL"});
  EXPECT_EQ(GetMetrics().tiered_stats.small_bins_filling_bytes, 0u);
}

// Test that clients are throttled if many stashes are issued.
// Stashes are released with CLIENT UNPAUSE to occur at the same time
TEST_F(PureDiskTSTest, ThrottleClients) {
  absl::FlagSaver saver;
  absl::SetFlag(&FLAGS_tiered_upload_threshold, 0.0);
  UpdateFromFlags();

  // issue client pause to accumulate SETs
  Run({"CLIENT", "PAUSE", "1000"});

  // Each fiber acts as a separate client issuing a single page-sized SET.
  string value(4096, 'a');
  vector<Fiber> fibs;
  for (size_t i = 0; i < 100; i++) {
    fibs.emplace_back(pp_->at(0)->LaunchFiber([this, i, &value] {
      string key = absl::StrCat("k", i);
      Run(key, {"SET", key, value});
    }));
  }
  ThisFiber::Yield();  // let the fibers start and block on the pause

  // Unpause
  Run({"CLIENT", "UNPAUSE"});

  // Check if at least some of the clients were caught throttling
  // but we provided backpressure for all of them
  auto metrics = GetMetrics();
  EXPECT_GT(metrics.tiered_stats.clients_throttled, fibs.size() / 10);
  EXPECT_EQ(metrics.tiered_stats.total_clients_throttled, fibs.size());

  for (auto& fib : fibs)
    fib.JoinIfNeeded();

  // Because of the 5ms max wait time for backpressure, we can't rely on the stashes to have
  // finished even after all the fibers joined, so expect the condition with a timeout
  ExpectConditionWithinTimeout(
      [&] { return GetMetrics().tiered_stats.total_stashes == fibs.size(); });
}

// Set a value with a 1ms TTL, let it lapse, then set it again and read it back.
TEST_F(TieredStorageTest, Expiry) {
  string payload = BuildString(100);
  Run({"psetex", "key1", "1", payload});
  AdvanceTime(10);  // key1 is now past its expiry
  Run({"psetex", "key1", "1", payload});
  auto resp = Run({"get", "key1"});
  EXPECT_EQ(resp, payload);
}

// Overwriting offloaded keys with SETEX must keep the requested TTL intact.
TEST_F(PureDiskTSTest, SetExistingExpire) {
  const int kNum = 20;
  auto key_of = [](size_t idx) { return absl::StrCat("k", idx); };

  // Create keys with a TTL and wait until at least some were offloaded.
  for (size_t i = 0; i < kNum; i++) {
    Run({"SETEX", key_of(i), "100", BuildString(256)});
  }
  ExpectConditionWithinTimeout([&] { return GetMetrics().tiered_stats.total_stashes > 1; });

  // Overwrite the (possibly offloaded) keys with the same TTL.
  for (size_t i = 0; i < kNum; i++) {
    Run({"SETEX", key_of(i), "100", BuildString(256)});
  }

  // The TTL must be preserved on every key.
  for (size_t i = 0; i < kNum; i++) {
    EXPECT_THAT(Run({"TTL", key_of(i)}), IntArg(100));
  }
}

// DUMP an offloaded value, delete it, and RESTORE it from the serialized blob.
TEST_F(PureDiskTSTest, Dump) {
  const int kNum = 10;
  for (size_t i = 0; i < kNum; i++) {
    // 3000 bytes is big enough to trigger offloading.
    Run({"SET", absl::StrCat("k", i), BuildString(3000)});
  }

  ExpectConditionWithinTimeout([&] { return GetMetrics().tiered_stats.total_stashes == kNum; });

  auto resp = Run({"DUMP", "k0"});
  EXPECT_THAT(Run({"del", "k0"}), IntArg(1));
  resp = Run({"restore", "k0", "0", facade::ToSV(resp.GetBuf())});
  EXPECT_EQ(resp, "OK");
}

}  // namespace dfly
