#pragma once

#include "CachePolicy.h"

#include <atomic>
#include <cmath>
#include <cstddef>
#include <cstring>
#include <memory>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

namespace Cache {

template <typename Key, typename Value>
class LruCache;

/// Doubly-linked-list node used internally by LruCache.
/// prev_ is a weak_ptr so the backward links do not form a shared_ptr
/// ownership cycle with the forward next_ links (which would leak).
template <typename Key, typename Value>
class LruNode {
  private:
    Key key_;
    Value val_;
    std::atomic<size_t> accessCnt_;  // size_t matches getAccessCnt()'s return type (was atomic<int>)
    std::weak_ptr<LruNode<Key, Value>> prev_{};           // non-owning back link; breaks the ref cycle
    std::shared_ptr<LruNode<Key, Value>> next_{nullptr};  // owning forward link

  public:
    LruNode(Key key, Value val) : key_(key), val_(val), accessCnt_(1) {}
    Key getKey() const { return key_; }
    Value getValue() const { return val_; }
    void setValue(const Value &val) { val_ = val; }
    //! C++17 attribute: warns if the caller discards the return value.
    [[nodiscard]] size_t getAccessCnt() const { return accessCnt_; }
    void incrementAccessCnt() { ++accessCnt_; }

    // LruCache splices nodes via the private prev_/next_ links.
    friend class LruCache<Key, Value>;
};

/// Thread-safe LRU cache: an unordered_map for O(1) key lookup plus a
/// doubly linked list (dummyHead_ .. dummyTail_) ordered from least- to
/// most-recently used. Requires Key and Value to be default-constructible
/// (the sentinel nodes are built with Key() / Value()).
template <typename Key, typename Value>
class LruCache : public CachePolicy<Key, Value> {
  private:
    using LruNodeType = LruNode<Key, Value>;
    using NodePtr = std::shared_ptr<LruNodeType>;
    using NodeMap = std::unordered_map<Key, NodePtr>;

  public:
    //! explicit: prevents accidental implicit int -> LruCache conversion.
    explicit LruCache(int capacity) : capacity_(capacity) { initializeList(); }
    ~LruCache() override = default;

    /// Insert or overwrite `key`; evicts the least recently used entry
    /// when the cache is full. No-op when capacity_ <= 0.
    void put(Key key, Value val) override {
        if (capacity_ <= 0)
            return;
        std::lock_guard<std::mutex> lock(mtx_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end()) {
            updateExistingNode(it->second, val);
            return;
        }

        addNewNode(key, val);
    }

    /// On hit: copies the value into `val`, promotes the entry to
    /// most-recently-used, returns true. On miss: leaves `val` untouched.
    bool get(Key key, Value &val) override {
        std::lock_guard<std::mutex> lock(mtx_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end()) {
            moveToMostRecent(it->second);
            val = it->second->getValue();
            return true;
        }
        return false;
    }

    /// Convenience overload: returns Value{} on a miss — indistinguishable
    /// from a stored default value; use get(key, val) to disambiguate.
    Value get(Key key) override {
        Value val{};
        get(key, val);
        return val;
    }

    /// Erase `key` if present; no-op otherwise.
    void remove(Key key) {
        std::lock_guard<std::mutex> lock(mtx_);
        auto it = nodeMap_.find(key);
        if (it != nodeMap_.end()) {
            removeNode(it->second);
            nodeMap_.erase(it);  // reuse the iterator instead of re-hashing the key
        }
    }

  private:
    // Head/tail sentinels remove all null checks when splicing at either end.
    void initializeList() {
        dummyHead_ = std::make_shared<LruNodeType>(Key(), Value());  // default-constructed sentinels
        dummyTail_ = std::make_shared<LruNodeType>(Key(), Value());
        dummyHead_->next_ = dummyTail_;
        dummyTail_->prev_ = dummyHead_;
    }

    // const& parameters: copying a shared_ptr costs an atomic ref-count bump.
    void updateExistingNode(const NodePtr &node, const Value &val) {
        node->setValue(val);
        moveToMostRecent(node);
    }

    void addNewNode(const Key &key, const Value &value) {
        // capacity_ > 0 is guaranteed by put(); cast avoids a signed/unsigned compare.
        if (nodeMap_.size() >= static_cast<size_t>(capacity_))
            evictLeastRecent();
        NodePtr newNode = std::make_shared<LruNodeType>(key, value);
        insertNode(newNode);
        nodeMap_[key] = newNode;
    }

    void moveToMostRecent(const NodePtr &node) {
        removeNode(node);
        insertNode(node);
    }

    /// Unlink `node` from the list; safe on an already-detached node
    /// (detached nodes have an expired prev_ and a null next_).
    void removeNode(const NodePtr &node) {
        if (!node->prev_.expired() && node->next_) {
            auto prev = node->prev_.lock();
            prev->next_ = node->next_;
            node->next_->prev_ = prev;
            node->next_ = nullptr;  // mark as detached
        }
    }

    /// Append before the tail sentinel, i.e. the most-recently-used slot.
    void insertNode(const NodePtr &node) {
        node->next_ = dummyTail_;
        node->prev_ = dummyTail_->prev_;
        //! weak_ptr cannot be dereferenced directly; lock() yields a shared_ptr.
        dummyTail_->prev_.lock()->next_ = node;
        dummyTail_->prev_ = node;
    }

    /// Drop the entry right after the head sentinel (least recently used).
    void evictLeastRecent() {
        NodePtr leastRecent = dummyHead_->next_;
        removeNode(leastRecent);
        nodeMap_.erase(leastRecent->getKey());
    }

  private:
    int capacity_;       // max number of entries; <= 0 disables the cache
    NodeMap nodeMap_;    // key -> node, O(1) lookup
    std::mutex mtx_;     // guards nodeMap_ and the linked list
    NodePtr dummyHead_;  // sentinel: next_ is the LRU entry
    NodePtr dummyTail_;  // sentinel: prev_ is the MRU entry
};

/// LRU-K: a key is admitted into the main LRU cache only after k_ accesses.
/// Pending keys live in historyList_ (access counts, itself LRU-bounded) and
/// historyValMap_ (their values). This filters one-shot accesses out of the
/// hot cache.
template <typename Key, typename Value>
class LruKCache : public LruCache<Key, Value> {
  public:
    LruKCache(int capacity, int historyCapacity, int k)
        : LruCache<Key, Value>(capacity)  // base listed first: matches the actual initialization order
        , k_(k)
        , historyList_(std::make_unique<LruCache<Key, size_t>>(historyCapacity)) {}

    // Re-expose the base overloads (bool get(Key, Value&)) that the
    // get(Key) override below would otherwise hide from name lookup.
    using LruCache<Key, Value>::get;

    /// Returns the cached value; a miss counts as a history access and may
    /// promote the key into the main cache once it reaches k_ accesses.
    Value get(Key key) override {
        Value val{};

        // Hit in the main cache: done.
        if (LruCache<Key, Value>::get(key, val)) {
            return val;
        }

        // Miss: bump the history counter (historyList_ returns 0 for unknown keys).
        size_t historyAccessCnt = historyList_->get(key);
        ++historyAccessCnt;
        historyList_->put(key, historyAccessCnt);

        if (historyAccessCnt >= static_cast<size_t>(k_)) {
            auto it = historyValMap_.find(key);
            if (it != historyValMap_.end()) {
                Value storedVal = it->second;

                // Promote: move the entry out of the pending structures.
                historyList_->remove(key);
                historyValMap_.erase(it);

                LruCache<Key, Value>::put(key, storedVal);

                return storedVal;
            }
            // Reached k_ accesses but no value was ever put(): nothing to promote.
        }

        return val;  // Value{} — no cached value available
    }

    void put(Key key, Value val) override {
        Value existingVal{};

        // Already promoted: update the main cache in place.
        if (LruCache<Key, Value>::get(key, existingVal)) {
            LruCache<Key, Value>::put(key, val);
            return;
        }

        size_t historyAccessCnt = historyList_->get(key);
        ++historyAccessCnt;
        historyList_->put(key, historyAccessCnt);

        // Remember the pending value until the key earns promotion.
        historyValMap_[key] = val;

        if (historyAccessCnt >= static_cast<size_t>(k_)) {
            historyList_->remove(key);
            historyValMap_.erase(key);
            LruCache<Key, Value>::put(key, val);
        }
    }

  private:
    int k_;                                               // accesses required before promotion
    std::unique_ptr<LruCache<Key, size_t>> historyList_;  // access history: <key, access count>
    std::unordered_map<Key, Value> historyValMap_;        // values not yet accessed k_ times
};

/// Sharded LRU cache: the key space is hash-partitioned across sliceNum_
/// independent LruCache slices, each with its own lock, to cut contention
/// under concurrent access.
template <typename Key, typename Value>
class HashLruCaches : public CachePolicy<Key, Value> {
  public:
    HashLruCaches(size_t capacity, const unsigned int sliceNum)
        : capacity_(capacity)
        , sliceNum_(sliceNum > 0 ? sliceNum : std::thread::hardware_concurrency())  //! number of threads the OS can run concurrently
    {
        if (sliceNum_ == 0)
            sliceNum_ = 1;  // hardware_concurrency() may report 0; avoid modulo-by-zero in put/get
        //! Per-slice capacity, rounded up so the slices cover at least `capacity`.
        size_t sliceSize = static_cast<size_t>(std::ceil(capacity / static_cast<double>(sliceNum_)));
        // BUG FIX: iterate over the resolved member sliceNum_, not the raw
        // parameter — with sliceNum == 0 the old loop built no slices while
        // put/get still indexed lruSliceCaches_[Hash(key) % sliceNum_].
        for (size_t i = 0; i < sliceNum_; ++i) {
            lruSliceCaches_.emplace_back(std::make_unique<LruCache<Key, Value>>(static_cast<int>(sliceSize)));
        }
    }

    void put(Key key, Value val) override {
        size_t sliceIdx = Hash(key) % sliceNum_;
        lruSliceCaches_[sliceIdx]->put(key, val);
    }

    bool get(Key key, Value &val) override {
        size_t sliceIdx = Hash(key) % sliceNum_;
        return lruSliceCaches_[sliceIdx]->get(key, val);
    }

    /// Returns Value{} on a miss.
    Value get(Key key) override {
        // BUG FIX: dropped memset(&val, 0, sizeof(val)) — undefined behavior
        // for non-trivially-copyable Value types (e.g. std::string), and
        // redundant anyway: Value{} already value-initializes.
        Value val{};
        get(key, val);
        return val;
    }

  private:
    /// Maps a key to its slice index via std::hash (specialized for common types).
    size_t Hash(Key key) {
        std::hash<Key> hashFunc;
        return hashFunc(key);
    }

  private:
    size_t capacity_{0};                                                 // total capacity across all slices
    unsigned int sliceNum_{0};                                           // number of shards
    std::vector<std::unique_ptr<LruCache<Key, Value>>> lruSliceCaches_;  // one LruCache per shard
};
}  // namespace Cache