// *****************************************************
// >> File Name: /home/diabio/project/al_cache/al_cache/lru_cache.h
// >> Author: diabiolin
// >> Created Time: Wed 16 Apr 2025 12:58:43 PM CST
// >> Description: LRU cache algorithms (plain LRU, LRU-K, and hash-sharded variants)
// *****************************************************
#pragma once 
#include "cache_policy.h"
#include <unordered_map>
#include <mutex>
#include <memory>
#include <cmath>
#include <thread>
#include <vector>

namespace al_cache{

// Forward declaration so LruNode can befriend its owning cache.
template <typename Key, typename Value> class LruCache;

// One entry of the intrusive doubly linked recency list.
template <typename Key, typename Value>
class LruNode{
public:
	// Sink parameters: take by value and move into the members, so
	// rvalue callers save a copy while lvalue callers behave as before.
	LruNode(Key key, Value value)
		: key_(std::move(key))
		, value_(std::move(value))
		, accessCount_(1)  // construction counts as the first access
		, prev_()
		, next_(nullptr){}

	// Accessors
	Key getKey() const { return key_; }
	Value getValue() const { return value_; }
	void setValue(const Value& value) { value_ = value; }
	size_t getAccessCount() const { return accessCount_; }
	void incrementAccessCount() { accessCount_++; }

	friend class LruCache<Key, Value>;

private:
	Key key_;
	Value value_;
	size_t accessCount_;  // number of accesses (used by frequency-aware policies)

	// Doubly linked list hooks. prev_ is a weak_ptr to break the
	// reference cycle that two shared_ptr links would create.
	std::weak_ptr<LruNode<Key, Value>> prev_;
	std::shared_ptr<LruNode<Key, Value>> next_;
};


// Thread-safe LRU cache: an unordered_map for O(1) key lookup plus an
// intrusive doubly linked list (dummy head/tail sentinels) ordered by
// recency. next_ links are shared_ptr; prev_ links are weak_ptr so the
// list holds no reference cycles.
template <typename Key, typename Value>
class LruCache : public CachePolicy<Key, Value>{
public:
	using LruNodeType = LruNode<Key, Value>;
	using NodePtr = std::shared_ptr<LruNodeType>;
	using NodeMap = std::unordered_map<Key, NodePtr>;

	LruCache(int capacity)
		: capacity_(capacity)
		{
			initializeList();
		}

	~LruCache(){
		// Unlink every node iteratively. Letting the shared_ptr chain
		// destroy itself would recurse once per node through next_ and
		// can overflow the stack for very large caches.
		NodePtr node = dummyHead_;
		while(node){
			NodePtr next = node->next_;
			node->next_ = nullptr;
			node->prev_.reset();
			node = next;
		}
		nodeMap_.clear();
	}

	// Insert or update a key. The touched entry becomes most recent;
	// inserting into a full cache evicts the least recently used entry.
	void put(Key key, Value value) override {
		if(capacity_ <= 0){
			return;  // a zero/negative-capacity cache stores nothing
		}
		std::lock_guard<std::mutex> lock(mutex_);
		auto it = nodeMap_.find(key);
		if(it != nodeMap_.end()){
			updateExistingNode(it->second, value);
		}else{
			addNewNode(key, value);
		}
	}

	// Fetch a key into `value`; returns false on a miss. A hit also
	// refreshes the entry's recency.
	bool get(Key key, Value &value) override {
		std::lock_guard<std::mutex> lock(mutex_);
		auto it = nodeMap_.find(key);
		if(it != nodeMap_.end()){
			moveToMostRecent(it->second);
			value = it->second->getValue();
			return true;
		}
		return false;
	}

	// Convenience overload: returns a default-constructed Value on miss.
	Value get(Key key) override {
		Value value{};
		get(key, value);
		return value;
	}

	// Erase a key if present (used by LRU-K to drop history records).
	void remove(Key key){
		std::lock_guard<std::mutex> lock(mutex_);
		auto it = nodeMap_.find(key);
		if(it != nodeMap_.end()){
			removeNode(it->second);
			nodeMap_.erase(it);
		}
	}

private:
	int capacity_;      // maximum number of entries
	NodeMap nodeMap_;   // key -> node
	std::mutex mutex_;  // guards nodeMap_ and the list
	NodePtr dummyHead_; // sentinel before the least recently used node
	NodePtr dummyTail_; // sentinel after the most recently used node

	// Create the two sentinel nodes and link them together.
	void initializeList(){
		dummyHead_ = std::make_shared<LruNodeType>(Key(), Value());
		dummyTail_ = std::make_shared<LruNodeType>(Key(), Value());
		dummyHead_->next_ = dummyTail_;
		dummyTail_->prev_ = dummyHead_;
	}

	// Overwrite an existing node's value and mark it most recent.
	void updateExistingNode(NodePtr node, const Value& value){
		node->setValue(value);
		moveToMostRecent(node);
	}

	// Insert a brand-new key, evicting the LRU entry first if full.
	void addNewNode(const Key& key, const Value& value){
		// Cast avoids a signed/unsigned comparison; capacity_ > 0 here
		// because put() rejects non-positive capacities up front.
		if(nodeMap_.size() >= static_cast<size_t>(capacity_)){
			evictLeastRecent();
		}
		NodePtr newNode = std::make_shared<LruNodeType>(key, value);
		insertNode(newNode);
		nodeMap_[key] = newNode;
	}

	// Splice a node in just before the tail sentinel (most-recent slot).
	void insertNode(NodePtr node){
		auto prev = dummyTail_->prev_.lock();
		if(prev){
			node->prev_ = prev;
			node->next_ = dummyTail_;
			prev->next_ = node;
			dummyTail_->prev_ = node;
		}
	}

	// Detach a node from the list (its map entry is untouched).
	void removeNode(NodePtr node){
		auto prev = node->prev_.lock();
		if(prev){
			prev->next_ = node->next_;
		}
		if(node->next_){
			node->next_->prev_ = prev;
		}
	}

	// Drop the node right after the head sentinel (least recently used).
	void evictLeastRecent(){
		NodePtr leastRecent = dummyHead_->next_;
		removeNode(leastRecent);
		nodeMap_.erase(leastRecent->getKey());
	}
};

// LRU-K (inherits from LruCache): a key is admitted into the main cache
// only after it has been accessed at least k times; pending access
// counts live in a separate history LRU.
// NOTE(review): the history update and cache insertion are two separate
// locked operations, so a concurrent put/get can interleave between them.
template <typename Key, typename Value>
class LruKCache : public LruCache<Key, Value>{
public:
	LruKCache(int capacity, int historyCapacity, int k)
		: LruCache<Key, Value>(capacity)
		, historyList_(std::make_unique<LruCache<Key, size_t>>(historyCapacity))
		, k_(k)
		{}

	void put(Key key, Value value) override {
		Value temp{}; // dummy out-param so we can reuse bool get()
		if(LruCache<Key, Value>::get(key, temp)){
			// Already cached: refresh value/recency only. Returning here
			// keeps promoted keys from re-filling the history list.
			LruCache<Key, Value>::put(key, value);
			return;
		}

		// Record one more access; get() yields 0 for unseen keys.
		size_t historyCount = historyList_->get(key);
		historyList_->put(key, ++historyCount);

		if(historyCount >= k_){
			historyList_->remove(key);  // promote: drop the history entry
			LruCache<Key, Value>::put(key, value);  // insert into the cache
		}
	}

	Value get(Key key) override {
		// Every lookup counts as one access in the history list.
		size_t historyCount = historyList_->get(key);
		historyList_->put(key, ++historyCount);
		return LruCache<Key, Value>::get(key);
	}

private:
	// Declaration order matches the constructor initializer list
	// (members are initialized in declaration order, so a mismatch
	// triggers -Wreorder and misleads readers).
	std::unique_ptr<LruCache<Key, size_t>> historyList_;  // per-key access counts
	size_t k_;  // promotion threshold
};


// Sharded LRU: hash-partitions keys across several independent LruCache
// slices so concurrent callers contend on per-slice mutexes instead of
// one global lock.
template <typename Key, typename Value>
class HashLruCache : public CachePolicy<Key, Value>{
public:
	HashLruCache(size_t capacity, int sliceNum)
		: capacity_(capacity)
		, sliceNum_(sliceNum > 0 ? sliceNum : static_cast<int>(std::thread::hardware_concurrency()))
		{
			if(sliceNum_ <= 0){
				// hardware_concurrency() may legally return 0; a zero
				// slice count would divide (and modulo) by zero below.
				sliceNum_ = 1;
			}
			// Round up so the slices together hold at least `capacity`.
			size_t sliceSize = static_cast<size_t>(std::ceil(capacity / static_cast<double>(sliceNum_)));
			for(int i = 0; i < sliceNum_; i++){
				lruSliceCaches_.emplace_back(std::make_unique<LruCache<Key, Value>>(sliceSize));
			}
		}

	void put(Key key, Value value) override {
		size_t sliceIndex = Hash(key) % sliceNum_;
		lruSliceCaches_[sliceIndex]->put(key, value);
	}

	bool get(Key key, Value &value) override {
		size_t sliceIndex = Hash(key) % sliceNum_;
		return lruSliceCaches_[sliceIndex]->get(key, value);
	}

	// Convenience overload: default-constructed Value on miss.
	Value get(Key key) override {
		Value value{};
		get(key, value);
		return value;
	}

	void remove(Key key){
		size_t sliceIndex = Hash(key) % sliceNum_;
		lruSliceCaches_[sliceIndex]->remove(key);
	}

private:
	size_t capacity_;  // total capacity across all slices
	int sliceNum_;     // number of slices (always >= 1 after construction)
	std::vector<std::unique_ptr<LruCache<Key, Value>>> lruSliceCaches_; // one LRU per slice

	// Map a key to a slice via std::hash.
	size_t Hash(Key key){
		std::hash<Key> hashFunc;
		return hashFunc(key);
	}
};

// Sharded LRU-K: combines HashLruCache sharding with the LRU-K admission
// rule (a key enters the main cache only after k recorded accesses).
template <typename Key, typename Value>
class HashLruKCache : public HashLruCache<Key, Value>{
public:
	HashLruKCache(size_t capacity, int historyCapacity, int sliceNum, int k)
		: HashLruCache<Key, Value>(capacity, sliceNum)
		, historyList_(std::make_unique<HashLruCache<Key, size_t>>(historyCapacity, sliceNum))
		, k_(k)
		{}

	void put(Key key, Value value) override {
		Value temp{}; // dummy out-param so we can reuse bool get()
		if(HashLruCache<Key, Value>::get(key, temp)){
			// Already cached: refresh value/recency only. Returning here
			// keeps promoted keys from re-filling the history list.
			HashLruCache<Key, Value>::put(key, value);
			return;
		}

		// Record one more access; get() yields 0 for unseen keys.
		size_t historyCount = historyList_->get(key);
		historyList_->put(key, ++historyCount);

		if(historyCount >= static_cast<size_t>(k_)){
			historyList_->remove(key);  // promote: drop the history entry
			HashLruCache<Key, Value>::put(key, value);
		}
	}

	Value get(Key key) override {
		// Every lookup counts as one access in the history list.
		size_t historyCount = historyList_->get(key);
		historyList_->put(key, ++historyCount);
		return HashLruCache<Key, Value>::get(key);
	}

private:
	// Declaration order matches the constructor initializer list
	// (members initialize in declaration order; mismatch = -Wreorder).
	std::unique_ptr<HashLruCache<Key, size_t>> historyList_;  // per-key access counts
	int k_;  // promotion threshold
};


} // namespace al_cache
