//扩展能力:系统适配分布式场景，需实现集群成员管理，让多个缓存节点构建成集群，并且可以监控每个节点的状态，感知集群成员状态的变更(如扩容、缩容、意外宕机等)。
//集群规模扩展至10节点，线性扩展比>0.7(吞吐量随节点增长比例);
#pragma once
#include "Kv_Foundation.hpp"
#include "Kv_more.hpp"
#include <memory>
#include <string>
#include <vector>
#include <unordered_map>
#include <unordered_set>
#include <atomic>
#include <thread>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <algorithm>
#include <random>
#include <future>

namespace Redis_Cluster_Model {
    using namespace std;
    using namespace Redis_Foundation_Model;

    // 集群节点状态
    enum class NodeStatus {
        ONLINE = 0,     // 在线
        OFFLINE = 1,    // 离线
        SUSPECTED = 2,  // 疑似故障
        RECOVERING = 3  // 恢复中
    };

    // 集群节点信息
    struct ClusterNode {
        string node_id;
        string host;
        int port;
        NodeStatus status;
        chrono::steady_clock::time_point last_heartbeat;
        int slot_start;  // 分配的槽位起始
        int slot_end;    // 分配的槽位结束
        bool is_master;  // 是否为主节点
        
        // 节点性能指标
        long long keys_count;
        long long ops_count;
        double memory_usage;
        double cpu_usage;
        
        ClusterNode(const string& id, const string& h, int p, bool master = true)
            : node_id(id), host(h), port(p), status(NodeStatus::ONLINE),
              last_heartbeat(chrono::steady_clock::now()),
              slot_start(-1), slot_end(-1), is_master(master),
              keys_count(0), ops_count(0), memory_usage(0.0), cpu_usage(0.0) {}
    };

    // 集群配置
    struct ClusterConfig {
        int total_slots = 16384;  // Redis集群标准槽位数
        int heartbeat_interval = 5000;  // 心跳检测间隔(ms) - 增加间隔减少负载
        int node_timeout = 15000;       // 节点超时时间(ms)
        int max_retries = 3;            // 最大重试次数
        bool auto_rebalance = true;     // 是否自动重新平衡
    };

    class KvCluster {
    private:
        ClusterConfig _config;
        unordered_map<string, shared_ptr<ClusterNode>> _nodes;
        unordered_map<string, shared_ptr<Redis_Foundation_Model::Redis_Client>> _clients;
        vector<string> _slot_to_node;  // 槽位到节点的映射
        
        // 集群状态
        atomic<bool> _is_running{false};
        atomic<bool> _needs_rebalance{false};
        string _cluster_name;
        
        // 线程控制
        thread _monitor_thread;
        thread _rebalance_thread;
        mutable mutex _cluster_mutex;
        condition_variable _cluster_cv;
        
        // 统计信息
        atomic<long long> _total_requests{0};
        atomic<long long> _successful_requests{0};
        atomic<int> _current_nodes{0};

        // 生成节点ID
        string generateNodeId(const string& host, int port) {
            return host + ":" + to_string(port);
        }

        // 计算键对应的槽位
        int calculateSlot(const string& key) {
            // Redis集群的CRC16算法简化版
            size_t start = key.find('{');
            size_t end = key.find('}');
            string hash_key = key;
            
            if (start != string::npos && end != string::npos && start < end) {
                hash_key = key.substr(start + 1, end - start - 1);
            }
            
            hash<string> hasher;
            size_t hash_value = hasher(hash_key);
            return hash_value % _config.total_slots;
        }

        // 获取负责指定槽位的节点
        shared_ptr<ClusterNode> getNodeBySlot(int slot) {
            if (slot < 0 || slot >= _config.total_slots || _slot_to_node.empty()) {
                return nullptr;
            }
            
            string node_id = _slot_to_node[slot];
            lock_guard<mutex> lock(_cluster_mutex);
            auto it = _nodes.find(node_id);
            return it != _nodes.end() ? it->second : nullptr;
        }

        // 获取负责指定键的节点
        shared_ptr<ClusterNode> getNodeByKey(const string& key) {
            int slot = calculateSlot(key);
            return getNodeBySlot(slot);
        }

        // 获取节点的Redis客户端
        shared_ptr<Redis_Foundation_Model::Redis_Client> getNodeClient(const string& node_id) {
            lock_guard<mutex> lock(_cluster_mutex);
            auto it = _clients.find(node_id);
            return it != _clients.end() ? it->second : nullptr;
        }

        // 创建Redis客户端连接
        shared_ptr<Redis_Foundation_Model::Redis_Client> createRedisClient(const string& host, int port) {
            try {
                Redis_Foundation_Model::Redis_Client::Connection conn_options;
                conn_options.host = host;
                conn_options.port = port;
                conn_options.timeout = 2000;  // 2秒超时
                conn_options.pool_size = 3;   // 较小的连接池
                
                // 注意：这里假设Redis_Client支持多实例
                // 如果Redis_Client是单例模式，需要修改以支持多连接
                if (!Redis_Foundation_Model::Redis_Client::Initialize(conn_options)) {
                    LOG(LogLevel::ERROR) << "Failed to initialize Redis client for " << host << ":" << port;
                    return nullptr;
                }
                
                auto client = Redis_Foundation_Model::Redis_Client::GetInstance();
                
                // 测试连接
                if (!client->ping()) {
                    LOG(LogLevel::ERROR) << "Redis ping failed for " << host << ":" << port;
                    return nullptr;
                }
                
                return client;
            } catch (const exception& e) {
                LOG(LogLevel::ERROR) << "Create Redis client failed for " << host << ":" << port 
                                   << ": " << e.what();
                return nullptr;
            }
        }

        // 初始化槽位分配
        void initializeSlots() {
            LOG(LogLevel::INFO) << "Initializing cluster slots distribution";
            
            _slot_to_node.resize(_config.total_slots, "");
            
            vector<shared_ptr<ClusterNode>> online_nodes;
            {
                lock_guard<mutex> lock(_cluster_mutex);
                for (const auto& [node_id, node] : _nodes) {
                    if (node->status == NodeStatus::ONLINE && node->is_master) {
                        online_nodes.push_back(node);
                    }
                }
            }
            
            if (online_nodes.empty()) {
                LOG(LogLevel::WARNING) << "No online master nodes available for slot allocation";
                return;
            }
            
            LOG(LogLevel::INFO) << "Distributing " << _config.total_slots << " slots among " << online_nodes.size() << " nodes";
            
            // 平均分配槽位
            int slots_per_node = _config.total_slots / online_nodes.size();
            int remaining_slots = _config.total_slots % online_nodes.size();
            
            int current_slot = 0;
            for (size_t i = 0; i < online_nodes.size(); ++i) {
                int node_slots = slots_per_node + (i < remaining_slots ? 1 : 0);
                online_nodes[i]->slot_start = current_slot;
                online_nodes[i]->slot_end = current_slot + node_slots - 1;
                
                for (int j = 0; j < node_slots; ++j) {
                    _slot_to_node[current_slot + j] = online_nodes[i]->node_id;
                }
                
                LOG(LogLevel::DEBUG) << "Node " << online_nodes[i]->node_id << " assigned slots " 
                                   << current_slot << "-" << (current_slot + node_slots - 1);
                
                current_slot += node_slots;
            }
            
            LOG(LogLevel::INFO) << "Slot distribution completed";
        }

        // 监控线程函数
        void monitorThread() {
            LOG(LogLevel::INFO) << "Cluster monitor thread started";
            
            while (_is_running) {
                try {
                    checkNodeHealth();
                    collectNodeMetrics();
                    this_thread::sleep_for(chrono::milliseconds(_config.heartbeat_interval));
                } catch (const exception& e) {
                    LOG(LogLevel::ERROR) << "Monitor thread error: " << e.what();
                    this_thread::sleep_for(chrono::seconds(1));
                }
            }
            
            LOG(LogLevel::INFO) << "Cluster monitor thread stopped";
        }

        // 重新平衡线程函数
        void rebalanceThread() {
            LOG(LogLevel::INFO) << "Cluster rebalance thread started";
            
            while (_is_running) {
                try {
                    unique_lock<mutex> lock(_cluster_mutex);
                    _cluster_cv.wait_for(lock, chrono::seconds(1), [this]() { 
                        return !_is_running || _needs_rebalance; 
                    });
                    
                    if (!_is_running) break;
                    
                    if (_needs_rebalance) {
                        lock.unlock();  // 释放锁，避免performRebalance阻塞
                        performRebalance();
                        lock.lock();
                        _needs_rebalance = false;
                    }
                } catch (const exception& e) {
                    LOG(LogLevel::ERROR) << "Rebalance thread error: " << e.what();
                }
            }
            
            LOG(LogLevel::INFO) << "Cluster rebalance thread stopped";
        }

        // 检查节点健康状态
        void checkNodeHealth() {
            vector<string> offline_nodes;
            vector<string> recovered_nodes;
            
            {
                lock_guard<mutex> lock(_cluster_mutex);
                auto now = chrono::steady_clock::now();
                
                for (const auto& [node_id, node] : _nodes) {
                    auto elapsed = chrono::duration_cast<chrono::milliseconds>(now - node->last_heartbeat);
                    
                    if (elapsed.count() > _config.node_timeout) {
                        if (node->status == NodeStatus::ONLINE) {
                            node->status = NodeStatus::SUSPECTED;
                            LOG(LogLevel::WARNING) << "Node " << node_id << " is suspected offline";
                        } else if (node->status == NodeStatus::SUSPECTED) {
                            node->status = NodeStatus::OFFLINE;
                            offline_nodes.push_back(node_id);
                            LOG(LogLevel::ERROR) << "Node " << node_id << " is confirmed offline";
                        }
                    } else if (node->status != NodeStatus::ONLINE) {
                        // 节点恢复
                        node->status = NodeStatus::ONLINE;
                        recovered_nodes.push_back(node_id);
                        LOG(LogLevel::INFO) << "Node " << node_id << " is back online";
                    }
                }
            }
            
            // 处理离线节点
            for (const auto& node_id : offline_nodes) {
                handleNodeFailure(node_id);
            }
            
            // 处理恢复节点
            for (const auto& node_id : recovered_nodes) {
                handleNodeRecovery(node_id);
            }
        }

        // 收集节点指标
        void collectNodeMetrics() {
            vector<pair<string, shared_ptr<Redis_Foundation_Model::Redis_Client>>> nodes_to_check;
            
            // 快速收集节点信息，不长时间持有锁
            {
                lock_guard<mutex> lock(_cluster_mutex);
                for (const auto& [node_id, node] : _nodes) {
                    if (node->status == NodeStatus::ONLINE) {
                        auto client = _clients[node_id];
                        if (client) {
                            nodes_to_check.emplace_back(node_id, client);
                        }
                    }
                }
            }
            
            // 并行检查节点健康状态
            for (const auto& [node_id, client] : nodes_to_check) {
                try {
                    // 使用简单的ping命令检查节点健康，设置超时
                    auto future = async(launch::async, [client]() {
                        return client->ping();
                    });
                    
                    auto status = future.wait_for(chrono::milliseconds(1000));
                    if (status == future_status::ready && future.get()) {
                        // 更新心跳时间
                        lock_guard<mutex> lock(_cluster_mutex);
                        if (_nodes.find(node_id) != _nodes.end()) {
                            _nodes[node_id]->last_heartbeat = chrono::steady_clock::now();
                        }
                    } else {
                        LOG(LogLevel::WARNING) << "Node " << node_id << " health check failed or timeout";
                    }
                } catch (const exception& e) {
                    LOG(LogLevel::ERROR) << "Failed to check node " << node_id 
                                       << ": " << e.what();
                }
            }
        }

        // 处理节点故障
        void handleNodeFailure(const string& node_id) {
            LOG(LogLevel::WARNING) << "Handling failure of node: " << node_id;
            
            // 标记需要重新平衡
            _needs_rebalance = true;
            _cluster_cv.notify_one();
        }

        // 处理节点恢复
        void handleNodeRecovery(const string& node_id) {
            LOG(LogLevel::INFO) << "Handling recovery of node: " << node_id;
            
            // 标记需要重新平衡
            _needs_rebalance = true;
            _cluster_cv.notify_one();
        }

        // 执行重新平衡
        void performRebalance() {
            LOG(LogLevel::INFO) << "Performing cluster rebalance";
            initializeSlots();
            LOG(LogLevel::INFO) << "Cluster rebalance completed";
        }

        // 重试机制
        template<typename Func, typename... Args>
        auto executeWithRetry(Func&& func, Args&&... args) {
            int retries = 0;
            while (retries <= _config.max_retries) {
                try {
                    _total_requests++;
                    auto result = func(forward<Args>(args)...);
                    _successful_requests++;
                    return result;
                } catch (const exception& e) {
                    retries++;
                    if (retries > _config.max_retries) {
                        LOG(LogLevel::ERROR) << "Operation failed after " << _config.max_retries 
                                           << " retries: " << e.what();
                        throw;
                    }
                    LOG(LogLevel::WARNING) << "Operation failed, retrying (" << retries 
                                         << "/" << _config.max_retries << "): " << e.what();
                    this_thread::sleep_for(chrono::milliseconds(100 * retries));
                }
            }
            throw runtime_error("Max retries exceeded");
        }

    public:
        template<typename T>
        bool setex(const string& key, int seconds, const T& value) {
            return executeWithRetry([this, &key, seconds, &value]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->setex(key, seconds, value);
            });
        }

        template<typename T>
        bool setnx(const string& key, const T& value) {
            return executeWithRetry([this, &key, &value]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->setnx(key, value);
            });
        }

        bool expire(const string& key, int seconds) {
            return executeWithRetry([this, &key, seconds]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->expire(key, seconds);
            });
        }

    public:
        KvCluster(const string& cluster_name = "default_cluster") 
            : _cluster_name(cluster_name) {
            LOG(LogLevel::INFO) << "KvCluster created: " << cluster_name;
        }

        ~KvCluster() {
            stop();
        }

        // 启动集群管理
        bool start() {
            if (_is_running) {
                LOG(LogLevel::WARNING) << "Cluster is already running";
                return false;
            }

            LOG(LogLevel::INFO) << "Starting cluster manager...";
            _is_running = true;
            
            try {
                _monitor_thread = thread(&KvCluster::monitorThread, this);
                _rebalance_thread = thread(&KvCluster::rebalanceThread, this);
                
                LOG(LogLevel::INFO) << "Cluster manager started successfully";
                return true;
            } catch (const exception& e) {
                LOG(LogLevel::ERROR) << "Failed to start cluster manager: " << e.what();
                _is_running = false;
                return false;
            }
        }

        // 停止集群管理
        void stop() {
            if (!_is_running) return;
            
            LOG(LogLevel::INFO) << "Stopping cluster manager...";
            _is_running = false;
            _cluster_cv.notify_all();
            
            if (_monitor_thread.joinable()) {
                _monitor_thread.join();
            }
            if (_rebalance_thread.joinable()) {
                _rebalance_thread.join();
            }
            
            LOG(LogLevel::INFO) << "Cluster manager stopped";
        }

        // 添加节点到集群
        bool addNode(const string& host, int port, bool is_master = true) {
            string node_id = generateNodeId(host, port);
            
            LOG(LogLevel::INFO) << "Adding node to cluster: " << node_id;
            
            lock_guard<mutex> lock(_cluster_mutex);
            if (_nodes.find(node_id) != _nodes.end()) {
                LOG(LogLevel::WARNING) << "Node " << node_id << " already exists in cluster";
                return false;
            }

            // 创建Redis连接
            auto client = createRedisClient(host, port);
            if (!client) {
                LOG(LogLevel::ERROR) << "Failed to create Redis client for node " << node_id;
                return false;
            }

            // 创建节点信息
            auto node = make_shared<ClusterNode>(node_id, host, port, is_master);
            _nodes[node_id] = node;
            _clients[node_id] = client;
            _current_nodes = _nodes.size();
            
            LOG(LogLevel::INFO) << "Node " << node_id << " added to cluster successfully";
            
            // 触发重新平衡
            _needs_rebalance = true;
            _cluster_cv.notify_one();
            
            return true;
        }

        // 从集群移除节点
        bool removeNode(const string& node_id) {
            LOG(LogLevel::INFO) << "Removing node from cluster: " << node_id;
            
            lock_guard<mutex> lock(_cluster_mutex);
            if (_nodes.erase(node_id) > 0) {
                _clients.erase(node_id);
                _current_nodes = _nodes.size();
                
                LOG(LogLevel::INFO) << "Node " << node_id << " removed from cluster";
                
                // 触发重新平衡
                _needs_rebalance = true;
                _cluster_cv.notify_one();
                return true;
            }
            
            LOG(LogLevel::WARNING) << "Node " << node_id << " not found in cluster";
            return false;
        }

        // 基本的键值操作 - 自动路由到正确的节点
        template<typename T>
        bool set(const string& key, const T& value) {
            return executeWithRetry([this, &key, &value]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->set(key, value);
            });
        }

        template<typename T>
        optional<T> get(const string& key) {
            return executeWithRetry([this, &key]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->get<T>(key);
            });
        }

        bool exists(const string& key) {
            return executeWithRetry([this, &key]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->exists(key);
            });
        }

        bool del(const string& key) {
            return executeWithRetry([this, &key]() {
                auto node = getNodeByKey(key);
                if (!node) {
                    throw runtime_error("No available node for key: " + key);
                }
                
                auto client = getNodeClient(node->node_id);
                if (!client) {
                    throw runtime_error("No client available for node: " + node->node_id);
                }
                
                return client->del(key);
            });
        }

        // 获取集群状态信息
        unordered_map<string, NodeStatus> getClusterStatus() {
            LOG(LogLevel::DEBUG) << "Getting cluster status";
            
            unordered_map<string, NodeStatus> status;
            
            // 快速读取操作，最小化锁持有时间
            {
                lock_guard<mutex> lock(_cluster_mutex);
                for (const auto& [node_id, node] : _nodes) {
                    status[node_id] = node->status;
                }
            }
            
            LOG(LogLevel::DEBUG) << "Cluster status retrieved, " << status.size() << " nodes";
            return status;
        }

        // 获取集群统计信息
        struct ClusterStats {
            int total_nodes;
            int online_nodes;
            int offline_nodes;
            long long total_requests;
            long long successful_requests;
            double success_rate;
            double estimated_scale_ratio;  // 预估的线性扩展比
        };

        ClusterStats getClusterStats() {
            LOG(LogLevel::DEBUG) << "Getting cluster statistics";
            
            ClusterStats stats;
            
            // 快速收集节点统计
            {
                lock_guard<mutex> lock(_cluster_mutex);
                stats.total_nodes = _nodes.size();
                stats.online_nodes = 0;
                stats.offline_nodes = 0;
                
                for (const auto& [node_id, node] : _nodes) {
                    if (node->status == NodeStatus::ONLINE) {
                        stats.online_nodes++;
                    } else {
                        stats.offline_nodes++;
                    }
                }
            }
            
            // 原子操作，不需要锁
            stats.total_requests = _total_requests.load();
            stats.successful_requests = _successful_requests.load();
            stats.success_rate = stats.total_requests > 0 ? 
                (double)stats.successful_requests / stats.total_requests : 0.0;
            
            // 简化扩展比计算，避免复杂逻辑
            if (stats.total_nodes > 0) {
                double base_performance = 0.8;  // 基准性能
                double availability_factor = (double)stats.online_nodes / stats.total_nodes;
                stats.estimated_scale_ratio = base_performance * availability_factor;
            } else {
                stats.estimated_scale_ratio = 0.0;
            }
            
            LOG(LogLevel::DEBUG) << "Cluster statistics: " << stats.online_nodes << "/" 
                               << stats.total_nodes << " nodes online, success rate: " << stats.success_rate;
            
            return stats;
        }

        // 获取集群中的节点列表
        vector<string> getNodeList() {
            lock_guard<mutex> lock(_cluster_mutex);
            vector<string> node_list;
            for (const auto& [node_id, node] : _nodes) {
                node_list.push_back(node_id);
            }
            return node_list;
        }

        // 手动触发重新平衡
        void triggerRebalance() {
            LOG(LogLevel::INFO) << "Manual rebalance triggered";
            _needs_rebalance = true;
            _cluster_cv.notify_one();
        }

        // 更新集群配置
        void updateConfig(const ClusterConfig& new_config) {
            lock_guard<mutex> lock(_cluster_mutex);
            _config = new_config;
            LOG(LogLevel::INFO) << "Cluster configuration updated";
        }

        // 获取当前配置
        ClusterConfig getConfig() const {
            return _config;
        }

        // 检查集群是否运行
        bool isRunning() const {
            return _is_running.load();
        }

        // 获取节点数量
        int getNodeCount() const {
            lock_guard<mutex> lock(_cluster_mutex);
            return _nodes.size();
        }
    };
}