#ifndef CONCURRENT_PLAYER_TEST_H
#define CONCURRENT_PLAYER_TEST_H

#include "../http/HttpServer.h"
#include "../curl/CurlClient.h"
#include "../http/HttpData.h"
#include "../common/global.h"
#include "../common/CCReactor.h"
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cmath>
#include <condition_variable>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <queue>
#include <random>
#include <sstream>
#include <string>
#include <thread>
#include <tuple>
#include <vector>

// Aggregated performance metrics shared by all simulated players.
// Scalar counters are atomics so worker threads can update them without
// locks; the raw per-sample vector is protected by latency_mutex.
struct PerformanceMetrics {
    std::atomic<uint64_t> total_requests{0};
    std::atomic<uint64_t> successful_requests{0};
    std::atomic<uint64_t> failed_requests{0};
    std::atomic<uint64_t> timeout_requests{0};

    std::atomic<double> total_latency{0.0};   // running sum of all latencies, in ms
    std::atomic<uint64_t> latency_samples{0}; // number of samples folded into total_latency

    std::atomic<double> min_latency{999999.0}; // sentinel; any real sample is smaller
    std::atomic<double> max_latency{0.0};

    // Every individual sample, kept for percentile computation.
    std::vector<double> latency_samples_detailed;
    mutable std::mutex latency_mutex; // mutable so const readers can lock it

    // Latency distribution buckets (each sample lands in exactly one).
    std::atomic<uint64_t> latency_under_10ms{0};
    std::atomic<uint64_t> latency_under_50ms{0};
    std::atomic<uint64_t> latency_under_100ms{0};
    std::atomic<uint64_t> latency_over_100ms{0};

    // Clear all counters and samples before a new test run.
    // Not safe to call while workers are still adding samples.
    void reset() {
        total_requests = 0;
        successful_requests = 0;
        failed_requests = 0;
        timeout_requests = 0;
        total_latency = 0.0;
        latency_samples = 0;
        min_latency = 999999.0;
        max_latency = 0.0;

        {
            std::lock_guard<std::mutex> lock(latency_mutex);
            latency_samples_detailed.clear();
        }

        latency_under_10ms = 0;
        latency_under_50ms = 0;
        latency_under_100ms = 0;
        latency_over_100ms = 0;
    }

    // Record one request latency (in milliseconds). Thread-safe.
    void add_latency_sample(double latency_ms) {
        // std::atomic<double> has no fetch_add before C++20, and a plain
        // load()+store() pair loses concurrent updates — use a CAS loop.
        double sum = total_latency.load(std::memory_order_relaxed);
        while (!total_latency.compare_exchange_weak(sum, sum + latency_ms)) {
        }
        latency_samples++;

        // CAS loops keep min/max correct under concurrent writers; a naive
        // compare-then-store could overwrite a better value set in between.
        double cur_min = min_latency.load(std::memory_order_relaxed);
        while (latency_ms < cur_min &&
               !min_latency.compare_exchange_weak(cur_min, latency_ms)) {
        }
        double cur_max = max_latency.load(std::memory_order_relaxed);
        while (latency_ms > cur_max &&
               !max_latency.compare_exchange_weak(cur_max, latency_ms)) {
        }

        {
            std::lock_guard<std::mutex> lock(latency_mutex);
            latency_samples_detailed.push_back(latency_ms);
        }

        // Latency distribution buckets.
        if (latency_ms < 10) latency_under_10ms++;
        else if (latency_ms < 50) latency_under_50ms++;
        else if (latency_ms < 100) latency_under_100ms++;
        else latency_over_100ms++;
    }

    // Mean latency in ms, or 0.0 when no samples were recorded.
    double get_average_latency() const {
        uint64_t samples = latency_samples.load();
        return samples > 0 ? total_latency.load() / samples : 0.0;
    }

    // Latency (ms) at the given percentile (0-100), or 0.0 with no samples.
    // Copies the samples under the lock so sorting happens outside it.
    double get_latency_percentile(double percentile) const {
        std::vector<double> samples_copy;
        {
            std::lock_guard<std::mutex> lock(latency_mutex);
            samples_copy = latency_samples_detailed;
        }

        if (samples_copy.empty()) return 0.0;

        std::sort(samples_copy.begin(), samples_copy.end());

        size_t index = static_cast<size_t>(percentile * samples_copy.size() / 100.0);
        if (index >= samples_copy.size()) index = samples_copy.size() - 1;

        return samples_copy[index];
    }
};

// Simulates one player: a dedicated thread fires HTTP requests at the
// configured rate (with random jitter) for the configured duration and
// records each result into the shared PerformanceMetrics.
class PlayerSimulator {
public:
    // player_id: unique id reported in the X-Player-Id header.
    // requests_per_second: target request rate for this player (must be > 0).
    // test_duration_seconds: how long the worker keeps sending.
    // metrics: shared sink for latency/success counters (thread-safe).
    // server_url: base URL, e.g. "http://127.0.0.1:8083".
    PlayerSimulator(int player_id, int requests_per_second, int test_duration_seconds, 
                   PerformanceMetrics& metrics, const std::string& server_url)
        : player_id_(player_id), 
          requests_per_second_(requests_per_second),
          test_duration_seconds_(test_duration_seconds),
          metrics_(metrics),
          server_url_(server_url),
          running_(false),
          request_interval_ms_(1000.0 / requests_per_second) {
    }

    // Join the worker on destruction: destroying a joinable std::thread
    // calls std::terminate, so a forgotten stop() must not crash the test.
    ~PlayerSimulator() {
        stop();
    }

    // Spawn the worker thread. Call at most once before stop().
    void start() {
        running_ = true;
        thread_ = std::thread(&PlayerSimulator::run, this);
    }

    // Signal the worker to exit and wait for it. Safe to call repeatedly.
    void stop() {
        running_ = false;
        if (thread_.joinable()) {
            thread_.join();
        }
    }

    // Counters are atomic: written by the worker thread and possibly read
    // concurrently from the controlling thread.
    uint64_t get_requests_sent() const { return requests_sent_.load(); }
    uint64_t get_requests_completed() const { return requests_completed_.load(); }

private:
    // Worker loop: paces requests at request_interval_ms_ plus ~10%
    // Gaussian jitter until the duration elapses or stop() is called.
    void run() {
        std::random_device rd;
        std::mt19937 gen(rd()); // one generator per worker, reused by every request
        std::normal_distribution<> timing_dist(0.0, request_interval_ms_ * 0.1); // 10% timing jitter

        auto start_time = std::chrono::steady_clock::now();
        auto next_request_time = start_time;

        while (running_) {
            auto current_time = std::chrono::steady_clock::now();
            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(current_time - start_time);

            if (elapsed.count() >= test_duration_seconds_) {
                break;
            }

            if (current_time >= next_request_time) {
                send_request(gen);

                // Schedule the next request with random jitter so players
                // do not fire in lockstep.
                auto jitter = std::chrono::milliseconds(static_cast<int>(timing_dist(gen)));
                next_request_time = current_time + 
                    std::chrono::milliseconds(static_cast<int>(request_interval_ms_)) + jitter;
            }

            std::this_thread::sleep_for(std::chrono::milliseconds(1));
        }
    }

    // Issue one synchronous GET /player_action and record the outcome.
    void send_request(std::mt19937& gen) {
        auto start_time = std::chrono::steady_clock::now();

        // Fresh client per request models independent player connections.
        CurlClient client;
        if (!client.Initialize()) {
            metrics_.failed_requests++;
            return;
        }

        stHttpRequest req;
        req.method = "GET";
        req.url = server_url_ + "/player_action";
        req.heads["User-Agent"] = "PlayerSimulator/1.0";
        req.heads["X-Player-Id"] = std::to_string(player_id_);
        req.heads["X-Request-Id"] = std::to_string(++requests_sent_);
        req.heads["X-Request-Time"] = std::to_string(
            std::chrono::duration_cast<std::chrono::milliseconds>(
                start_time.time_since_epoch()).count());

        // Pick a random player action in [1, 5].
        std::uniform_int_distribution<> action_dist(1, 5);
        std::string action = std::to_string(action_dist(gen));

        req.heads["X-Player-Action"] = action;

        stHttpResponse resp;
        int result = client.Request(std::move(req), resp);

        auto end_time = std::chrono::steady_clock::now();
        auto latency = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
        double latency_ms = latency.count() / 1000.0;

        metrics_.total_requests++;
        metrics_.add_latency_sample(latency_ms);

        if (result == 0) {
            metrics_.successful_requests++;
        } else {
            metrics_.failed_requests++;
        }

        requests_completed_++;
    }

    int player_id_;
    int requests_per_second_;
    int test_duration_seconds_;
    PerformanceMetrics& metrics_;
    std::string server_url_;
    std::atomic<bool> running_;
    std::thread thread_;
    // Atomic: written on the worker thread, read via the public getters.
    std::atomic<uint64_t> requests_sent_{0};
    std::atomic<uint64_t> requests_completed_{0};
    double request_interval_ms_;
};

// Concurrent load tester: owns the HTTP game server, spawns player
// simulators against it, and reports throughput/latency statistics.
class ConcurrentPlayerTest {
public:
    ConcurrentPlayerTest() : server_running_(false), server_port_(8083) {}
    
    // Start the global reactor plus an HTTP server on 127.0.0.1:8083 that
    // answers /player_action with a small JSON payload.
    // Returns true once the server is listening.
    bool setup_server() {
        std::cout << "Setting up concurrent player test server..." << std::endl;
        
        // Initialize and start the global reactor.
        g_reactor->Init();
        g_reactor->Start();
        
        server_.reset(new HttpServer());
        
        // Game-server handler: echoes request headers back in a JSON body
        // after a simulated processing delay.
        auto handler = [this](uint32_t handle, stHttpRequest&& req) -> int {
            auto start_time = std::chrono::steady_clock::now();
            
            // Simulate ~0.5ms of game-logic processing time.
            std::this_thread::sleep_for(std::chrono::microseconds(500));
            
            stHttpResponse resp;
            resp.state = 200;
            resp.heads["Content-Type"] = "application/json";
            resp.heads["Server"] = "GameServer/1.0";
            resp.heads["Connection"] = "keep-alive";
            
            // rand() is not guaranteed thread-safe and this handler may run
            // on multiple reactor threads, so use a thread_local generator.
            thread_local std::mt19937 rng{std::random_device{}()};
            std::uniform_int_distribution<int> score_dist(0, 999); // same range as rand() % 1000
            std::uniform_int_distribution<int> level_dist(0, 9);   // same range as rand() % 10
            
            // Build the simulated game response.
            std::ostringstream json_stream;
            json_stream << "{\"status\": \"success\", "
                       << "\"player_id\": \"" << req.heads["X-Player-Id"] << "\", "
                       << "\"action\": \"" << req.heads["X-Player-Action"] << "\", "
                       << "\"request_id\": \"" << req.heads["X-Request-Id"] << "\", "
                       << "\"server_time\": " << std::chrono::duration_cast<std::chrono::milliseconds>(
                           std::chrono::steady_clock::now().time_since_epoch()).count() << ", "
                       << "\"data\": {\"score\": " << score_dist(rng) << ", \"level\": " << level_dist(rng) << "}}";
            
            resp.body = json_stream.str();
            
            server_->Response(handle, std::move(resp));
            
            // Server-side processing time, measured but currently unused.
            auto end_time = std::chrono::steady_clock::now();
            auto processing_time = std::chrono::duration_cast<std::chrono::microseconds>(end_time - start_time);
            (void)processing_time;
            
            return 0;
        };
        
        int result = server_->Start("127.0.0.1", server_port_, handler);
        if (result != 0) {
            std::cerr << "Failed to start concurrent player test server" << std::endl;
            return false;
        }
        
        server_running_ = true;
        std::cout << "Concurrent player test server started on http://127.0.0.1:" << server_port_ << std::endl;
        
        // Give the server a moment to be ready to accept connections.
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        
        return true;
    }
    
    // Run one load-test iteration: num_players simulators, each sending
    // requests_per_second for test_duration_seconds, then print a report.
    void run_concurrent_test(int num_players, int requests_per_second, int test_duration_seconds) {
        std::cout << "\n=== Concurrent Player Test ===" << std::endl;
        std::cout << "Players: " << num_players << std::endl;
        std::cout << "Requests per second per player: " << requests_per_second << std::endl;
        std::cout << "Total QPS: " << (num_players * requests_per_second) << std::endl;
        std::cout << "Test duration: " << test_duration_seconds << " seconds" << std::endl;
        
        if (!server_running_) {
            std::cerr << "Server not running" << std::endl;
            return;
        }
        
        // Start from a clean slate for this iteration.
        metrics_.reset();
        
        std::string server_url = "http://127.0.0.1:" + std::to_string(server_port_);
        
        // Create one simulator per player.
        std::vector<std::unique_ptr<PlayerSimulator>> players;
        for (int i = 0; i < num_players; i++) {
            players.emplace_back(new PlayerSimulator(
                i + 1, requests_per_second, test_duration_seconds, metrics_, server_url));
        }
        
        // Launch all player threads.
        auto start_time = std::chrono::steady_clock::now();
        std::cout << "Starting all players..." << std::endl;
        
        for (auto& player : players) {
            player->start();
        }
        
        // Print periodic progress while the test runs.
        monitor_progress(test_duration_seconds);
        
        // Join every player thread before reporting.
        std::cout << "Waiting for all players to complete..." << std::endl;
        for (auto& player : players) {
            player->stop();
        }
        
        auto end_time = std::chrono::steady_clock::now();
        auto actual_duration = std::chrono::duration_cast<std::chrono::seconds>(end_time - start_time);
        
        generate_test_report(num_players, requests_per_second, test_duration_seconds, actual_duration);
    }
    
    // Binary-search the largest player count that keeps average latency
    // under max_latency_ms and success rate above success_rate_threshold.
    void find_max_players(int requests_per_second, int test_duration_seconds, 
                         double max_latency_ms = 100.0, double success_rate_threshold = 0.95) {
        std::cout << "\n=== Finding Maximum Concurrent Players ===" << std::endl;
        std::cout << "Target: <" << max_latency_ms << "ms latency, >" << (success_rate_threshold * 100) << "% success rate" << std::endl;
        std::cout << "Requests per second per player: " << requests_per_second << std::endl;
        std::cout << "Test duration per iteration: " << test_duration_seconds << " seconds" << std::endl;
        
        int min_players = 100;
        int max_players = 2000000; // upper bound: 2 million players
        int optimal_players = 0;
        
        // Binary search over the player count.
        while (min_players <= max_players) {
            int mid_players = (min_players + max_players) / 2;
            
            std::cout << "\nTesting with " << mid_players << " players..." << std::endl;
            
            run_concurrent_test(mid_players, requests_per_second, test_duration_seconds);
            
            double avg_latency = metrics_.get_average_latency();
            // Guard the division: a run that completed zero requests counts
            // as a total failure rather than producing NaN.
            uint64_t total = metrics_.total_requests;
            double success_rate = total > 0
                ? metrics_.successful_requests * 1.0 / total : 0.0;
            
            std::cout << "Results: " << avg_latency << "ms avg latency, " << (success_rate * 100) << "% success rate" << std::endl;
            
            if (avg_latency <= max_latency_ms && success_rate >= success_rate_threshold) {
                optimal_players = mid_players;
                min_players = mid_players + 1;
                std::cout << "✅ Passed, trying higher player count" << std::endl;
            } else {
                max_players = mid_players - 1;
                std::cout << "❌ Failed, reducing player count" << std::endl;
            }
        }
        
        std::cout << "\n=== MAXIMUM PLAYERS FOUND ===" << std::endl;
        std::cout << "Maximum concurrent players: " << optimal_players << std::endl;
        std::cout << "Total QPS supported: " << (optimal_players * requests_per_second) << std::endl;
        std::cout << "Configuration: " << requests_per_second << " requests/second/player" << std::endl;
        std::cout << "Requirements: <" << max_latency_ms << "ms latency, >" << (success_rate_threshold * 100) << "% success rate" << std::endl;
    }
    
    // Run a fixed ladder of load scenarios back to back.
    void run_benchmark_suite() {
        std::cout << "\n=== Comprehensive Benchmark Suite ===" << std::endl;
        
        // Scenario tuples: {players, requests/second/player, duration seconds}.
        std::vector<std::tuple<int, int, int>> test_scenarios = {
            {100000, 5, 30},    // 100k players, 5 req/s, 30s
            {500000, 5, 30},    // 500k players, 5 req/s, 30s
            {1000000, 5, 30},   // 1M players, 5 req/s, 30s
            {1500000, 5, 30},   // 1.5M players, 5 req/s, 30s
        };
        
        for (const auto& scenario : test_scenarios) {
            int players = std::get<0>(scenario);
            int rps = std::get<1>(scenario);
            int duration = std::get<2>(scenario);
            
            std::cout << "\n--- Scenario: " << players << " players, " << rps << " RPS/player, " << duration << "s ---" << std::endl;
            run_concurrent_test(players, rps, duration);
            
            // Give the system some recovery time between scenarios.
            std::this_thread::sleep_for(std::chrono::seconds(5));
        }
    }
    
    // Stop the reactor (and thereby the server) if it is still running.
    void cleanup() {
        if (server_running_) {
            std::cout << "\nCleaning up concurrent player test..." << std::endl;
            g_reactor->Stop();
            server_running_ = false;
        }
    }
    
private:
    // Print progress and running metric totals every 5 seconds for the
    // duration of the test.
    void monitor_progress(int test_duration_seconds) {
        auto start_time = std::chrono::steady_clock::now();
        
        for (int i = 0; i < test_duration_seconds; i += 5) {
            std::this_thread::sleep_for(std::chrono::seconds(5));
            
            auto elapsed = std::chrono::duration_cast<std::chrono::seconds>(
                std::chrono::steady_clock::now() - start_time);
            
            // test_duration_seconds >= 1 inside this loop, so the percent
            // division is safe.
            std::cout << "Progress: " << elapsed.count() << "/" << test_duration_seconds 
                     << " seconds (" << (elapsed.count() * 100 / test_duration_seconds) << "%)" << std::endl;
            std::cout << "  Requests: " << metrics_.total_requests 
                     << " (Success: " << metrics_.successful_requests 
                     << ", Failed: " << metrics_.failed_requests << ")" << std::endl;
            std::cout << "  Avg Latency: " << metrics_.get_average_latency() << "ms" << std::endl;
        }
    }
    
    // Print the full post-run report: configuration, throughput, latency
    // percentiles, distribution buckets, and a pass/fail assessment.
    void generate_test_report(int num_players, int requests_per_second, int test_duration_seconds, 
                             std::chrono::seconds actual_duration) {
        std::cout << "\n=== Test Results ===" << std::endl;
        
        const uint64_t total_req = metrics_.total_requests;
        const uint64_t succeeded = metrics_.successful_requests;
        const uint64_t failed = metrics_.failed_requests;
        
        // Guard every division: a zero-length run or zero completed
        // requests must not produce NaN/inf in the report.
        const double duration_s = actual_duration.count() > 0
            ? static_cast<double>(actual_duration.count()) : 1.0;
        double total_qps = total_req / duration_s;
        double success_rate = total_req > 0 ? succeeded * 1.0 / total_req : 0.0;
        double failure_rate = total_req > 0 ? failed * 1.0 / total_req : 0.0;
        
        std::cout << "Configuration:" << std::endl;
        std::cout << "  Players: " << num_players << std::endl;
        std::cout << "  Requests/second/player: " << requests_per_second << std::endl;
        std::cout << "  Total QPS: " << (num_players * requests_per_second) << std::endl;
        std::cout << "  Test duration: " << test_duration_seconds << " seconds" << std::endl;
        std::cout << "  Actual duration: " << actual_duration.count() << " seconds" << std::endl;
        
        std::cout << "\nPerformance Metrics:" << std::endl;
        std::cout << "  Total requests: " << total_req << std::endl;
        std::cout << "  Successful requests: " << succeeded << " (" << (success_rate * 100) << "%)" << std::endl;
        std::cout << "  Failed requests: " << failed << " (" << (failure_rate * 100) << "%)" << std::endl;
        std::cout << "  Actual QPS: " << std::fixed << std::setprecision(2) << total_qps << std::endl;
        
        std::cout << "\nLatency Analysis:" << std::endl;
        std::cout << "  Average latency: " << metrics_.get_average_latency() << "ms" << std::endl;
        std::cout << "  Min latency: " << metrics_.min_latency << "ms" << std::endl;
        std::cout << "  Max latency: " << metrics_.max_latency << "ms" << std::endl;
        std::cout << "  P50 latency: " << metrics_.get_latency_percentile(50) << "ms" << std::endl;
        std::cout << "  P95 latency: " << metrics_.get_latency_percentile(95) << "ms" << std::endl;
        std::cout << "  P99 latency: " << metrics_.get_latency_percentile(99) << "ms" << std::endl;
        
        std::cout << "\nLatency Distribution:" << std::endl;
        uint64_t total_samples = metrics_.latency_samples;
        if (total_samples > 0) {
            std::cout << "  < 10ms: " << (metrics_.latency_under_10ms * 100.0 / total_samples) << "%" << std::endl;
            std::cout << "  < 50ms: " << (metrics_.latency_under_50ms * 100.0 / total_samples) << "%" << std::endl;
            std::cout << "  < 100ms: " << (metrics_.latency_under_100ms * 100.0 / total_samples) << "%" << std::endl;
            std::cout << "  > 100ms: " << (metrics_.latency_over_100ms * 100.0 / total_samples) << "%" << std::endl;
        } else {
            std::cout << "  (no latency samples collected)" << std::endl;
        }
        
        // Pass/fail assessment against the fixed requirements.
        bool meets_requirements = metrics_.get_average_latency() <= 100.0 && success_rate >= 0.95;
        std::cout << "\nAssessment: " << (meets_requirements ? "✅ PASS" : "❌ FAIL") << std::endl;
        if (!meets_requirements) {
            if (metrics_.get_average_latency() > 100.0) {
                std::cout << "  ❌ Average latency exceeds 100ms requirement" << std::endl;
            }
            if (success_rate < 0.95) {
                std::cout << "  ❌ Success rate below 95% requirement" << std::endl;
            }
        }
    }
    
    std::unique_ptr<HttpServer> server_;
    bool server_running_;
    int server_port_;
    PerformanceMetrics metrics_;
};

#endif // CONCURRENT_PLAYER_TEST_H