package com.ww.exercise.coroutine.hard.q18

import com.sun.management.OperatingSystemMXBean
import kotlinx.coroutines.*
import kotlinx.coroutines.sync.Semaphore
import kotlinx.coroutines.sync.withPermit
import java.lang.management.ManagementFactory
import java.util.concurrent.*
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.atomic.AtomicLong
import kotlin.coroutines.cancellation.CancellationException
import kotlin.math.max
import kotlin.system.measureNanoTime

// Aggregated performance metrics for one benchmark run.
data class PerformanceStats(
    val totalRequests: Long,        // requests scheduled for submission
    val completedRequests: Long,    // requests whose callback fired (any status)
    val failedRequests: Long,       // responses with a non-200 status
    val avgResponseTimeMs: Double,  // mean latency over successful (200) requests
    val p99ResponseTimeMs: Double,  // 99th-percentile latency over successful requests
    val cpuUsage: Double,           // average process CPU load in percent; -1.0 when unavailable
    val peakMemoryMB: Double,       // peak observed heap usage, in MiB
    val totalTimeSec: Double,       // wall-clock time of the whole run, in seconds
    val errorRate: Double           // failedRequests / completedRequests
)

// A simulated API request submitted to a gateway.
data class ApiRequest(val id: Long, val path: String, val payload: String)

// A simulated API response produced by the backend.
data class ApiResponse(
    val requestId: Long,       // id of the request this response answers
    val status: Int,           // HTTP-style status code (200 / 500 / 503 / 504)
    val data: String,          // payload or error description
    val processingTimeNs: Long // handler wall-clock time, in nanoseconds
)

// Simulated business backend used by both gateways.
//
// NOTE(review): the "object pool" below cannot actually reduce allocation —
// every code path calls copy(), which allocates a fresh ApiResponse. It is
// kept only because returnResponse() is part of the public interface.
class BackendService {
    // Simulated backend latency range, in milliseconds.
    private val delayRange = 1L..10L

    // Recycled response instances; see NOTE above about effectiveness.
    private val responsePool = ConcurrentLinkedQueue<ApiResponse>()

    private fun getRandomDelay() = delayRange.random()

    // Take a response object from the pool, or create a blank one.
    private fun borrowResponse(): ApiResponse {
        return responsePool.poll() ?: ApiResponse(0, 0, "", 0)
    }

    // Return a response object to the pool, cleared of request data.
    fun returnResponse(response: ApiResponse) {
        responsePool.offer(
            response.copy(
                requestId = 0,
                status = 0,
                data = "",
                processingTimeNs = 0
            )
        )
    }

    /**
     * Coroutine request handler: simulates work with a short [delay] and maps
     * failures to an error response instead of throwing.
     *
     * Cancellation is deliberately mapped to HTTP 504 rather than rethrown so
     * the caller's callback still fires exactly once per request; this trades
     * strict structured-concurrency semantics for complete request accounting
     * in the load test.
     */
    suspend fun handleRequest(request: ApiRequest): ApiResponse {
        val start = System.nanoTime()
        val response = borrowResponse()
        return try {
            delay(getRandomDelay())
            response.copy(
                requestId = request.id,
                status = 200,
                data = "Processed: ${request.path}",
                processingTimeNs = System.nanoTime() - start
            )
        } catch (e: Exception) {
            response.copy(
                requestId = request.id,
                status = if (e is CancellationException) 504 else 500,
                data = "Error: ${e.message ?: e.javaClass.simpleName}",
                processingTimeNs = System.nanoTime() - start
            )
        }
    }

    /**
     * Blocking request handler for the thread-pool gateway.
     *
     * FIX: InterruptedException is now caught separately and the thread's
     * interrupt flag is restored, so pool shutdown (shutdownNow) can still
     * interrupt a worker that is mid-sleep or queued behind this task.
     */
    fun handleRequestBlocking(request: ApiRequest): ApiResponse {
        val start = System.nanoTime()
        return try {
            Thread.sleep(getRandomDelay())
            ApiResponse(
                requestId = request.id,
                status = 200,
                data = "Processed: ${request.path}",
                processingTimeNs = System.nanoTime() - start
            )
        } catch (e: InterruptedException) {
            // Preserve the interrupt status for the executor's shutdown logic.
            Thread.currentThread().interrupt()
            ApiResponse(
                requestId = request.id,
                status = 500,
                data = "Error: ${e.message}",
                processingTimeNs = System.nanoTime() - start
            )
        } catch (e: Exception) {
            ApiResponse(
                requestId = request.id,
                status = 500,
                data = "Error: ${e.message}",
                processingTimeNs = System.nanoTime() - start
            )
        }
    }
}

// Coroutine-based API gateway: bounds concurrency with a semaphore and
// delivers results through a suspend callback.
class CoroutineApiGateway(
    private val backend: BackendService,
    // Concurrency cap sized from the number of available processors.
    private val maxConcurrency: Int = Runtime.getRuntime().availableProcessors() * 8,
    // An IO-dispatcher view limited to the concurrency cap.
    private val dispatcher: CoroutineDispatcher = Dispatchers.IO.limitedParallelism(maxConcurrency)
) {
    // SupervisorJob so one failed request does not cancel its siblings.
    private val scope = CoroutineScope(dispatcher + SupervisorJob())
    private val semaphore = Semaphore(maxConcurrency)
    private val activeCoroutines = AtomicInteger(0)

    /**
     * Launches a coroutine that processes [request] and invokes [callback]
     * exactly once with the result.
     *
     * FIX: the original guarded the callback invocation with the same
     * try/catch as the backend call, so a callback that threw was invoked a
     * second time with a synthetic 500 response — double latch.countDown()
     * in the load test. Error handling is now scoped to the backend call,
     * and the semaphore permit is released before the callback runs.
     */
    fun submitRequest(request: ApiRequest, callback: suspend (ApiResponse) -> Unit) {
        activeCoroutines.incrementAndGet()
        scope.launch {
            var response: ApiResponse? = null
            try {
                val result: ApiResponse = try {
                    semaphore.withPermit { backend.handleRequest(request) }
                } catch (e: Exception) {
                    ApiResponse(
                        requestId = request.id,
                        status = 500,
                        data = "Coroutine error: ${e.message}",
                        processingTimeNs = 0
                    )
                }
                response = result
                callback(result)
            } finally {
                activeCoroutines.decrementAndGet()
                // Hand the delivered response back to the backend's pool.
                response?.let { backend.returnResponse(it) }
            }
        }
    }

    /** Cancels all in-flight request coroutines and waits for them to finish. */
    fun shutdown() {
        // FIX: do not run the blocking join on the gateway's own limited
        // dispatcher — that competes with the very coroutines being joined.
        runBlocking {
            scope.coroutineContext.job.cancelAndJoin()
        }
    }

    /** Number of request coroutines currently in flight. */
    fun getActiveCount(): Int = activeCoroutines.get()
}

// Thread-pool based API gateway: the baseline the coroutine gateway is
// benchmarked against.
class ThreadPoolApiGateway(
    private val backend: BackendService,
    coreThreads: Int = Runtime.getRuntime().availableProcessors() * 10,
    maxThreads: Int = Runtime.getRuntime().availableProcessors() * 20,
    queueCapacity: Int = 20000
) {
    // Names workers "api-thread-N" and marks them daemon so they never block
    // JVM exit.
    private val workerFactory = object : ThreadFactory {
        private val nextId = AtomicLong(0)
        override fun newThread(r: Runnable): Thread =
            Thread(r, "api-thread-${nextId.incrementAndGet()}").apply { isDaemon = true }
    }

    // CallerRunsPolicy gives natural back-pressure once the queue fills up.
    private val pool = ThreadPoolExecutor(
        coreThreads,
        maxThreads,
        60L, TimeUnit.SECONDS,
        LinkedBlockingQueue(queueCapacity),
        workerFactory,
        ThreadPoolExecutor.CallerRunsPolicy()
    )

    /** Schedules [request] on the pool; answers 503 inline if submission is rejected. */
    fun submitRequest(request: ApiRequest, callback: (ApiResponse) -> Unit) {
        try {
            pool.submit { callback(backend.handleRequestBlocking(request)) }
        } catch (e: Exception) {
            callback(
                ApiResponse(
                    requestId = request.id,
                    status = 503,
                    data = "Service busy",
                    processingTimeNs = 0
                )
            )
        }
    }

    /** Orderly shutdown with a one-minute grace period, then forced. */
    fun shutdown() {
        pool.shutdown()
        try {
            if (!pool.awaitTermination(1, TimeUnit.MINUTES)) pool.shutdownNow()
        } catch (e: InterruptedException) {
            pool.shutdownNow()
        }
    }

    /** Number of pool threads actively executing tasks. */
    fun getActiveCount(): Int = pool.activeCount
}

// System monitoring utility: samples process CPU load and heap usage.
class SystemMonitor {
    // FIX: `as?` never throws ClassCastException — it yields null on failure —
    // so the original try/catch warning branch was dead code. Detect the
    // unsupported-JVM case via the null result instead.
    private val osBean: OperatingSystemMXBean? = run {
        val bean = ManagementFactory.getOperatingSystemMXBean() as? OperatingSystemMXBean
        if (bean == null) println("警告：当前JVM不支持CPU使用率监控")
        bean
    }
    private val memoryBean = ManagementFactory.getMemoryMXBean()

    // FIX: these fields are written by the sampling thread and read by the
    // reporting thread; all access now goes through @Synchronized methods.
    private var peakMemory = 0L
    private var sampleCount = 0
    private var totalCpuUsage = 0.0

    /**
     * Takes one sample and returns (instantaneous CPU load * 100, peak heap bytes).
     *
     * FIX: readings of exactly 0.0 are now counted — processCpuLoad reports a
     * negative value only when the metric is unavailable, so excluding zeros
     * biased the average upward.
     */
    @Synchronized
    fun sample(): Pair<Double, Long> {
        val cpuUsage = osBean?.processCpuLoad ?: -1.0
        if (cpuUsage >= 0) {
            totalCpuUsage += cpuUsage
            sampleCount++
        }

        val heapMemory = memoryBean.heapMemoryUsage.used
        peakMemory = max(peakMemory, heapMemory)

        return Pair(cpuUsage * 100, peakMemory)
    }

    /** Average CPU load in percent over all valid samples, or -1.0 if none. */
    @Synchronized
    fun getAverageCpuUsage(): Double {
        return if (sampleCount > 0) (totalCpuUsage / sampleCount) * 100 else -1.0
    }

    /** Highest observed heap usage, in MiB. */
    @Synchronized
    fun getPeakMemoryMB(): Double = peakMemory / (1024.0 * 1024.0)
}

// Load-testing harness: drives both gateway implementations at a target QPS
// and aggregates latency / CPU / memory statistics.
class LoadTester(private val backend: BackendService) {
    // Pre-generate all requests up front so allocation cost stays out of the
    // timed submission loop.
    private fun generateRequests(count: Int): List<ApiRequest> {
        return List(count) { i ->
            ApiRequest(i.toLong() + 1, "/api/service-${(i + 1) % 10}", "payload-${i + 1}")
        }
    }

    /**
     * Benchmarks the thread-pool gateway.
     *
     * A dedicated submitter thread paces requests at 1e9/requestsPerSecond ns
     * intervals; totalTimeSec covers submission plus a grace wait of up to
     * durationSeconds + 10 seconds for in-flight requests.
     *
     * @param requestsPerSecond target submission rate
     * @param durationSeconds   how long to keep submitting
     */
    fun testThreadPoolGateway(
        requestsPerSecond: Int,
        durationSeconds: Int
    ): PerformanceStats {
        val totalRequests = requestsPerSecond * durationSeconds
        val gateway = ThreadPoolApiGateway(backend)
        val responses = mutableListOf<Long>()  // keep only latency values (ns) to limit memory
        val monitor = SystemMonitor()
        val latch = CountDownLatch(totalRequests)
        val completed = AtomicInteger(0)
        val submitted = AtomicInteger(0)
        val failed = AtomicInteger(0)
        val progressStep = max(1, totalRequests / 20)

        // Progress reporter: rewrites one console line every second.
        val progressMonitor = Executors.newSingleThreadScheduledExecutor()
        progressMonitor.scheduleAtFixedRate({
            val c = completed.get()
            val s = submitted.get()
            val a = gateway.getActiveCount()
            val percent = (c.toDouble() / totalRequests * 100).toInt()
            print("\r线程池进度: $percent% | 提交: $s/$totalRequests | 完成: $c/$totalRequests | 活跃线程: $a")
        }, 0, 1000, TimeUnit.MILLISECONDS)

        // CPU / memory sampler at 5 Hz.
        val systemMonitorJob = Executors.newSingleThreadScheduledExecutor()
        systemMonitorJob.scheduleAtFixedRate(
            { monitor.sample() }, 0, 200, TimeUnit.MILLISECONDS
        )

        val totalTimeNs = measureNanoTime {
            val requests = generateRequests(totalRequests)
            val intervalNs = 1_000_000_000L / requestsPerSecond
            val executor = Executors.newSingleThreadExecutor()

            // Single submitter thread: sleeps between submissions to hold the
            // target request rate.
            executor.submit {
                var lastSubmitTime = System.nanoTime()
                requests.forEach { req ->
                    val currentTime = System.nanoTime()
                    val nextSubmitTime = lastSubmitTime + intervalNs
                    if (currentTime < nextSubmitTime) {
                        val sleepMs = (nextSubmitTime - currentTime) / 1_000_000
                        if (sleepMs > 0) {
                            Thread.sleep(sleepMs)
                        }
                    }
                    gateway.submitRequest(req) { res ->
                        if (res.status == 200) {
                            synchronized(responses) {
                                responses.add(res.processingTimeNs)
                            }
                        } else {
                            failed.incrementAndGet()
                        }
                        completed.incrementAndGet()
                        latch.countDown()
                    }
                    submitted.incrementAndGet()
                    // Carry forward the schedule; max() prevents drift when a
                    // submission ran late.
                    lastSubmitTime = max(currentTime, nextSubmitTime)

                    if (submitted.get() % progressStep == 0) {
                        val sPercent = (submitted.get().toDouble() / totalRequests * 100).toInt()
                        print("\r线程池提交进度: $sPercent%")
                    }
                }
            }
            executor.shutdown()

            // Grace period: nominal duration plus 10 seconds for stragglers.
            if (!latch.await(durationSeconds + 10L, TimeUnit.SECONDS)) {
                println("\n警告：线程池部分请求超时未完成")
            }
        }

        progressMonitor.shutdown()
        systemMonitorJob.shutdown()
        gateway.shutdown()
        println()

        // NOTE(review): `responses` is read here without its lock; this relies
        // on the happens-before edges of latch.await / gateway.shutdown. If
        // the latch timed out above, late callbacks may still be writing —
        // verify whether that path matters for this harness.
        val times = responses.map { it / 1_000_000.0 }.sorted()
        val completedCount = completed.get().toLong()
        return PerformanceStats(
            totalRequests = totalRequests.toLong(),
            completedRequests = completedCount,
            failedRequests = failed.get().toLong(),
            avgResponseTimeMs = if (times.isNotEmpty()) times.average() else 0.0,
            p99ResponseTimeMs = if (times.isNotEmpty()) {
                val index = max(0, (times.size * 0.99).toInt() - 1)
                times[index]
            } else 0.0,
            cpuUsage = monitor.getAverageCpuUsage(),
            peakMemoryMB = monitor.getPeakMemoryMB(),
            totalTimeSec = totalTimeNs / 1_000_000_000.0,
            errorRate = if (completedCount > 0) failed.get().toDouble() / completedCount else 0.0
        )
    }

    /**
     * Benchmarks the coroutine gateway.
     *
     * Requests are launched in batches of 100; each element in a batch delays
     * itself against `lastSubmitTime` so the batch as a whole approximates the
     * target rate, and the driver awaits each batch before starting the next.
     */
    fun testCoroutineGateway(
        requestsPerSecond: Int,
        durationSeconds: Int,
        maxConcurrency: Int = Runtime.getRuntime().availableProcessors() * 8
    ): PerformanceStats {
        val totalRequests = requestsPerSecond * durationSeconds
        val gateway = CoroutineApiGateway(backend, maxConcurrency)
        // Lock-free queue: result callbacks run on many coroutines concurrently.
        val responses = ConcurrentLinkedQueue<Long>()
        val monitor = SystemMonitor()
        val latch = CountDownLatch(totalRequests)
        val completed = AtomicInteger(0)
        val submitted = AtomicInteger(0)
        val failed = AtomicInteger(0)
        val progressStep = max(1, totalRequests / 20)

        // Progress reporter at a lower rate (every 2 s) to reduce console noise.
        val progressMonitor = Executors.newSingleThreadScheduledExecutor()
        progressMonitor.scheduleAtFixedRate({
            val c = completed.get()
            val s = submitted.get()
            val a = gateway.getActiveCount()
            val percent = (c.toDouble() / totalRequests * 100).toInt()
            print("\r协程进度: $percent% | 提交: $s/$totalRequests | 完成: $c/$totalRequests | 活跃协程: $a")
        }, 0, 2000, TimeUnit.MILLISECONDS)

        // CPU / memory sampler at 5 Hz.
        val systemMonitorJob = Executors.newSingleThreadScheduledExecutor()
        systemMonitorJob.scheduleAtFixedRate(
            { monitor.sample() }, 0, 200, TimeUnit.MILLISECONDS
        )

        // measureNanoTime covers submission plus the final latch wait.
        val totalTimeNs = measureNanoTime {
            // Driver runs on a small (4-thread) slice of the Default dispatcher.
            runBlocking(Dispatchers.Default.limitedParallelism(4)) {
                val intervalNs = 1_000_000_000L / requestsPerSecond
                var lastSubmitTime = System.nanoTime()

                // Submit in fixed-size batches to bound the number of live
                // pacing coroutines.
                val batchSize = 100
                val batches = (totalRequests + batchSize - 1) / batchSize

                repeat(batches.toInt()) { batchIdx ->
                    val start = batchIdx * batchSize
                    val end = minOf((batchIdx + 1) * batchSize, totalRequests)

                    // One pacing coroutine per request in the batch.
                    // NOTE(review): `lastSubmitTime` is read by these children
                    // and mutated only by the outer coroutine after awaitAll;
                    // presumably visible via coroutine happens-before — confirm.
                    val jobs = (start until end).map { i ->
                        async {
                            val currentTime = System.nanoTime()
                            val nextSubmitTime = lastSubmitTime + (i - start) * intervalNs
                            if (currentTime < nextSubmitTime) {
                                val delayMs = (nextSubmitTime - currentTime) / 1_000_000
                                if (delayMs > 0) {
                                    delay(delayMs)
                                }
                            }

                            val req = ApiRequest(
                                id = i.toLong() + 1,
                                path = "/api/service-${(i + 1) % 10}",
                                payload = "payload-${i + 1}"
                            )

                            // Suspend callback: counters updated on the
                            // gateway's coroutine, no extra thread hop.
                            gateway.submitRequest(req) { res ->
                                if (res.status == 200) {
                                    responses.add(res.processingTimeNs)
                                } else {
                                    failed.incrementAndGet()
                                }
                                completed.incrementAndGet()
                                latch.countDown()
                            }

                            submitted.incrementAndGet()

                            if (submitted.get() % progressStep == 0) {
                                val sPercent = (submitted.get().toDouble() / totalRequests * 100).toInt()
                                print("\r协程提交进度: $sPercent%")
                            }
                        }
                    }

                    // Wait for the batch before advancing the schedule — this
                    // is what actually throttles the submission rate.
                    jobs.awaitAll()
                    lastSubmitTime += batchSize * intervalNs
                }
            }

            // Grace period: nominal duration plus 10 seconds for stragglers.
            if (!latch.await(durationSeconds + 10L, TimeUnit.SECONDS)) {
                println("\n警告：协程部分请求超时未完成")
            }
        }

        progressMonitor.shutdown()
        systemMonitorJob.shutdown()
        gateway.shutdown()
        println()

        // Snapshot the concurrent queue, convert ns → ms, sort for percentiles.
        val times = responses.map { it / 1_000_000.0 }.sorted().toList()
        val completedCount = completed.get().toLong()
        return PerformanceStats(
            totalRequests = totalRequests.toLong(),
            completedRequests = completedCount,
            failedRequests = failed.get().toLong(),
            avgResponseTimeMs = if (times.isNotEmpty()) times.average() else 0.0,
            p99ResponseTimeMs = if (times.isNotEmpty()) {
                val index = max(0, (times.size * 0.99).toInt() - 1)
                times[index]
            } else 0.0,
            cpuUsage = monitor.getAverageCpuUsage(),
            peakMemoryMB = monitor.getPeakMemoryMB(),
            totalTimeSec = totalTimeNs / 1_000_000_000.0,
            errorRate = if (completedCount > 0) failed.get().toDouble() / completedCount else 0.0
        )
    }
}

fun main() {
    // Warm up the JIT before taking any measurements.
    runWarmup()

    val backend = BackendService()
    val tester = LoadTester(backend)
    val reqPerSec = 25000  // tune to the capabilities of the host machine
    val duration = 60

    // Shared formatter so every metric renders with two decimal places.
    fun f2(v: Double) = String.format("%.2f", v)

    println("=== 测试线程池方案（QPS: $reqPerSec, 时长: $duration 秒）===")
    val threadStats = tester.testThreadPoolGateway(reqPerSec, duration)

    println("\n=== 测试协程方案（QPS: $reqPerSec, 时长: $duration 秒）===")
    val coroutineStats = tester.testCoroutineGateway(reqPerSec, duration)

    // Completion rates, as percentages of scheduled requests.
    val threadCompletionRate = threadStats.completedRequests.toDouble() / threadStats.totalRequests * 100
    val coroutineCompletionRate = coroutineStats.completedRequests.toDouble() / coroutineStats.totalRequests * 100

    // Side-by-side comparison report.
    println("\n===== 最终性能对比 =====")
    println("1. 请求完成率: 线程池 ${f2(threadCompletionRate)}% vs 协程 ${f2(coroutineCompletionRate)}%")
    println("2. 平均响应时间: 线程池 ${f2(threadStats.avgResponseTimeMs)}ms vs 协程 ${f2(coroutineStats.avgResponseTimeMs)}ms")
    println("3. P99延迟: 线程池 ${f2(threadStats.p99ResponseTimeMs)}ms vs 协程 ${f2(coroutineStats.p99ResponseTimeMs)}ms")
    println("4. CPU使用率: 线程池 ${f2(threadStats.cpuUsage)}% vs 协程 ${f2(coroutineStats.cpuUsage)}%")
    println("5. 峰值内存: 线程池 ${f2(threadStats.peakMemoryMB)}MB vs 协程 ${f2(coroutineStats.peakMemoryMB)}MB")
    println("6. 错误率: 线程池 ${f2(threadStats.errorRate * 100)}% vs 协程 ${f2(coroutineStats.errorRate * 100)}%")
    println(
        "7. 请求详情: 线程池（完成: ${threadStats.completedRequests} / " +
                "失败: ${threadStats.failedRequests}） " +
                "vs 协程（完成: ${coroutineStats.completedRequests} / " +
                "失败: ${coroutineStats.failedRequests}）"
    )
}

// JVM warm-up: short runs of both gateway paths so class loading and JIT
// compilation do not skew the measured runs.
private fun runWarmup() {
    println("进行JVM预热...")
    with(LoadTester(BackendService())) {
        testThreadPoolGateway(100, 5)
        testCoroutineGateway(100, 5)
    }
    println("预热完成\n")
}
