package com.ww.exercise.coroutine.hard.q18.opt

import com.sun.management.OperatingSystemMXBean
import kotlinx.coroutines.*
import kotlinx.coroutines.sync.Semaphore
import kotlinx.coroutines.sync.withPermit
import java.lang.management.ManagementFactory
import java.util.concurrent.*
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.atomic.AtomicLong
import kotlin.coroutines.cancellation.CancellationException
import kotlin.math.max
import kotlin.math.min
import kotlin.system.measureNanoTime

// Aggregated performance metrics for one benchmark run.
data class PerformanceStats(
    val totalRequests: Long,        // number of requests the run intended to submit
    val completedRequests: Long,    // requests whose callback fired (any outcome)
    val failedRequests: Long,       // non-200, non-cancelled responses (plus timeouts, in the coroutine test)
    val cancelledRequests: Long,    // responses flagged isCancelled
    val avgResponseTimeMs: Double,  // mean latency over successful (status 200) responses only
    val p99ResponseTimeMs: Double,  // 99th-percentile latency over successful responses only
    val cpuUsage: Double,           // average process CPU load in percent; -1.0 when unavailable
    val peakMemoryMB: Double,       // peak heap usage observed during the run, in MiB
    val totalTimeSec: Double,       // wall-clock duration of the run, in seconds
    val errorRate: Double           // failed requests divided by a denominator (differs per test — see LoadTester)
)

// Simulated API request/response pair shared by both gateway implementations.
data class ApiRequest(val id: Long, val path: String, val payload: String)
data class ApiResponse(
    val requestId: Long,              // id of the originating ApiRequest (0 for blank pooled instances)
    val status: Int,                  // HTTP-like status code: 200, 500, or 503
    val data: String,                 // human-readable result or error description
    val processingTimeNs: Long,       // wall time spent handling the request, in nanoseconds
    val isCancelled: Boolean = false  // true when the request was cancelled mid-flight
)

// Business-processing service that simulates backend work for the benchmark.
class BackendService {
    // Simulated processing latency range, in milliseconds.
    private val delayRange = 1L..10L

    // NOTE(review): every borrow site immediately calls copy() on the pooled
    // instance, so this "pool" does not actually avoid allocations — kept only
    // for interface compatibility with callers of returnResponse().
    private val responsePool = ConcurrentLinkedQueue<ApiResponse>()

    private fun getRandomDelay() = delayRange.random()

    // Takes a pooled response if available, otherwise creates a blank one.
    private fun borrowResponse(): ApiResponse = responsePool.poll() ?: ApiResponse(0, 0, "", 0)

    /** Returns a response to the pool after resetting it to a blank state. */
    fun returnResponse(response: ApiResponse) {
        responsePool.offer(
            response.copy(0, 0, "", 0, isCancelled = false)
        )
    }

    /**
     * Handles a request by suspending for a random delay.
     *
     * Cancellation is deliberately swallowed and reported as a normal response
     * with [ApiResponse.isCancelled] set, so the gateway can count cancelled
     * requests; this intentionally suppresses coroutine cancellation here.
     */
    suspend fun handleRequest(request: ApiRequest): ApiResponse {
        val start = System.nanoTime()
        val response = borrowResponse()
        return try {
            delay(getRandomDelay())
            response.copy(
                requestId = request.id,
                status = 200,
                data = "Processed: ${request.path}",
                processingTimeNs = System.nanoTime() - start
            )
        } catch (e: CancellationException) {
            response.copy(
                requestId = request.id,
                status = 200,
                data = "Request cancelled (normal)",
                processingTimeNs = System.nanoTime() - start,
                isCancelled = true
            )
        } catch (e: Exception) {
            response.copy(
                requestId = request.id,
                status = 500,
                data = "Error: ${e.message ?: e.javaClass.simpleName}",
                processingTimeNs = System.nanoTime() - start
            )
        }
    }

    /**
     * Blocking variant used by the thread-pool gateway.
     *
     * Fix: an interrupted sleep previously fell into the generic catch and
     * silently cleared the thread's interrupt status; the flag is now restored
     * so the owning pool can observe the interruption.
     */
    fun handleRequestBlocking(request: ApiRequest): ApiResponse {
        val start = System.nanoTime()
        return try {
            Thread.sleep(getRandomDelay())
            ApiResponse(request.id, 200, "Processed: ${request.path}", System.nanoTime() - start)
        } catch (e: InterruptedException) {
            Thread.currentThread().interrupt() // preserve interrupt status
            ApiResponse(request.id, 500, "Error: ${e.message}", System.nanoTime() - start)
        } catch (e: Exception) {
            ApiResponse(request.id, 500, "Error: ${e.message}", System.nanoTime() - start)
        }
    }
}

// Coroutine-based API gateway; all outcomes (success, failure, cancellation)
// are delivered through the submit callback.
class CoroutineApiGateway(
    private val backend: BackendService,
    private val maxConcurrency: Int = Runtime.getRuntime().availableProcessors() * 10,
    /**
     * limitedParallelism() creates a view of a dispatcher that caps how many
     * coroutines may execute in parallel, without blocking callers and without
     * allocating a new thread pool (the underlying dispatcher's threads are
     * reused). Typical uses: throttling DB/network access, replacing
     * newFixedThreadPoolContext, isolating a workload. Left disabled here so
     * the benchmark uses raw Dispatchers.IO.
     */
//    private val dispatcher: CoroutineDispatcher = Dispatchers.IO.limitedParallelism(maxConcurrency)
    private val dispatcher: CoroutineDispatcher = Dispatchers.IO
) {
    // Logs uncaught failures; cancellation is expected and not reported.
    private val exceptionHandler = CoroutineExceptionHandler { _, throwable ->
        if (throwable !is CancellationException) {
            println("协程未捕获异常: ${throwable.javaClass.simpleName} - ${throwable.message}")
        }
    }

    // SupervisorJob so one failed request cannot cancel its siblings.
    private val scope = CoroutineScope(dispatcher + SupervisorJob() + exceptionHandler)
    private val semaphore = Semaphore(maxConcurrency * 2)
    private val activeCoroutines = AtomicInteger(0)

    /**
     * Submits a request; the outcome is delivered through [callback] exactly
     * once for every coroutine that actually starts.
     *
     * Fix: the active-coroutine counter is now incremented inside the launched
     * coroutine. Previously it was incremented before launch, so submitting
     * against an already-cancelled scope leaked the counter permanently (the
     * coroutine body — and its finally block — never ran).
     */
    fun submitRequest(
        request: ApiRequest,
        callback: suspend (ApiResponse) -> Unit
    ) {
        scope.launch {
            activeCoroutines.incrementAndGet()
            var response: ApiResponse? = null
            try {
                semaphore.withPermit {
                    val result = backend.handleRequest(request)
                    response = result
                    try {
                        callback(result)
                    } catch (e: Exception) {
                        println("回调异常（请求ID: ${request.id}）: ${e.message}")
                    }
                }
            } catch (e: CancellationException) {
                // NOTE(review): CancellationException is deliberately not
                // rethrown so a cancelled request can still be reported and
                // counted through the same callback path.
                val cancelledResponse = ApiResponse(
                    requestId = request.id,
                    status = 200,
                    data = "Gateway cancelled",
                    processingTimeNs = 0,
                    isCancelled = true
                )
                response = cancelledResponse
                try {
                    callback(cancelledResponse)
                } catch (ignored: Exception) {}
            } catch (e: Exception) {
                val errorResponse = ApiResponse(
                    requestId = request.id,
                    status = 500,
                    data = "Gateway error: ${e.message}",
                    processingTimeNs = 0
                )
                response = errorResponse
                try {
                    callback(errorResponse)
                } catch (ignored: Exception) {}
            } finally {
                activeCoroutines.decrementAndGet()
                response?.let { backend.returnResponse(it) }
            }
        }
    }

    /** Stops accepting new work, cancels the scope, and waits for in-flight coroutines. */
    fun shutdown() {
        runBlocking(dispatcher) {
            scope.coroutineContext.job.cancelAndJoin()
        }
    }

    /** Number of request coroutines currently executing. */
    fun getActiveCount(): Int = activeCoroutines.get()
}

// Thread-pool-based API gateway (the baseline the coroutine gateway is compared against).
class ThreadPoolApiGateway(
    private val backend: BackendService,
    coreThreads: Int = Runtime.getRuntime().availableProcessors() * 10,
    maxThreads: Int = Runtime.getRuntime().availableProcessors() * 20,
    queueCapacity: Int = 20000
) {
    // Fix: the thread-name counter must be shared across the factory's calls.
    // The original constructed a fresh AtomicLong(0) for every new thread, so
    // every worker ended up named "api-thread-1".
    private val threadCounter = AtomicLong(0)

    private val executor = ThreadPoolExecutor(
        coreThreads, maxThreads, 60L, TimeUnit.SECONDS,
        LinkedBlockingQueue(queueCapacity),
        ThreadFactory { Thread(it, "api-thread-${threadCounter.incrementAndGet()}").apply { isDaemon = true } },
        // Back-pressure: rejected work runs on the submitting thread.
        ThreadPoolExecutor.CallerRunsPolicy()
    )

    /** Submits a request; on submission failure replies 503 through the callback. */
    fun submitRequest(request: ApiRequest, callback: (ApiResponse) -> Unit) {
        try {
            executor.submit {
                val response = backend.handleRequestBlocking(request)
                callback(response)
            }
        } catch (e: Exception) {
            callback(ApiResponse(request.id, 503, "Service busy", 0))
        }
    }

    /** Graceful shutdown with a one-minute drain window, then a hard stop. */
    fun shutdown() {
        executor.shutdown()
        try {
            if (!executor.awaitTermination(1, TimeUnit.MINUTES)) executor.shutdownNow()
        } catch (e: InterruptedException) {
            Thread.currentThread().interrupt() // restore interrupt status
            executor.shutdownNow()
        }
    }

    /** Number of pool threads actively running tasks. */
    fun getActiveCount(): Int = executor.activeCount
}

// System monitoring helper: samples process CPU load and peak heap usage.
class SystemMonitor {
    // `as?` yields null on a type mismatch and never throws ClassCastException,
    // so the original try/catch was dead code; a null result now triggers the
    // same warning via the elvis branch.
    private val osBean: OperatingSystemMXBean? =
        (ManagementFactory.getOperatingSystemMXBean() as? com.sun.management.OperatingSystemMXBean)
            ?: run {
                println("警告：当前JVM不支持CPU使用率监控")
                null
            }
    private val memoryBean = ManagementFactory.getMemoryMXBean()

    // Fix: these are written by the (single) sampling-executor thread and read
    // from the test thread after sampling stops; @Volatile guarantees the
    // reader sees the final values across that handoff.
    @Volatile private var peakMemory = 0L
    @Volatile private var sampleCount = 0
    @Volatile private var totalCpuUsage = 0.0

    /**
     * Takes one sample.
     *
     * @return Pair(process CPU usage in percent — negative when unavailable or
     *         not yet measured; peak heap bytes observed so far)
     */
    fun sample(): Pair<Double, Long> {
        val cpuUsage = osBean?.processCpuLoad ?: -1.0
        if (cpuUsage > 0) {
            totalCpuUsage += cpuUsage
            sampleCount++
        }
        val heapMemory = memoryBean.heapMemoryUsage.used
        peakMemory = max(peakMemory, heapMemory)
        return Pair(cpuUsage * 100, peakMemory)
    }

    /** Average CPU usage in percent over positive samples, or -1.0 if none were taken. */
    fun getAverageCpuUsage(): Double = if (sampleCount > 0) (totalCpuUsage / sampleCount) * 100 else -1.0

    /** Peak heap usage observed so far, in MiB. */
    fun getPeakMemoryMB(): Double = peakMemory / (1024.0 * 1024.0)
}

// Load-testing harness. Counting invariant: every request is counted exactly
// once and counts down the latch exactly once, via its callback.
class LoadTester(private val backend: BackendService) {

    /**
     * Benchmarks the thread-pool gateway at a fixed submission rate.
     *
     * @param requestsPerSecond target submission rate (QPS)
     * @param durationSeconds   how long to keep submitting
     */
    fun testThreadPoolGateway(
        requestsPerSecond: Int,
        durationSeconds: Int
    ): PerformanceStats {
        val totalRequests = requestsPerSecond.toLong() * durationSeconds
        val gateway = ThreadPoolApiGateway(backend)
        val responses = ConcurrentLinkedQueue<Long>()
        val monitor = SystemMonitor()
        // Latch count is exactly the number of requests to submit.
        val latch = CountDownLatch(totalRequests.toInt())
        val completed = AtomicInteger(0)
        val failed = AtomicInteger(0)
        val cancelled = AtomicInteger(0)
        val submitted = AtomicLong(0)
        val progressStep = max(1, (totalRequests / 20).toInt())

        val progressMonitor = Executors.newSingleThreadScheduledExecutor()
        progressMonitor.scheduleAtFixedRate({
            val c = completed.get()
            val s = submitted.get()
            val a = gateway.getActiveCount()
            val percent = (c.toDouble() / totalRequests * 100).toInt()
            print("\r线程池进度: $percent% | 提交: $s/$totalRequests | 完成: $c/$totalRequests | 活跃线程: $a")
        }, 0, 1000, TimeUnit.MILLISECONDS)

        val systemMonitorJob = Executors.newSingleThreadScheduledExecutor()
        systemMonitorJob.scheduleAtFixedRate({ monitor.sample() }, 0, 200, TimeUnit.MILLISECONDS)

        val totalTimeNs = measureNanoTime {
            val intervalNs = 1_000_000_000L / requestsPerSecond
            val executor = Executors.newSingleThreadExecutor()

            executor.submit {
                var lastSubmitTime = System.nanoTime()
                var requestId = 1L

                while (submitted.get() < totalRequests) {
                    val currentTime = System.nanoTime()
                    val nextSubmitTime = lastSubmitTime + intervalNs

                    // Pace submissions to approximate the target QPS.
                    if (currentTime < nextSubmitTime) {
                        val sleepMs = (nextSubmitTime - currentTime) / 1_000_000
                        if (sleepMs > 0) {
                            Thread.sleep(sleepMs)
                        }
                    }

                    // Fix: capture the id BEFORE incrementing so that id, path
                    // and payload all refer to the same request number (the
                    // original used the post-increment value for path/payload).
                    val currentId = requestId++
                    val req = ApiRequest(
                        id = currentId,
                        path = "/api/service-${currentId % 10}",
                        payload = "payload-$currentId"
                    )

                    gateway.submitRequest(req) { res ->
                        // Exactly one classification + one countDown per request.
                        when {
                            res.status == 200 -> responses.add(res.processingTimeNs)
                            res.isCancelled -> cancelled.incrementAndGet()
                            else -> failed.incrementAndGet()
                        }
                        completed.incrementAndGet()
                        latch.countDown()
                    }

                    submitted.incrementAndGet()
                    lastSubmitTime = max(currentTime, nextSubmitTime)

                    if (submitted.get() % progressStep == 0L) {
                        val sPercent = (submitted.get().toDouble() / totalRequests * 100).toInt()
                        print("\r线程池提交进度: $sPercent%")
                    }
                }
            }
            executor.shutdown()

            // NOTE(review): unlike the coroutine test, timed-out requests are
            // not added to `failed` here — confirm whether that asymmetry is
            // intended before comparing failure counts between the two tests.
            if (!latch.await(durationSeconds + 10L, TimeUnit.SECONDS)) {
                println("\n警告：线程池部分请求超时未完成")
            }
        }

        progressMonitor.shutdown()
        systemMonitorJob.shutdown()
        gateway.shutdown()
        println()

        val times = responses.map { it / 1_000_000.0 }.sorted()
        val completedCount = completed.get().toLong()
        return PerformanceStats(
            totalRequests = totalRequests,
            completedRequests = completedCount,
            failedRequests = failed.get().toLong(),
            cancelledRequests = cancelled.get().toLong(),
            avgResponseTimeMs = if (times.isNotEmpty()) times.average() else 0.0,
            p99ResponseTimeMs = if (times.isNotEmpty()) times[max(0, (times.size * 0.99).toInt() - 1)] else 0.0,
            cpuUsage = monitor.getAverageCpuUsage(),
            peakMemoryMB = monitor.getPeakMemoryMB(),
            totalTimeSec = totalTimeNs / 1_000_000_000.0,
            // Fix: use totalRequests as the denominator so the error rate is
            // directly comparable with the coroutine test (the original
            // divided by completedCount here).
            errorRate = if (totalRequests > 0) failed.get().toDouble() / totalRequests else 0.0
        )
    }

    /**
     * Benchmarks the coroutine gateway at a fixed submission rate.
     * Requests are submitted in batches of 100 from a 4-way-limited dispatcher.
     */
    fun testCoroutineGateway(
        requestsPerSecond: Int,
        durationSeconds: Int,
        maxConcurrency: Int = Runtime.getRuntime().availableProcessors() * 2
    ): PerformanceStats {
        val totalRequests = requestsPerSecond.toLong() * durationSeconds
        val gateway = CoroutineApiGateway(backend, maxConcurrency)
        val responses = ConcurrentLinkedQueue<Long>()
        val monitor = SystemMonitor()
        val latch = CountDownLatch(totalRequests.toInt())
        val completed = AtomicInteger(0)
        val failed = AtomicInteger(0)
        val cancelled = AtomicInteger(0)
        val submitted = AtomicLong(0)
        // Fix: request ids are handed out from an AtomicLong. The original
        // incremented a plain `var requestId` concurrently from multiple async
        // coroutines running on limitedParallelism(4) — a data race that could
        // produce duplicate ids.
        val requestIdCounter = AtomicLong(1L)
        val progressStep = max(1, (totalRequests / 20).toInt())

        val progressMonitor = Executors.newSingleThreadScheduledExecutor()
        progressMonitor.scheduleAtFixedRate({
            val c = completed.get()
            val s = submitted.get()
            val a = gateway.getActiveCount()
            val percent = (c.toDouble() / totalRequests * 100).toInt()
            print("\r协程进度: $percent% | 提交: $s/$totalRequests | 完成: $c/$totalRequests | 活跃协程: $a")
        }, 0, 2000, TimeUnit.MILLISECONDS)

        val systemMonitorJob = Executors.newSingleThreadScheduledExecutor()
        systemMonitorJob.scheduleAtFixedRate({ monitor.sample() }, 0, 200, TimeUnit.MILLISECONDS)

        val totalTimeNs = measureNanoTime {
            runBlocking(Dispatchers.Default.limitedParallelism(4)) {
                val intervalNs = 1_000_000_000L / requestsPerSecond
                var lastSubmitTime = System.nanoTime()
                val batchSize = 100
                val totalBatches = (totalRequests + batchSize - 1) / batchSize

                repeat(totalBatches.toInt()) {
                    if (submitted.get() >= totalRequests) return@repeat

                    val remaining = totalRequests - submitted.get()
                    val actualBatchSize = min(batchSize.toLong(), remaining).toInt()

                    val jobs = (0 until actualBatchSize).map { batchInnerIdx ->
                        async {
                            // Pace each request within the batch toward the target QPS.
                            val currentTime = System.nanoTime()
                            val expectedSubmitTime = lastSubmitTime + batchInnerIdx * intervalNs
                            val delayMs = (expectedSubmitTime - currentTime) / 1_000_000

                            if (delayMs > 0) {
                                delay(delayMs)
                            }

                            val currentRequestId = requestIdCounter.getAndIncrement()
                            val req = ApiRequest(
                                id = currentRequestId,
                                path = "/api/service-${currentRequestId % 10}",
                                payload = "payload-$currentRequestId"
                            )

                            // Exactly one classification + one countDown per request.
                            gateway.submitRequest(req) { res ->
                                when {
                                    res.isCancelled -> cancelled.incrementAndGet()
                                    res.status == 200 -> responses.add(res.processingTimeNs)
                                    else -> failed.incrementAndGet()
                                }
                                completed.incrementAndGet()
                                latch.countDown()
                            }

                            // Guard: submission count must never exceed the plan.
                            if (submitted.incrementAndGet() > totalRequests) {
                                println("警告：提交数超过总请求数，可能存在重复发送")
                            }

                            if (submitted.get() % progressStep == 0L) {
                                val sPercent = (submitted.get().toDouble() / totalRequests * 100).toInt()
                                print("\r协程提交进度: $sPercent%")
                            }
                        }
                    }

                    jobs.awaitAll()
                    lastSubmitTime += actualBatchSize * intervalNs
                }
            }

            // Wait for all requests; anything still missing at the deadline is
            // counted as failed.
            if (!latch.await(durationSeconds + 10L, TimeUnit.SECONDS)) {
                val uncompleted = totalRequests - completed.get()
                println("\n警告：协程 ${uncompleted} 个请求超时未完成")
                failed.addAndGet(uncompleted.toInt())
            }
        }

        progressMonitor.shutdown()
        systemMonitorJob.shutdown()
        gateway.shutdown()
        println()

        val times = responses.map { it / 1_000_000.0 }.sorted()
        val completedCount = completed.get().toLong()
        return PerformanceStats(
            totalRequests = totalRequests,
            completedRequests = completedCount,
            failedRequests = failed.get().toLong(),
            cancelledRequests = cancelled.get().toLong(),
            avgResponseTimeMs = if (times.isNotEmpty()) times.average() else 0.0,
            p99ResponseTimeMs = if (times.isNotEmpty()) times[max(0, (times.size * 0.99).toInt() - 1)] else 0.0,
            cpuUsage = monitor.getAverageCpuUsage(),
            peakMemoryMB = monitor.getPeakMemoryMB(),
            totalTimeSec = totalTimeNs / 1_000_000_000.0,
            errorRate = if (totalRequests > 0) failed.get().toDouble() / totalRequests else 0.0
        )
    }
}

// Entry point: warms up the JVM, then runs both benchmarks and prints a comparison.
fun main() {
    runWarmup()
    val backend = BackendService()
    val tester = LoadTester(backend)
    val reqPerSec = 10000  // NOTE: high QPS values put extreme pressure on most machines; lower if needed
    val duration = 60

    println("=== 测试线程池方案（QPS: $reqPerSec, 时长: $duration 秒）===")
    val threadStats = tester.testThreadPoolGateway(reqPerSec, duration)

    println("\n=== 测试协程方案（QPS: $reqPerSec, 时长: $duration 秒）===")
    val coroutineStats = tester.testCoroutineGateway(reqPerSec, duration)

    // Completion rate = (completed - failed) / total * 100%; cancelled
    // requests count as normal terminations and remain included.
    val threadValid = threadStats.completedRequests - threadStats.failedRequests
    val threadCompletionRate = threadValid.toDouble() / threadStats.totalRequests * 100
    val coroutineValid = coroutineStats.completedRequests - coroutineStats.failedRequests
    val coroutineCompletionRate = coroutineValid.toDouble() / coroutineStats.totalRequests * 100

    println("\n===== 最终性能对比 =====")
    println(
        "1. 请求完成率: 线程池 ${String.format("%.2f", threadCompletionRate)}% " +
                "vs 协程 ${String.format("%.2f", coroutineCompletionRate)}%"
    )
    // Fix: line 2 previously printed the thread-pool value on BOTH sides; the
    // right-hand side now shows the coroutine gateway's cancelled count.
    println(
        "2. 取消请求数: 线程池 ${threadStats.cancelledRequests} " +
                "vs 协程 ${coroutineStats.cancelledRequests}"
    )
    println(
        "3. 平均响应时间: 线程池 ${String.format("%.2f", threadStats.avgResponseTimeMs)}ms " +
                "vs 协程 ${String.format("%.2f", coroutineStats.avgResponseTimeMs)}ms"
    )
    println(
        "4. P99延迟: 线程池 ${String.format("%.2f", threadStats.p99ResponseTimeMs)}ms " +
                "vs 协程 ${String.format("%.2f", coroutineStats.p99ResponseTimeMs)}ms"
    )
    println(
        "5. CPU使用率: 线程池 ${String.format("%.2f", threadStats.cpuUsage)}% " +
                "vs 协程 ${String.format("%.2f", coroutineStats.cpuUsage)}%"
    )
    println(
        "6. 峰值内存: 线程池 ${String.format("%.2f", threadStats.peakMemoryMB)}MB " +
                "vs 协程 ${String.format("%.2f", coroutineStats.peakMemoryMB)}MB"
    )
    println(
        "7. 错误率: 线程池 ${String.format("%.2f", threadStats.errorRate * 100)}% " +
                "vs 协程 ${String.format("%.2f", coroutineStats.errorRate * 100)}%"
    )
    println(
        "8. 请求详情: 线程池（完成: ${threadStats.completedRequests} / 失败: ${threadStats.failedRequests} / 取消: ${threadStats.cancelledRequests}） " +
                "vs 协程（完成: ${coroutineStats.completedRequests} / 失败: ${coroutineStats.failedRequests} / 取消: ${coroutineStats.cancelledRequests}）"
    )
}

// JVM warm-up: run both gateways briefly at low QPS before measuring, so JIT
// compilation and class loading do not distort the real benchmark.
private fun runWarmup() {
    println("进行JVM预热...")
    val warmupBackend = BackendService()
    LoadTester(warmupBackend).apply {
        testThreadPoolGateway(100, 5)
        testCoroutineGateway(100, 5)
    }
    println("预热完成\n")
}
