package com.ww.exercise.coroutine.mid.q14

import kotlinx.coroutines.*
import kotlinx.coroutines.sync.Semaphore
import kotlinx.coroutines.sync.withPermit
import java.util.concurrent.ConcurrentHashMap;
import kotlin.random.Random
import kotlin.system.measureTimeMillis

// 1.数据模型定义

// One unit of work: a slice of the data set plus the node it was first assigned to.
data class DataShard(
    val shardId: String,
    val data: String,
    val initialNodeId: String // ID of the node this shard was initially assigned to (reassignment may move it)
)

// Outcome of processing a single shard.
data class ShardResult(
    val shardId: String,
    // Fix: was `var` — the owning node is never reassigned on an existing result
    // anywhere in this file; reprocessing produces a fresh ShardResult instead.
    val nodeId: String,
    val success: Boolean,
    val processingTimeMs: Long,
    val errorMessage: String? = null // failure cause; null on success
)

// Node lifecycle states: PROCESSING while shards are in flight, COMPLETED when
// the node's batch is done, FAILED when the master marks the node timed out.
enum class NodeStatus { PROCESSING, COMPLETED, FAILED }

// Status snapshot a node publishes to the lock service.
// Fix: all fields were `var`, but every update in this file goes through
// copy()/replacement (never in-place mutation), so the holder is now immutable.
data class NodeInfo(
    val nodeId: String,
    val status: NodeStatus,
    // Epoch millis of the last status write; the master uses this as a
    // heartbeat timestamp for timeout detection.
    val lastUpdatedTime: Long,
    val completedShards: Int = 0,
    val totalShards: Int = 0
)

// Aggregated end-of-run report produced by the master node.
data class GlobalReport(
    val totalShards: Int,
    val successfulShards: Int,
    val failedShards: Int,
    val processingTimeMs: Long, // wall-clock millis from start() to report generation
    val nodeStats: Map<String, NodeStats>, // per-node statistics keyed by node ID
    val failedNodes: List<String> // IDs of nodes marked FAILED (timed out)
)

// Per-node processing statistics for the global report.
data class NodeStats(
    val nodeId: String,
    val processedShards: Int, // number of shard results attributed to this node
    val successRate: Double, // successful / processed, in [0.0, 1.0]
    val averageProcessingTimeMs: Double // mean time of SUCCESSFUL shards only; 0.0 if none
)

// 2. Distributed coordination service contract.
// Abstracts the shared state (node heartbeats, shard assignments, results) that a
// real deployment would keep in an external store; implementations must tolerate
// concurrent calls from the master and all worker nodes.
interface DistributedLockService {
    /** Publishes (or refreshes) a node's status; doubles as its heartbeat. */
    fun updateNodeStatus(nodeInfo: NodeInfo)

    /** Returns a snapshot of every registered node's status. */
    fun getAllNodeStatuses(): List<NodeInfo>

    /** Returns one node's status, or null if the node never registered. */
    fun getNodeStatus(nodeId: String): NodeInfo?

    /** Records the given shards as assigned to the node (additive, not a replacement). */
    fun assignShardsToNode(nodeId: String, shardIds: List<String>)

    /** Returns all shard IDs ever assigned to the node. */
    fun getNodeAssignedShards(nodeId: String): List<String>

    /** Records the processing result for a shard. */
    fun markShardCompleted(shardId: String, result: ShardResult)

    /** Returns IDs of assigned shards that have no recorded result yet. */
    fun getUncompletedShards(): List<String>

    /** True when every assigned shard has a recorded result (success OR failure). */
    fun isAllShardsCompleted(): Boolean

    /** Returns all recorded shard results. */
    fun getAllResults(): List<ShardResult>
}

// In-memory stand-in for a distributed coordination service.
// Every collection here is accessed concurrently by the master's monitor
// coroutine and all worker-node coroutines, so all state must be thread-safe.
class InMemoryDistributedLockService : DistributedLockService {

    private val nodeStatuses = ConcurrentHashMap<String, NodeInfo>()

    // Values are immutable lists replaced atomically via merge() — see
    // assignShardsToNode(); this avoids sharing a mutable list across threads.
    private val nodeShards = ConcurrentHashMap<String, List<String>>()

    private val shardResults = ConcurrentHashMap<String, ShardResult>()

    // Fix: was a plain mutableSetOf(), which is not thread-safe — concurrent
    // assignShardsToNode() writes racing getUncompletedShards()/isAllShardsCompleted()
    // iteration could corrupt it or throw ConcurrentModificationException.
    private val allShardIds: MutableSet<String> = ConcurrentHashMap.newKeySet()

    override fun updateNodeStatus(nodeInfo: NodeInfo) {
        // Stamp the write time so it can serve as a heartbeat for timeout checks.
        nodeStatuses[nodeInfo.nodeId] = nodeInfo.copy(lastUpdatedTime = System.currentTimeMillis())
    }

    override fun getAllNodeStatuses(): List<NodeInfo> = nodeStatuses.values.toList()

    override fun getNodeStatus(nodeId: String): NodeInfo? = nodeStatuses[nodeId]

    override fun assignShardsToNode(nodeId: String, shardIds: List<String>) {
        // Fix: the original getOrDefault/addAll/put sequence was a non-atomic
        // read-modify-write that could drop assignments under concurrent calls
        // for the same node. merge() performs the append atomically per key.
        nodeShards.merge(nodeId, shardIds.toList()) { existing, added -> existing + added }
        allShardIds.addAll(shardIds)
    }

    override fun getNodeAssignedShards(nodeId: String): List<String> {
        // Values are immutable, so they can be returned without copying.
        return nodeShards[nodeId] ?: emptyList()
    }

    override fun markShardCompleted(shardId: String, result: ShardResult) {
        shardResults[shardId] = result

        // Fix: the original get-then-put could lose an increment when two shards
        // of the same node finished simultaneously; computeIfPresent is atomic.
        nodeStatuses.computeIfPresent(result.nodeId) { _, nodeInfo ->
            nodeInfo.copy(
                completedShards = nodeInfo.completedShards + 1,
                lastUpdatedTime = System.currentTimeMillis()
            )
        }
    }

    override fun getUncompletedShards(): List<String> =
        allShardIds.filterNot { shardResults.containsKey(it) }

    override fun isAllShardsCompleted(): Boolean =
        allShardIds.all { shardResults.containsKey(it) }

    override fun getAllResults(): List<ShardResult> = shardResults.values.toList()
}

// 3. Worker node: processes assigned shards with bounded concurrency and
// reports each shard's result to the lock service.
class DataProcessingNode(
    val nodeId: String,
    private val lockService: DistributedLockService,
    private val scope: CoroutineScope
) {
    private val cpuCore = Runtime.getRuntime().availableProcessors()

    // Cap in-flight shard processing at 2x the core count.
    private val maxConcurrency = cpuCore * 2
    private val semaphore = Semaphore(maxConcurrency)

    // Dedup map: shardId -> in-flight (or finished) processing task.
    private val shardProcessingDeferreds = ConcurrentHashMap<String, Deferred<ShardResult>>()

    // Starts processing the given batch. Returns immediately; work runs in [scope].
    fun startProcessing(shards: List<DataShard>) {
        scope.launch {
            // Register this node as PROCESSING before any work begins.
            lockService.updateNodeStatus(
                NodeInfo(
                    nodeId = nodeId,
                    status = NodeStatus.PROCESSING,
                    lastUpdatedTime = System.currentTimeMillis(),
                    totalShards = shards.size
                )
            )

            // Submit every shard, then wait for all of them. Each task records
            // its own result before completing (see submitShard), so awaitAll()
            // also guarantees every markShardCompleted() has run — the original
            // joinAll only waited on the deferreds, letting the COMPLETED status
            // below race ahead of result reporting.
            shards.map { submitShard(it) }.awaitAll()

            lockService.updateNodeStatus(
                NodeInfo(
                    nodeId = nodeId,
                    status = NodeStatus.COMPLETED,
                    lastUpdatedTime = System.currentTimeMillis(),
                    completedShards = shards.size,
                    totalShards = shards.size
                )
            )
        }
    }

    // Submits one shard for asynchronous processing and registers the task for dedup.
    // Fix: the original wrapped only the *launch* in semaphore.withPermit, releasing
    // the permit as soon as the async task was created — the concurrency limit was
    // never actually enforced. The permit is now held for the duration of processShard.
    private fun submitShard(shard: DataShard): Deferred<ShardResult> {
        val deferred = scope.async {
            val result = semaphore.withPermit { processShard(shard) }
            // Record the result inside the task so awaiting the deferred implies
            // the lock service has already been updated.
            lockService.markShardCompleted(shard.shardId, result)
            result
        }
        shardProcessingDeferreds[shard.shardId] = deferred
        return deferred
    }

    // Processes a single shard. Never throws: failures are captured in the result
    // (cancellation excepted, which must propagate).
    private suspend fun processShard(shard: DataShard): ShardResult {
        val startTime = System.currentTimeMillis()
        return try {
            // Simulated work: 100-500 ms.
            delay(Random.nextLong(100, 500))

            // Simulated 1% failure rate.
            if (Random.nextDouble() < 0.01) {
                throw Exception("处理失败：模拟错误")
            }

            ShardResult(
                shardId = shard.shardId,
                nodeId = nodeId,
                success = true,
                processingTimeMs = System.currentTimeMillis() - startTime
            )
        } catch (e: CancellationException) {
            // Fix: never swallow cancellation — rethrow so structured
            // concurrency can tear the node down.
            throw e
        } catch (e: Exception) {
            ShardResult(
                shardId = shard.shardId,
                nodeId = nodeId,
                success = false,
                // Fix: the original reported 0 here, discarding the time
                // actually spent before the failure.
                processingTimeMs = System.currentTimeMillis() - startTime,
                errorMessage = e.message
            )
        }
    }

    // Processes shards reassigned from a failed node. Skips shards this node is
    // already processing; re-runs shards whose earlier attempt has finished
    // (preserving the original dedup semantics).
    fun processAdditionalShards(shards: List<DataShard>) {
        scope.launch {
            shards.forEach { shard ->
                val existing = shardProcessingDeferreds[shard.shardId]
                if (existing == null || existing.isCompleted) {
                    submitShard(shard)
                }
            }
        }
    }
}

// 4. Master/controller: distributes shards, monitors node heartbeats, reassigns
// work from timed-out nodes, and produces the final global report.
class MasterNodeController(
    private val lockService: DistributedLockService,
    private val nodes: List<DataProcessingNode>,
    private val allShards: List<DataShard>,
    private val scope: CoroutineScope,
    private val nodeTimeoutMs: Long = 30000, // node considered dead after 30 s without a status update
    private val checkIntervalMs: Long = 10000 // monitor loop period: 10 s
) {
    // Completed exactly once, after the global report has been printed.
    private val reportCompletedSignal = CompletableDeferred<Unit>()
    private var isProcessingComplete = false
    private var processingStartTime = 0L

    // Suspends until the global report has been generated.
    suspend fun waitForReportCompletion() = reportCompletedSignal.await()

    // Starts the controller: distributes shards, then begins monitoring.
    fun start() {
        processingStartTime = System.currentTimeMillis()
        initialShardAllocation()
        startNodeMonitoring()
    }

    // Evenly distributes all shards across the nodes.
    private fun initialShardAllocation() {
        if (nodes.isEmpty()) return // nothing to distribute to (and avoids div-by-zero)

        // Fix: the original used chunked(allShards.size / nodes.size), which
        // (a) throws IllegalArgumentException when there are fewer shards than
        // nodes (chunk size 0), and (b) yields nodes.size + 1 chunks when the
        // division has a remainder, silently dropping the last chunk so those
        // shards were never processed and the run never completed. Ceiling
        // division yields at most nodes.size chunks and covers every shard.
        val chunkSize = (allShards.size + nodes.size - 1) / nodes.size
        val shardsPerNode = if (chunkSize > 0) allShards.chunked(chunkSize) else emptyList()

        nodes.forEachIndexed { index, node ->
            val nodeShards = shardsPerNode.getOrElse(index) { emptyList() }
            lockService.assignShardsToNode(node.nodeId, nodeShards.map { it.shardId })
            node.startProcessing(nodeShards)
        }
    }

    // Periodically checks node health and overall completion.
    private fun startNodeMonitoring() {
        scope.launch {
            while (!isProcessingComplete) {
                delay(checkIntervalMs)
                monitorNodes()

                if (lockService.isAllShardsCompleted()) {
                    isProcessingComplete = true
                    generateGlobalReport()
                }
            }
        }
    }

    // Flags nodes whose last status update is older than the timeout.
    private fun monitorNodes() {
        val allNodes = lockService.getAllNodeStatuses()
        val currentTime = System.currentTimeMillis()

        // COMPLETED nodes no longer heartbeat, so they are exempt from timeout.
        val timeoutNodes = allNodes.filter {
            it.status != NodeStatus.COMPLETED &&
                    currentTime - it.lastUpdatedTime > nodeTimeoutMs
        }

        if (timeoutNodes.isNotEmpty()) {
            println("检测到超时节点：${timeoutNodes.joinToString { it.nodeId }}")
            handleTimeoutNodes(timeoutNodes)
        }
    }

    // Marks timed-out nodes FAILED and reassigns their unfinished shards.
    private fun handleTimeoutNodes(timeoutNodes: List<NodeInfo>) {
        // Fix: hoisted out of the per-shard filter — the original called
        // getAllResults() once PER SHARD, an O(shards x results) scan.
        val completedShardIds = lockService.getAllResults().mapTo(HashSet()) { it.shardId }

        timeoutNodes.forEach { timeoutNode ->
            // Mark the node failed so redistribution excludes it.
            lockService.updateNodeStatus(timeoutNode.copy(status = NodeStatus.FAILED))

            val nodeShards = lockService.getNodeAssignedShards(timeoutNode.nodeId)
            val uncompletedShards = nodeShards.filterNot { it in completedShardIds }

            if (uncompletedShards.isNotEmpty()) {
                // (typo fixed in message: 重要 -> 需要)
                println("节点 ${timeoutNode.nodeId} 有 ${uncompletedShards.size} 个未完成分片需要重新分配")
                redistributeShards(uncompletedShards)
            }
        }
    }

    // Spreads the given shards evenly over all non-FAILED nodes.
    private fun redistributeShards(shardIds: List<String>) {
        val activeNodes = nodes.filter { node ->
            lockService.getNodeStatus(node.nodeId)?.status != NodeStatus.FAILED
        }

        if (activeNodes.isEmpty()) {
            println("没有可用的活跃节点来重新分配分片")
            return
        }

        // Set lookup avoids an O(ids) contains() scan per shard.
        val idSet = shardIds.toHashSet()
        val shardsToRedistribute = allShards.filter { it.shardId in idSet }
        if (shardsToRedistribute.isEmpty()) return // guard: chunked(0) would throw

        // Ceiling division so at most activeNodes.size chunks are produced.
        val shardsPerNode =
            shardsToRedistribute.chunked((shardsToRedistribute.size + activeNodes.size - 1) / activeNodes.size)

        activeNodes.forEachIndexed { index, node ->
            val nodeShards = shardsPerNode.getOrElse(index) { emptyList() }
            if (nodeShards.isNotEmpty()) {
                println("将${nodeShards.size}个分片重新分配给节点${node.nodeId}")
                lockService.assignShardsToNode(node.nodeId, nodeShards.map { it.shardId })
                node.processAdditionalShards(nodeShards)
            }
        }
    }

    // Computes and prints the final report, then releases waiters.
    private fun generateGlobalReport() {
        val processingTime = System.currentTimeMillis() - processingStartTime
        val allResults = lockService.getAllResults()

        // Overall totals.
        val totalShards = allShards.size
        val successfulShards = allResults.count { it.success }
        val failedShards = totalShards - successfulShards

        // Per-node statistics, grouped by the node that produced each result.
        val nodeStats = allResults
            .groupBy { it.nodeId }
            .mapValues { (nodeId, results) ->
                val processed = results.size
                val successful = results.count { it.success }
                val successRate = if (processed > 0) successful.toDouble() / processed else 0.0
                // Average over successful shards only; 0.0 when there are none.
                val avgTime = results.filter { it.success }
                    .takeIf { it.isNotEmpty() }
                    ?.map { it.processingTimeMs.toDouble() }
                    ?.average()
                    ?: 0.0

                NodeStats(
                    nodeId = nodeId,
                    processedShards = processed,
                    successRate = successRate,
                    averageProcessingTimeMs = avgTime
                )
            }

        val failedNodes = lockService.getAllNodeStatuses()
            .filter { it.status == NodeStatus.FAILED }
            .map { it.nodeId }

        val report = GlobalReport(
            totalShards = totalShards,
            successfulShards = successfulShards,
            failedShards = failedShards,
            processingTimeMs = processingTime,
            nodeStats = nodeStats,
            failedNodes = failedNodes
        )

        // Fix: guard the percentage math against a zero shard count (NaN otherwise).
        val pctBase = maxOf(report.totalShards, 1)

        println("\n===== 全局处理报告 =====")
        println("总处理时间: ${report.processingTimeMs}ms")
        println("总分片数: ${report.totalShards}")
        println("成功分片数: ${report.successfulShards} (${"%.2f".format(report.successfulShards * 100.0 / pctBase)}%)")
        println("失败分片数: ${report.failedShards} (${"%.2f".format(report.failedShards * 100.0 / pctBase)}%)")
        println("失败节点数: ${report.failedNodes.size}")
        println("\n节点统计:")
        report.nodeStats.values.forEach { stats ->
            println("  节点 ${stats.nodeId}:")
            println("    处理分片数: ${stats.processedShards}")
            println("    成功率: ${"%.2f".format(stats.successRate * 100)}%")
            println("    平均处理时间: ${"%.2f".format(stats.averageProcessingTimeMs)}ms")
        }
        println("=======================")

        // Unblock waitForReportCompletion() callers.
        reportCompletedSignal.complete(Unit)
    }
}

// Entry point: wires up the simulated cluster, runs it to completion, shuts down.
fun main() = runBlocking {
    // Shared in-memory coordination service for master and nodes.
    val lockService = InMemoryDistributedLockService()

    // SupervisorJob: one node's failure must not cancel sibling coroutines.
    val scope = CoroutineScope(Dispatchers.Default + SupervisorJob())

    // 10 worker nodes.
    val nodeCount = 10
    val nodes = (1..nodeCount).map {
        DataProcessingNode("NODE-$it", lockService, scope)
    }

    // Generate 100,000 data shards, round-robin tagged across the nodes.
    val totalShards = 100000
    val allShards = (1..totalShards).map {
        DataShard(
            shardId = "SHARD-$it",
            data = "用户数据分片${it}的内容...",
            initialNodeId = "NODE-${(it % nodeCount) + 1}"
        )
    }

    // Create the master controller.
    val masterNode = MasterNodeController(
        lockService = lockService,
        nodes = nodes,
        allShards = allShards,
        scope = scope
    )

    println("启动分布式数据处理系统...")
    println("总节点数: $nodeCount")
    println("总数据分片数: $totalShards")
    println("每个节点初始处理分片数: ${totalShards / nodeCount}")
    println("每个节点最大并发数: 取决于CPU核心数 (核心数 × 2)")

    masterNode.start()

    // Block until the master has printed the global report.
    // Fix: the redundant delay(1000) that followed has been removed — the
    // signal above completes only after the report is fully generated.
    masterNode.waitForReportCompletion()

    // Tear down all remaining coroutines.
    scope.cancel()
    println("系统已关闭")
}