package io.xxx.omni.osc.sync

import com.alibaba.fastjson.JSONObject
import com.baomidou.mybatisplus.extension.kotlin.KtQueryWrapper
import io.github.bucket4j.Bandwidth
import io.github.bucket4j.BlockingBucket
import io.github.bucket4j.Bucket4j
import io.github.bucket4j.Refill
import io.github.bucket4j.local.SynchronizationStrategy
import io.xxx.omni.osc.client.PlatformClient
import io.xxx.omni.osc.client.StoreClient
import io.xxx.omni.osc.common.format
import io.xxx.omni.osc.common.pool
import io.xxx.omni.osc.common.toJSONString
import io.xxx.omni.osc.domain.*
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.shardingsphere.elasticjob.api.ShardingContext
import org.apache.shardingsphere.elasticjob.simple.job.SimpleJob
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.cloud.context.config.annotation.RefreshScope
import org.springframework.context.ApplicationContext
import org.springframework.data.repository.findByIdOrNull
import org.springframework.kafka.core.KafkaTemplate
import org.springframework.kafka.core.ProducerFactory
import org.springframework.transaction.support.TransactionTemplate
import org.springframework.util.StopWatch
import org.springframework.web.client.RestTemplate
import java.io.Reader
import java.time.Duration
import java.time.LocalDateTime
import java.util.concurrent.CountDownLatch
import java.util.concurrent.Future
import java.util.concurrent.ThreadPoolExecutor

/**
 * Immutable per-invocation job context: the store being synchronized, the
 * [startTime, endTime] window and an optional extension parameter.
 *
 * Declared as a `data class` so destructuring (`component1`..`component4`),
 * `equals`/`hashCode`/`toString` and `copy` are generated by the compiler
 * instead of the previously hand-written `componentN` operators.
 */
data class JobContext(
    val store: Store,
    val startTime: LocalDateTime,
    val endTime: LocalDateTime,
    val parameter: Any?,
) {
    /**
     * Short identification prefix for log statements:
     * "platform(id) - store(id) [start - end]".
     */
    fun logPrefix(): String {
        val platform = store.platform
        return "${platform.name}(${platform.id}) - ${store.name}(${store.id}) [${startTime.format()} - ${endTime.format()}]"
    }
}

/**
 * Skeleton for data synchronization jobs. Provides unified handling of
 * checkpoint updates, API rate limiting, job interruption and extension
 * parameters; subclasses implement [fetchAndProcess] to get a working job.
 *
 * The [JobCheckpoint] is persisted after every completed time window.
 * Each platform has exactly one Porter that synchronizes all of the
 * platform's stores; stores are processed in parallel.
 *
 * TODO create one job per store to reduce interference between stores
 *
 * @see getId
 */
@RefreshScope
abstract class JobAdapter<T, R> : SimpleJob, PlatformIdentity {

    protected val log: Logger = LoggerFactory.getLogger(javaClass)

    @Autowired
    protected lateinit var applicationContext: ApplicationContext

    @Autowired
    private lateinit var platformClient: PlatformClient

    @Autowired
    private lateinit var storeClient: StoreClient

    @Autowired
    private lateinit var jobRepository: JobRepository

    @Autowired
    private lateinit var jobCheckpointRepository: JobCheckpointRepository

    @Autowired
    private lateinit var jobPropertyRepository: JobPropertyRepository

    /** Cron expression for the job trigger; defaults to every 5 seconds. */
    open val cron = "0/5 * * * * ?"

    /**
     * Platform information, refreshed to the latest value on every trigger.
     */
    protected lateinit var platform: Platform

    /**
     * Job information, refreshed to the latest value on every trigger.
     */
    protected lateinit var job: Job

    protected abstract val jobType: JobType

    /**
     * Returns the job id used as the key of [JobCheckpoint] and [JobProperty].
     * @see [JobCheckpoint]
     */
    abstract fun getId(store: Store): String

    /**
     * Maximum width of a single [startTime, endTime] window, bounded by the
     * platform's query-range limit. [Duration.ZERO] means no limit — the whole
     * range is processed in one window.
     * @see Duration.ZERO
     */
    protected open val duration: Duration = Duration.ofDays(1)

    /**
     * Data latency: the window end time is `now - delay`.
     */
    protected open val delay: Duration = Duration.ZERO

    /**
     * API calls allowed per minute; `null` disables rate limiting.
     */
    protected open val permitsPerMinutes: Int? = null

    private val rateLimiter: BlockingBucket by lazy {
        // Only reachable from acquire(), which guards on permitsPerMinutes != null.
        val tokensPerMinutes = permitsPerMinutes?.toLong()
            ?: error("rateLimiter initialized without permitsPerMinutes")
        require(tokensPerMinutes > 0) { "接口每分钟调用次数限制[permitsPerMinutes]不允许 <= 0" }
        val refill = Refill.intervally(tokensPerMinutes, Duration.ofMinutes(1))
        // Cap the burst capacity at half of the per-minute budget (minimum 1).
        val capacity = if (tokensPerMinutes < 2) 1 else tokensPerMinutes / 2
        Bucket4j.builder()
            .withSynchronizationStrategy(SynchronizationStrategy.SYNCHRONIZED)
            .addLimit(Bandwidth.classic(capacity, refill))
            .build() as BlockingBucket
    }

    /**
     * Acquire permission to proceed; blocks on the rate limiter when
     * [permitsPerMinutes] is configured, otherwise returns immediately.
     */
    protected open fun acquire() {
        if (permitsPerMinutes != null) {
            rateLimiter.consume(1)
        }
    }

    /**
     * Supplies extension parameters so the API can be called multiple times.
     * @return first: list of extension parameters; second: whether all
     *         parameters may be processed in parallel
     */
    protected open fun getParameters(store: Store): Pair<List<Any>, Boolean> = emptyList<Any>() to false

    /**
     * When true, execution is skipped if [getParameters] returns an empty list.
     */
    protected open val checkParameters = false

    /**
     * Returns the start time honoring platform limits — some platforms only
     * allow querying e.g. the last 3 months of orders. Defaults to the
     * checkpoint's end time.
     */
    protected open fun getStartTime(store: Store, jobCheckpoint: JobCheckpoint): LocalDateTime = jobCheckpoint.endTime

    override fun execute(shardingContext: ShardingContext) {
        this.platform = platformClient.getOne(platformId()) ?: throw RuntimeException("平台[${platformId()}]不存在")
        this.job = jobRepository.findByIdOrNull(jobType.id) ?: throw RuntimeException("任务[${jobType.id}]不存在")

        val stores = storeClient.getAll(platformId())
        stores.parallelStream().forEach {
            if (it.platform.enabled && it.enabled && it.authorization.enabled) {
                val id = getId(it)
                var jobCheckpoint = jobCheckpointRepository.findByIdOrNull(id)
                if (jobCheckpoint == null) {    // first execution: checkpoint starts at the store's creation time
                    jobCheckpoint = JobCheckpoint(id, it.created, LocalDateTime.now())
                    jobCheckpointRepository.saveAndFlush(jobCheckpoint)
                }

                try {
                    execute(it, jobCheckpoint)
                } catch (e: InterruptedException) {
                    // Thrown when the job was disabled via JobProperty; not an error.
                    if (log.isDebugEnabled)
                        log.debug(e.localizedMessage)
                }
            }
        }
    }

    /**
     * Runs one store's synchronization: splits [startTime, endTime] into
     * windows of at most [duration], and for each window invokes
     * [fetchAndProcess] once per extension parameter, advancing the
     * checkpoint after each completed window.
     */
    private fun execute(store: Store, jobCheckpoint: JobCheckpoint) {
        val id = getId(store)
        val fetchAndProcess =
            { startTime: LocalDateTime, endTime: LocalDateTime, parameters: List<Any>, parallel: Boolean ->
                // Re-check the enabled flag before every window so a disable
                // request interrupts the job between windows.
                var jobProperty = jobPropertyRepository.findByIdOrNull(id)
                if (jobProperty == null) {
                    jobProperty = JobProperty(id, LocalDateTime.now())
                    jobPropertyRepository.saveAndFlush(jobProperty)
                }
                if (!jobProperty.enabled) {
                    throw InterruptedException("任务${id}已经中断")
                }

                // No parameters -> a single run with parameter == null.
                val stream = parameters.ifEmpty { listOf(null) }.stream()
                if (parallel)
                    stream.parallel()
                stream.forEach {
                    acquire()
                    val context = JobContext(store, startTime, endTime, it)
                    fetchAndProcess(context)
                    afterExecuted(context)
                }
                // Window finished: advance the checkpoint so a restart resumes here.
                jobCheckpoint.endTime = endTime
                jobCheckpointRepository.saveAndFlush(jobCheckpoint)
            }

        val stopWatch = StopWatch("execute")
        stopWatch.start()

        val startTime = getStartTime(store, jobCheckpoint)
        val endTime = LocalDateTime.now().minusSeconds(delay.toSeconds())
        if (startTime >= endTime)
            return

        val (parameters, parallel) = getParameters(store)
        if (checkParameters && parameters.isEmpty())
            return

        if (duration == Duration.ZERO) {
            fetchAndProcess(startTime, endTime, parameters, parallel)
        } else {
            // Ceiling division: number of duration-sized windows covering the range.
            val totalSeconds = Duration.between(startTime, endTime).abs().seconds
            val rangeNums = totalSeconds / duration.seconds + if (totalSeconds % duration.seconds == 0L) 0 else 1
            for (i in 1..rangeNums) {
                val actualStartTime = startTime.plusSeconds(duration.seconds * (i - 1))
                val actualEndTime = if (i == rangeNums) endTime else startTime.plusSeconds(duration.seconds * i)
                fetchAndProcess(actualStartTime, actualEndTime, parameters, parallel)
            }
        }

        stopWatch.stop()
        val logPrefix = "$id : ${platform.name}(${platform.id}) - ${store.name}(${store.id}) " +
                "[${startTime.format()} - ${endTime.format()}]"
        // BUGFIX: removed a stray '}' that was emitted after the prefix.
        log.info("$logPrefix 数据同步完成，耗时 ${stopWatch.totalTimeSeconds.format()} 秒")
    }

    /**
     * Fetches and processes the data for one window/parameter combination.
     */
    protected abstract fun fetchAndProcess(context: JobContext)

    /**
     * Hook invoked after each [fetchAndProcess] call; default is a no-op.
     */
    protected open fun afterExecuted(context: JobContext) {}

    /**
     * Logs an error and throws a RuntimeException describing a failed API call.
     * @param request the request object
     * @param response the error response object
     */
    protected fun throwException(context: JobContext, request: Any?, response: Any?) {
        val (store, startTime, endTime) = context
        val message = "${store.name}(${store.id})[${startTime.format()} - ${endTime.format()}] - 接口调用失败, " +
                "请求数据: ${request.toJSONString()}, 响应数据: ${response.toJSONString()}"
        log.error(message)
        throw RuntimeException(message)
    }
}

/**
 * Base class for synchronization porters. Every time the job fires,
 * [JobProperty] is checked; if it has been disabled the job terminates.
 */
abstract class Porter<T, R> : JobAdapter<T, R>() {

    // Programmatic transaction boundary for multi-statement DB work.
    @Autowired
    protected lateinit var transactionTemplate: TransactionTemplate

    // Producer used to publish synchronized documents to Kafka.
    @Autowired
    protected lateinit var kafkaTemplate: KafkaTemplate<String, String>

    // Producer factory; presumably for creating dedicated producers — confirm usage in subclasses.
    @Autowired
    protected lateinit var producerFactory: ProducerFactory<String, String>

    // MyBatis-Plus mapper for the document table.
    @Autowired
    protected lateinit var documentMapper: DocumentMapper

    // Mapper recording documents whose Kafka delivery failed (see DocumentPorter).
    @Autowired
    protected lateinit var retryDocumentMapper: RetryDocumentMapper

    // Store/job association repository.
    @Autowired
    protected lateinit var storeJobRepository: StoreJobRepository

    // NOTE(review): the "lb" prefix suggests a load-balanced RestTemplate — confirm bean definition.
    @Autowired
    protected lateinit var lbRestTemplate: RestTemplate

    // Shared executor for asynchronous per-record processing (see StreamDocumentPorter).
    @Autowired
    protected lateinit var executor: ThreadPoolExecutor

    // All Porter subclasses run as SYNC-type jobs.
    override val jobType = JobType.SYNC
}

abstract class GenericPorter : Porter<JSONObject, JSONObject>()

/**
 * Order/refund document synchronization.
 */
abstract class DocumentPorter<T, R> : Porter<T, R>() {

    /**
     * Job id: "&lt;porter class simple name&gt;:&lt;store id&gt;".
     * @see [JobCheckpoint]
     */
    override fun getId(store: Store) = "${this::class.simpleName}:${store.id}"

    /**
     * Document type; NONE means the Kafka topic gets no type suffix.
     */
    protected open val documentType = DocumentType.NONE

    /**
     * Page size used by paging subclasses.
     */
    protected open val pageSize = 100

    /**
     * Saves documents and publishes messages. Delivery failures are counted
     * per document and an error message is emitted once the count reaches the
     * threshold (see [saveAndNotifyError]).
     */
    protected open fun process(context: JobContext, response: R) {
        val stopWatch = StopWatch("process")
        stopWatch.start()

        val documents = buildDocuments(context, response)
        process(context, documents)

        stopWatch.stop()
        if (log.isDebugEnabled)
            log.debug("${context.logPrefix()} 处理${documents.size}条数据耗时: ${stopWatch.totalTimeSeconds.format()} 秒")
    }

    /**
     * Processes the given store's documents: documents that are new (or newer
     * than the stored copy, per [isNew]) are upserted and published to Kafka.
     * @param context job context
     * @param documents document list
     */
    open fun process(context: JobContext, documents: List<Document>) {
        if (documents.isEmpty())
            return

        val store = context.store
        val porterId = getId(store)
        documents.forEach {
            it.porterId = porterId
            it.platformId = platform.id
            // NOTE(review): storeId is set to store.vid when present, but the
            // duplicate lookup below filters on store.id — confirm vid and id
            // are interchangeable for the document table.
            it.storeId = store.vid ?: store.id
        }

        // Topic: DOCUMENT.<platform>[.<documentType>], uppercased. This local
        // val intentionally shadows the documentType property.
        val documentType = if (documentType == DocumentType.NONE) "" else ".${documentType}"
        val topic = "DOCUMENT.${platformId()}$documentType".uppercase()

        // Load existing copies to decide which incoming documents are new.
        val wrapper = KtQueryWrapper(Document::class.java)
            .eq(Document::storeId, store.id)
            .`in`(Document::sn, documents.map { it.sn })
        if (!hasData)
            wrapper.select(Document::sn, Document::modified)   // modified alone is enough for isNew

        val oldDocuments = documentMapper.selectList(wrapper).associateBy { it.sn }
        val upsertDocuments = documents.filter {
            val oldDocument = oldDocuments[it.sn]
            oldDocument == null || isNew(it, oldDocument)
        }

        if (upsertDocuments.isEmpty())
            return

        // De-duplicate by sn (keeps the last occurrence) before the batch upsert.
        val distinctDocuments = upsertDocuments.associateBy { it.sn }.map { it.value }
        documentMapper.upsertAll(distinctDocuments)

        for (document in upsertDocuments) {
            val key = "${store.id}:${document.sn}"
            val value = document.toMessage()
            val record = ProducerRecord(topic, key, value)
            val listenableFuture = kafkaTemplate.send(record)
            // The failure callback runs asynchronously on a producer thread.
            listenableFuture.addCallback({}, { throwable ->
                saveAndNotifyError(document, throwable)
                log.warn("Document[$porterId${document.sn}]消息发送异常", throwable)
            })
        }
    }

    // Records a Kafka delivery failure for a document; once the accumulated
    // failure count reaches 3, an error event is published to DOCUMENT.ERROR.
    // NOTE(review): selectOne/insert here is not atomic — concurrent callbacks
    // for the same document could race; confirm whether that matters.
    private fun saveAndNotifyError(document: Document, throwable: Throwable) {
        val wrapper = KtQueryWrapper(RetryDocument::class.java)
            .eq(RetryDocument::storeId, document.storeId)
            .eq(RetryDocument::sn, document.sn)
        var retryDocument = retryDocumentMapper.selectOne(wrapper)
        if (retryDocument == null) {
            // First failure: create the retry record.
            retryDocument = RetryDocument(document.platformId!!, document.storeId!!, document.sn!!, LocalDateTime.now())
            retryDocumentMapper.insert(retryDocument)
        } else {
            retryDocument = retryDocument.copy(count = retryDocument.count + 1, modified = LocalDateTime.now())
            retryDocumentMapper.update(retryDocument, wrapper)

            if (retryDocument.count >= 3) {
                val message = mapOf(
                    "storeId" to document.storeId,
                    "sn" to document.sn,
                    "message" to throwable.localizedMessage,
                )
                kafkaTemplate.send("DOCUMENT.ERROR", message.toJSONString())
            }
        }
    }

    /**
     * Converts a response into a list of [Document]s.
     */
    protected abstract fun buildDocuments(context: JobContext, response: R): List<Document>

    /**
     * Whether the duplicate lookup must also load the data column. Defaults to
     * false: comparing the last-modified timestamp is usually sufficient to
     * decide whether a document is new.
     */
    protected open val hasData = false

    /**
     * Returns true when [newDocument] should replace [oldDocument].
     */
    protected open fun isNew(newDocument: Document, oldDocument: Document): Boolean = newDocument > oldDocument
}

/**
 * Page-based data synchronization: a first counted request determines the
 * total, then every page is fetched and processed concurrently via [pool].
 */
abstract class PageDocumentPorter<T, R> : DocumentPorter<T, R>() {

    /**
     * First page number of the platform's paging API; only 0 and 1 are allowed.
     */
    protected open val startPage = 1

    override fun fetchAndProcess(context: JobContext) {
        if (startPage != 0 && startPage != 1)
            throw IllegalArgumentException("起始页[startPage]取值只能为 0 和 1")

        // Counted request: only used to obtain the total; it is not processed
        // (the first page is fetched again in the loop below).
        val countedResponse = getResponse(context, startPage, true)
        val count = getCount(context, countedResponse)
        if (log.isDebugEnabled)
            log.debug("${context.logPrefix()} 数据总数: $count")
        if (count == 0)
            return

        // Ceiling division: number of pages needed for `count` records.
        val totalPage = count / pageSize + if (count % pageSize > 0) 1 else 0
        val latch = CountDownLatch(totalPage)
        var pageNo = totalPage + startPage
        // Iterate pages from last to first; exactly totalPage iterations.
        // BUGFIX: the previous loop used `>= 0` with `else startPage`, which for
        // startPage == 1 mapped EVERY iteration to page 1 — fetching the first
        // page repeatedly and skipping all other pages.
        while (--pageNo >= startPage) {
            // Translate to the platform's page numbering, which starts at 1.
            val actualPageNo = if (startPage == 0) pageNo + 1 else pageNo
            val response = getResponse(context, actualPageNo, false)
            val processor = Processor(latch, context, response, this::process) {
                if (log.isDebugEnabled) {
                    log.debug("${context.logPrefix()} 第 $actualPageNo 页同步完成")
                }
            }
            pool.execute(processor)
        }
        // Wait until every page has been processed.
        // NOTE(review): if process() throws inside a worker, countDown is never
        // reached and this await hangs — confirm whether that is acceptable.
        latch.await()
    }

    /**
     * Extracts the total record count from a counted response.
     */
    protected abstract fun getCount(context: JobContext, response: R): Int

    /**
     * Fetches one page; [hasTotal] marks the counted (first) request.
     */
    protected abstract fun getResponse(context: JobContext, pageNo: Int, hasTotal: Boolean): R

    /**
     * Runnable that processes one page's response, logs completion and
     * counts down the latch.
     */
    private class Processor<R>(
        private val latch: CountDownLatch,
        private val context: JobContext,
        private val response: R,
        private val process: (context: JobContext, response: R) -> Unit,
        private val log: () -> Unit,
    ) : Runnable {

        override fun run() {
            process(context, response)
            log()
            latch.countDown()
        }
    }
}

abstract class GenericPageDocumentPorter : PageDocumentPorter<JSONObject, JSONObject>()

/**
 * Cursor-based data synchronization: each response acts as the cursor for the
 * next request until [hasNext] reports no more data.
 */
abstract class CursorDocumentPorter<T, R> : DocumentPorter<T, R>() {

    /**
     * Synchronizes and processes the data of the context's time range,
     * following the cursor until exhausted. The first request receives a
     * null previous response.
     * @param context job context
     */
    override fun fetchAndProcess(context: JobContext) {
        var cursor: R? = null
        do {
            val response = getResponse(context, cursor)
            process(context, response)
            cursor = response
        } while (hasNext(cursor))
    }

    /**
     * Fetches the data for the context's time range.
     * @param context job context
     * @param prevResponse the previous response, null on the first call
     */
    protected abstract fun getResponse(context: JobContext, prevResponse: R?): R

    /**
     * Whether more data remains to be fetched after this response.
     */
    protected abstract fun hasNext(response: R): Boolean
}

/**
 * Stream-based data synchronization: reads the source line by line and hands
 * each line to the shared executor for processing.
 */
abstract class StreamDocumentPorter<T> : DocumentPorter<T, String>() {

    override fun fetchAndProcess(context: JobContext) {
        val (store, startTime, endTime, parameter) = context
        // Submit one processing task per line; useLines closes the reader.
        val pending = getReader(store, startTime, endTime, parameter).useLines { lines ->
            lines.map { line -> executor.submit { process(context, line) } }.toList()
        }
        // Block until every line has been processed; get() rethrows worker failures.
        pending.forEach { it.get() }
    }

    /**
     * Opens a reader over the raw data for the given store, time range and
     * extension parameter.
     */
    protected abstract fun getReader(
        store: Store,
        startTime: LocalDateTime,
        endTime: LocalDateTime,
        parameter: Any?,
    ): Reader
}

abstract class GenericStreamDocumentPorter : StreamDocumentPorter<JSONObject>()