package com.nft.gateway.app.common;

import com.alibaba.fastjson.JSON;
import com.nft.gateway.app.entity.OprateInterfaceLog;
import com.nft.gateway.app.service.LogService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import javax.annotation.PreDestroy;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

/**
 * <pre>
 *   数据批量入库服务
 * </pre>
 * Created by RuiXing Hou on 2021-08-05.
 *
 * @since 1.0
 */
@Component
@Slf4j
public class BatchDataStorageService implements InitializingBean {
    /**
     * Maximum number of records accumulated before the batch is flushed.
     */
    @Value("${app.db.maxBatchCount}")
    private int maxBatchCount;

    /**
     * Size of the fixed thread pool that performs the batch inserts.
     */
    @Value("${app.db.maxBatchThreads}")
    private int maxBatchThreads;

    /**
     * Milliseconds after which a partially filled batch is flushed anyway.
     */
    @Value("${app.db.batchTimeout}")
    private int batchTimeout;

    /**
     * Number of records staged in the current batch.
     * BUGFIX: the original bare {@code int} with {@code ++batchCount} lost
     * increments under concurrent callers; AtomicInteger makes the counter
     * itself race-free (the batch hand-off remains best-effort by design).
     */
    private final AtomicInteger batchCount = new AtomicInteger(0);

    /**
     * Current batch number, used as the suffix of the per-batch cache keys.
     * BUGFIX: was a bare mutable {@code static long} read and incremented from
     * caller threads and timeout threads with no atomicity or visibility
     * guarantees; AtomicLong fixes both.
     */
    private static final AtomicLong batchNo = new AtomicLong(0);

    /**
     * Worker pool that executes the actual batch inserts.
     */
    private ExecutorService executorService = null;

    /**
     * Server-side cache helper used to stage records per batch.
     */
    @Resource
    private CacheService cacheService;

    /**
     * Business service that persists a batch of log records.
     */
    @Resource
    private LogService logService;

    /**
     * Redis helper. Currently unused because {@link #updateRealTimeData} is
     * disabled; kept for when that feature is re-enabled.
     */
    @Resource
    private RedisTemplate redisTemplate;

    @Override
    public void afterPropertiesSet() {
        // BUGFIX: a ThreadFactory receives the pool's internal worker Runnable,
        // never the submitted task, so the original "r instanceof BatchWorker"
        // branch could never match and threads kept their default names.
        // Name them with a simple sequence counter instead.
        final AtomicInteger threadSeq = new AtomicInteger(0);
        this.executorService = Executors.newFixedThreadPool(this.maxBatchThreads,
                r -> new Thread(r, "batch-worker-" + threadSeq.incrementAndGet()));
    }

    /**
     * Releases the worker pool when the Spring context shuts down so in-flight
     * batch inserts can finish and no threads are leaked.
     * BUGFIX: the original never shut the executor down.
     */
    @PreDestroy
    public void shutdown() {
        if (executorService != null) {
            executorService.shutdown();
        }
    }

    /**
     * Entry point for callers that need high-throughput persistence. The record
     * is staged in the cache under the current batch key and flushed either
     * when the batch reaches {@code maxBatchCount} records or when
     * {@code batchTimeout} elapses, whichever comes first. If staging fails,
     * the record is pushed to the failed-records list instead of being lost.
     *
     * @param oprateInterfaceLog the log record to persist
     */
    public void saveRealTimeData(OprateInterfaceLog oprateInterfaceLog) {
        final String failedCacheKey = "device:user_oprate:failed_records";

        try {
            long currentBatchNo = batchNo.get();
            String durationKey = "device:user_oprate:batchDuration" + currentBatchNo;
            String batchKey = "device:user_oprate:batch" + currentBatchNo;

            // First record of a new batch: record its start time and arm the
            // watchdog thread that flushes the batch if it never fills up.
            if (!cacheService.exists(durationKey)) {
                cacheService.put(durationKey, System.currentTimeMillis());
                new BatchTimeoutCommitThread(batchKey, durationKey, failedCacheKey).start();
            }

            cacheService.lPush(batchKey, oprateInterfaceLog);
            if (batchCount.incrementAndGet() >= maxBatchCount) {
                // Batch is full: flush it to the database.
                dataStorage(durationKey, batchKey, failedCacheKey);
            }

        } catch (Exception ex) {
            log.warn("[DB:FAILED] 保存本批次的日志记录失败: " + ex.getMessage() + ", oprateInterfaceLog: " + JSON.toJSONString(oprateInterfaceLog), ex);
            // Park the record on the failed list so it is not silently dropped.
            cacheService.lPush(failedCacheKey, oprateInterfaceLog);
        }
    }

    /**
     * Would push the latest state of a record into Redis. Intentionally
     * disabled for now (body commented out in the original); kept as a
     * placeholder for re-enabling.
     *
     * @param oprateInterfaceLog the record whose real-time view would be updated
     */
    private void updateRealTimeData(OprateInterfaceLog oprateInterfaceLog) {
//        redisTemplate.opsForValue().set("user_oprate:" + oprateInterfaceLog.getType() + ":" +oprateInterfaceLog.getGoodsId() + oprateInterfaceLog.getUuid(), oprateInterfaceLog);
    }

    /**
     * Rolls the batch window forward and hands the finished batch to the
     * worker pool for persistence.
     *
     * @param durationKey    cache key holding the batch start timestamp
     * @param batchKey       cache key of the list holding this batch's records
     * @param failedCacheKey cache key of the failed-records list
     */
    private void dataStorage(String durationKey, String batchKey, String failedCacheKey) {
        // BUGFIX: the original guard "batchNo >= Long.MAX_VALUE" after a plain
        // increment could effectively never fire (overflow wraps to negative
        // first); reset on any negative value so a wrap recovers cleanly.
        if (batchNo.incrementAndGet() < 0) {
            batchNo.set(0);
        }
        batchCount.set(0);
        cacheService.del(durationKey);
        executorService.execute(new BatchWorker(batchKey, failedCacheKey));
    }

    /**
     * Drains one batch from the cache and writes it to the database in a
     * single bulk insert. On failure, every drained record is moved to the
     * failed-records list so nothing is silently dropped.
     */
    private class BatchWorker implements Runnable {

        private final String failedCacheKey;
        private final String batchKey;

        public BatchWorker(String batchKey, String failedCacheKey) {
            this.batchKey = batchKey;
            this.failedCacheKey = failedCacheKey;
        }

        @Override
        public void run() {
            final List<OprateInterfaceLog> oprateInterfaceLogs = new ArrayList<>();
            try {
                // Drain the batch list completely.
                OprateInterfaceLog oprateInterfaceLog = cacheService.lPop(batchKey);
                while (oprateInterfaceLog != null) {
                    oprateInterfaceLogs.add(oprateInterfaceLog);
                    oprateInterfaceLog = cacheService.lPop(batchKey);
                }
                long timeMillis = System.currentTimeMillis();

                try {
                    // Bulk-insert the whole batch in one service call.
                    logService.saveBatch(oprateInterfaceLogs);
                } finally {
                    // Always remove the (now drained) batch key and log timing,
                    // whether or not the insert succeeded.
                    cacheService.del(batchKey);
                    log.info("[DB:BATCH_WORKER] 批次：" + batchKey + "，保存本批次的日志记录：" + oprateInterfaceLogs.size() + ", 耗时：" + (System.currentTimeMillis() - timeMillis) + "ms");
                }
            } catch (Exception e) {
                log.warn("[DB:FAILED] 保存本批次的日志记录失败：" + e.getMessage() + ", oprateInterfaceLog: " + oprateInterfaceLogs.size(), e);
                // Re-queue every drained record for later replay.
                for (OprateInterfaceLog oprateInterfaceLog : oprateInterfaceLogs) {
                    cacheService.lPush(failedCacheKey, oprateInterfaceLog);
                }
            }
        }
    }

    /**
     * One-shot watchdog armed when the first record of a batch arrives: after
     * {@code batchTimeout} ms it flushes the batch if it has not already been
     * flushed by the size trigger.
     */
    class BatchTimeoutCommitThread extends Thread {

        private final String batchKey;
        private final String durationKey;
        private final String failedCacheKey;

        public BatchTimeoutCommitThread(String batchKey, String durationKey, String failedCacheKey) {
            this.batchKey = batchKey;
            this.durationKey = durationKey;
            this.failedCacheKey = failedCacheKey;
            this.setName("batch-thread-" + batchKey);
        }

        @Override
        public void run() {
            try {
                Thread.sleep(batchTimeout);
            } catch (InterruptedException e) {
                // BUGFIX: restore the interrupt flag instead of swallowing it,
                // then fall through and commit the batch immediately.
                Thread.currentThread().interrupt();
                log.error("[DB] 内部错误，直接提交：" + e.getMessage());
            }

            // The duration key still existing means the batch was not flushed
            // by the size trigger, so flush it now.
            if (cacheService.exists(durationKey)) {
                dataStorage(durationKey, batchKey, failedCacheKey);
            }
        }

    }

}
