/**
 * 抖音关注：程序员三丙
 * 知识星球：https://t.zsxq.com/j9b21
 */
package sanbing.example.dylike.application.task;

import com.google.common.base.Stopwatch;
import io.micrometer.core.instrument.Timer;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Profile;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
import sanbing.example.dylike.domain.LikeCacheGateway;
import sanbing.example.dylike.infrastructure.sql.dao.LikeDao;
import sanbing.example.dylike.util.SanbingExecutors;
import sanbing.example.dylike.util.StatsFactory;

import java.time.LocalDateTime;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.IntStream;

@Component
@Slf4j
@Profile("!test")
@ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "caffeine", matchIfMissing = true)
public class LikeCacheScheduler {

    /** Total sharding-key space: shard keys lie in [0, 256). */
    private static final int SHARDING_KEY_SPACE = 256;

    /** Target number of rows fetched per scheduler run, split evenly across DB connections. */
    private static final float TOTAL_BATCH_QUERY_SIZE = 100_000F;

    @Value("${spring.datasource.hikari.maximum-pool-size}")
    private int hikariMaxPoolSize;

    @Resource
    LikeCacheGateway likeCacheGateway;

    @Resource
    StatsFactory statsFactory;

    @Resource
    LikeDao likeDao;

    private ExecutorService fetchDataThreadPool;
    private ExecutorService flushCacheThreadPool;
    private int batchQuerySize;
    private int batchQueryShardingStep;
    private Timer fetchDataTimer;
    private Timer flushCacheTimer;
    private Timer allTimer;

    /**
     * Sizes the worker pools and per-shard query parameters from the Hikari pool size,
     * and registers the metric timers.
     */
    @PostConstruct
    public void init() {
        // One DB-fetch worker per connection (+1 spare); cache flushing is CPU-bound.
        fetchDataThreadPool = SanbingExecutors.newWorkStealingPool(hikariMaxPoolSize + 1, "fetchDataThreadPool");
        flushCacheThreadPool = SanbingExecutors.newWorkStealingPool(Runtime.getRuntime().availableProcessors(), "flushCacheThreadPool");

        batchQuerySize = (int) Math.ceil(TOTAL_BATCH_QUERY_SIZE / hikariMaxPoolSize);
        // Ceiling division. The previous floor division (256 / poolSize) dropped the tail of the
        // key space whenever poolSize did not divide 256 evenly (e.g. poolSize 10 -> step 25,
        // keys 250-255 never queried) and produced step 0 for poolSize > 256.
        batchQueryShardingStep = (SHARDING_KEY_SPACE + hikariMaxPoolSize - 1) / hikariMaxPoolSize;

        fetchDataTimer = statsFactory.createTimer("cache.task", "type", "fetchData");
        flushCacheTimer = statsFactory.createTimer("cache.task", "type", "flushCache");
        allTimer = statsFactory.createTimer("cache.task", "type", "all");
    }

    /** Stops both worker pools; in-flight tasks are interrupted. */
    @PreDestroy
    public void destroy() {
        if (fetchDataThreadPool != null) {
            fetchDataThreadPool.shutdownNow();
        }
        if (flushCacheThreadPool != null) {
            flushCacheThreadPool.shutdownNow();
        }
    }

    /**
     * Periodically reloads the like cache: each DB connection fetches the hottest rows of its
     * sharding-key range in parallel, and each result list is written into the cache gateway.
     * Waits at most one minute per run; failures are logged and do not stop the schedule.
     */
    @Scheduled(fixedDelay = 5000)
    public void refreshCache() {
        Stopwatch stopwatch = Stopwatch.createStarted();

        try {
            var futures = IntStream.range(0, hikariMaxPoolSize)
                    .mapToObj(this::refreshShardAsync)
                    .toArray(CompletableFuture[]::new);

            CompletableFuture.allOf(futures).get(1, TimeUnit.MINUTES);

        } catch (InterruptedException e) {
            // Restore the interrupt flag so the scheduler thread can observe a shutdown request;
            // swallowing it (as the old broad catch did) loses the signal.
            Thread.currentThread().interrupt();
            log.warn("refreshCache interrupted", e);
        } catch (Exception e) {
            log.error("refreshCache error {}", e.getMessage(), e);
        } finally {
            allTimer.record(stopwatch.elapsed());
        }

        log.info("[{}] refreshCache elapsed {} ms, total cache keys: {}", likeCacheGateway.getClass().getSimpleName(),
                stopwatch.elapsed(TimeUnit.MILLISECONDS),
                likeCacheGateway.total());
    }

    /** Fetches shard {@code i}'s top rows on the fetch pool, then flushes them on the flush pool. */
    private CompletableFuture<Void> refreshShardAsync(int i) {
        // Clamp both ends so the last shard never reads past the key space even though the
        // ceiling-divided step may over-cover it; a start==end range simply fetches nothing.
        int shardingKeyStart = Math.min(i * batchQueryShardingStep, SHARDING_KEY_SPACE);
        int shardingKeyEnd = Math.min(shardingKeyStart + batchQueryShardingStep, SHARDING_KEY_SPACE);
        return CompletableFuture.supplyAsync(() -> {
                    Stopwatch fetchSw = Stopwatch.createStarted();
                    try {
                        // Only rows touched within the last 2 days are considered hot enough to cache.
                        return likeDao.findTop(shardingKeyStart, shardingKeyEnd, LocalDateTime.now().minusDays(2),
                                batchQuerySize);
                    } finally {
                        fetchDataTimer.record(fetchSw.elapsed());
                    }
                }, fetchDataThreadPool)
                .thenAcceptAsync(list -> {
                    Stopwatch flushSw = Stopwatch.createStarted();
                    try {
                        // NOTE(review): parallelStream() forks inside the flush pool's worker —
                        // this only parallelizes within that pool if SanbingExecutors builds a
                        // ForkJoinPool; confirm, otherwise it runs on the common pool.
                        list.parallelStream().forEach(vLike ->
                                likeCacheGateway.write(vLike.getVId(), vLike.getLikeNum()));
                    } finally {
                        flushCacheTimer.record(flushSw.elapsed());
                    }
                }, flushCacheThreadPool);
    }
}