package com.example.batch.listener;

import com.example.batch.event.BatchTaskMetaEvent;
import com.example.batch.model.TMission;
import com.example.batch.service.TaskService;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.rocketmq.spring.annotation.ConsumeMode;
import org.apache.rocketmq.spring.annotation.RocketMQMessageListener;
import org.apache.rocketmq.spring.core.RocketMQListener;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.data.redis.core.BoundHashOperations;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.retry.backoff.ExponentialBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;

/**
 * RocketMQ 消费者：并行、幂等、重试处理批量任务
 */
@Slf4j
@Service
@RocketMQMessageListener(
        topic = "BATCH_TASK_TOPIC",
        consumerGroup = "batch-consumer-group",
        consumeMode = ConsumeMode.CONCURRENTLY
)
@RequiredArgsConstructor
public class EnhancedBatchTaskConsumer implements RocketMQListener<String> {

    private final RedisTemplate<String, Object> redisTemplate;
    private final TaskService taskService;
    @Value("${rocketmq.consumer.group}")
    private String consumerGroup;
    private ThreadPoolExecutor executor;
    private final ObjectMapper objectMapper;
    @PostConstruct
    public void initThreadPool() {
        log.info("RocketMQ 消费者已启动，订阅主题: BATCH_TASK_TOPIC");
        executor = new ThreadPoolExecutor(
                50,
                200,
                60L, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(5000),
                (r, exec) -> {
                    // CallerRunsPolicy
                    if (!exec.isShutdown()) r.run();
                }
        );
    }

    @Override
    public void onMessage(String message) {
        log.info("消费者组: {} 收到消息: {}", consumerGroup, message);
        BatchTaskMetaEvent event = null;
        try {
            event = objectMapper.readValue(message, BatchTaskMetaEvent.class);
            log.info("反序列化成功，事件: {}", event);
        } catch (JsonProcessingException e) {
            log.error("消息反序列化失败，消息内容: {}", message, e);
            // 跳过无效消息，不抛出异常以避免重试
            return;
        }

        // 以下为处理有效消息的逻辑
        String batchKey = "batch_task:" + event.getBatchId();
        String lockKey = "batch_lock:" + event.getBatchId();

        // 幂等锁：避免重复消费
        Boolean lock = redisTemplate.opsForValue()
                .setIfAbsent(lockKey, "LOCKED", Duration.ofHours(2));
        if (Boolean.FALSE.equals(lock)) {
            log.warn("批次 {} 已被处理，跳过重复消费", event.getBatchId());
            return;
        }

        Long missionId = taskService.createMainMission(event);
        Map<String, String> init = new HashMap<>();
        init.put("total", String.valueOf(event.getUrls().size())); // 转换为字符串
        init.put("success", "0");
        init.put("fail", "0");
        init.put("progress", "0");
        redisTemplate.opsForHash().putAll(batchKey, init);

        // 将 URL 列表分片以并行提交子任务
        Phaser phaser = new Phaser(1);
        List<List<String>> shards = partition(event.getUrls(), 50);

        for (List<String> shard : shards) {
            phaser.register();
            BatchTaskMetaEvent finalEvent = event;
            executor.execute(() -> {
                try {
                    for (String url : shard) {
                        processWithRetry(finalEvent, batchKey, missionId, url);
                    }
                } finally {
                    phaser.arriveAndDeregister();
                }
            });
        }

        // 等待所有分片完成
        phaser.arriveAndAwaitAdvance();

        // 鏈缁堟洿鏂颁富浠诲姟鐘舵佷负 COMPLETED
        BoundHashOperations<String, Object, Object> hashOps = redisTemplate.boundHashOps(batchKey);
        Object successObj = hashOps.get("success");
        Object failObj = hashOps.get("fail");
        Integer successCount = (successObj instanceof Integer) ? (Integer) successObj : Integer.parseInt(successObj.toString());
        Integer failCount = (failObj instanceof Integer) ? (Integer) failObj : Integer.parseInt(failObj.toString());
        taskService.updateMissionProgress(missionId, successCount, failCount);
        redisTemplate.opsForHash().put(batchKey, "status", "COMPLETED");
        log.info("批次 {} 处理完成", event.getBatchId());
    }

    private void processWithRetry(BatchTaskMetaEvent event, String batchKey, Long missionId, String url) {
        RetryTemplate retry = new RetryTemplate();
        retry.setRetryPolicy(new SimpleRetryPolicy(3));
        ExponentialBackOffPolicy backOff = new ExponentialBackOffPolicy();
        backOff.setInitialInterval(1000L);
        backOff.setMultiplier(2.0);
        retry.setBackOffPolicy(backOff);

        retry.execute(context -> {
            try {
                // 每个 URL 都作为一个子任务，插入数据库
                List<String> singleUrlList = Collections.singletonList(url);
                taskService.createSingleTask(singleUrlList, event.getAction(), missionId);

                // 更新成功计数
                redisTemplate.opsForHash().increment(batchKey, "success", 1);
            } catch (Exception ex) {
                log.error("URL {} 处理失败，进行重试/记录失败", url, ex);
                redisTemplate.opsForHash().increment(batchKey, "fail", 1);
                throw ex;
            }
            // 更新进度到 Redis
            updateProgress(batchKey);
            return null;
        });
    }

    private void updateProgress(String batchKey) {
        Object totalObj = redisTemplate.opsForHash().get(batchKey, "total");
        Object successObj = redisTemplate.opsForHash().get(batchKey, "success");
        Object failObj = redisTemplate.opsForHash().get(batchKey, "fail");
        
        Integer total = totalObj != null ? Integer.parseInt(totalObj.toString()) : 0;
        Integer success = successObj != null ? Integer.parseInt(successObj.toString()) : 0;
        Integer fail = failObj != null ? Integer.parseInt(failObj.toString()) : 0;

        if (total == null || success == null || fail == null) {
            // 可选：记录日志或抛出自定义异常
            return;
        }

        int done = success + fail;
        int percent = (int) (done * 100.0 / total);
        redisTemplate.opsForHash().put(batchKey, "progress", String.valueOf(percent));
    }


    /** 简单分片工具 */
    private <T> List<List<T>> partition(List<T> list, int size) {
        int total = list.size();
        int shards = (total + size - 1) / size;
        List<List<T>> out = new ArrayList<>(shards);
        for (int i = 0; i < total; i += size) {
            out.add(list.subList(i, Math.min(total, i + size)));
        }
        return out;
    }

    // 定时监控：可以扩展扫描 Redis 中活跃批次并上报指标
    @Scheduled(fixedRate = 5000)
    public void monitorBatchProgress() {
        // 目前留空，可根据需要实现批次进度监听与日志记录
    }
}
