package com.zyb.lmbackend.service.impl;

/**
 * 批量分类服务实现。
 *
 * 负责：
 * - 创建/并发执行分类任务（分页抓取未分类数据）
 * - LLM 分类失败时重试与规则兜底
 * - 实时更新任务进度，便于 SSE 展示
 */
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.google.common.util.concurrent.RateLimiter;
import com.zyb.lmbackend.classifier.RuleFallbackClassifier;
import com.zyb.lmbackend.config.AppProperties;
import com.zyb.lmbackend.entity.CategoryDict;
import com.zyb.lmbackend.entity.ClassifyTask;
import com.zyb.lmbackend.entity.GoodsBack;
import com.zyb.lmbackend.llm.LlmClient;
import com.zyb.lmbackend.llm.model.ClsResult;
import com.zyb.lmbackend.mapper.ClassifyTaskMapper;
import com.zyb.lmbackend.mapper.GoodsBackClassifiedMapper;
import com.zyb.lmbackend.mapper.GoodsBackMapper;
import com.zyb.lmbackend.service.BatchClassifierService;
import com.zyb.lmbackend.service.CategoryDictService;
import com.zyb.lmbackend.service.ClassifyTaskService;
import com.zyb.lmbackend.util.TokenBudget;
import io.micrometer.core.instrument.Gauge;
import io.micrometer.core.instrument.MeterRegistry;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;

@Service
@RequiredArgsConstructor
@Slf4j
public class BatchClassifierServiceImpl implements BatchClassifierService {

    /** Items per LLM request before token-budget trimming. */
    private static final int DEFAULT_CHUNK_SIZE = 50;
    /** Approximate prompt token budget allowed per chunk. */
    private static final int CHUNK_TOKEN_BUDGET = 3500;
    /** LLM attempts before falling back to the rule classifier. */
    private static final int MAX_LLM_ATTEMPTS = 3;

    private final GoodsBackMapper goodsBackMapper;
    private final GoodsBackClassifiedMapper classifiedMapper;
    private final CategoryDictService categoryDictService;
    private final ClassifyTaskService classifyTaskService;
    private final ClassifyTaskMapper classifyTaskMapper;
    private final LlmClient llmClient;
    private final RuleFallbackClassifier fallbackClassifier;
    private final AppProperties appProperties;
    private final MeterRegistry meterRegistry;

    /**
     * Starts (or reuses) a batch classification run.
     *
     * <p>Acquires a DB-backed named lock for the chosen version so only one instance
     * runs it, persists a RUNNING {@link ClassifyTask}, and kicks off an asynchronous
     * driver thread that pages through unclassified rows and fans chunks out to a
     * fixed-size worker pool. Progress counters are updated per chunk so SSE clients
     * can poll the task row.
     *
     * @param batchSizeParam      page size per fetch; defaults to 1000, clamped to [1, 2000]
     * @param maxConcurrencyParam worker thread count; defaults to 8, clamped to [1, 32]
     * @param taskVersionParam    explicit run version; defaults to (max recorded version + 1)
     * @return the persisted RUNNING task when the lock was acquired; otherwise the latest
     *         existing task for that version, or a transient CREATED placeholder (not saved)
     */
    @Override
    public ClassifyTask start(Integer batchSizeParam, Integer maxConcurrencyParam, Integer taskVersionParam) {
        // Normalize paging size (default 1000, clamped to [1, 2000]).
        int batchSize = batchSizeParam == null ? 1000 : Math.max(1, Math.min(2000, batchSizeParam));
        // Normalize worker concurrency (default 8, clamped to [1, 32]).
        int maxConcurrency = maxConcurrencyParam == null ? 8 : Math.max(1, Math.min(32, maxConcurrencyParam));
        // Resume from the latest recorded version so an interrupted run picks up where it left off.
        int latest = classifyTaskMapper.selectMaxVersion();
        int version = taskVersionParam == null ? (latest + 1) : taskVersionParam;

        // Named lock prevents multiple instances from running the same version concurrently
        // (optimistic-lock-like guard keyed by version).
        String lockName = "classify_v" + version;
        Integer locked = classifyTaskMapper.getLock(lockName);
        if (locked == null || locked == 0) {
            // Lock not acquired: return the latest task for this version if one exists;
            // otherwise a transient CREATED placeholder that is intentionally not persisted.
            ClassifyTask exist = classifyTaskService.getOne(new LambdaQueryWrapper<ClassifyTask>()
                    .eq(ClassifyTask::getVersion, version)
                    .orderByDesc(ClassifyTask::getId)
                    .last("limit 1"));
            if (exist != null) {
                return exist;
            }
            ClassifyTask t = new ClassifyTask();
            t.setTaskName("classify");
            t.setStatus("CREATED");
            t.setVersion(version);
            return t;
        }

        try {
            // Persist a RUNNING task row up front so progress is immediately visible.
            long totalRows = goodsBackMapper.countAll();
            ClassifyTask task = new ClassifyTask();
            task.setTaskName("classify");
            task.setStatus("RUNNING");
            task.setVersion(version);
            task.setTotal((int) totalRows);
            task.setProcessed(0);
            task.setSuccess(0);
            task.setFailed(0);
            task.setStartedAt(LocalDateTime.now());
            task.setStartedAt(LocalDateTime.now());
            classifyTaskService.save(task);

            // Category codes the LLM is allowed to emit (active dictionary entries only).
            List<String> allowedCodes = categoryDictService.list(new LambdaQueryWrapper<CategoryDict>()
                            .eq(CategoryDict::getIsActive, (byte) 1))
                    .stream().map(CategoryDict::getCode).collect(Collectors.toList());

            // Fixed-size pool; CallerRunsPolicy provides back-pressure once the queue fills.
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    maxConcurrency, maxConcurrency, 30, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>(200), new ThreadPoolExecutor.CallerRunsPolicy());
            RateLimiter limiter = RateLimiter.create(appProperties.getRATE_LIMIT_QPS());
            try {
                // Best effort: may fail on duplicate meter registration; metrics must never break the run.
                Gauge.builder("classify.queue.size", pool, p -> p.getQueue().size()).register(meterRegistry);
            } catch (Exception ignored) {
                // Intentionally ignored — metrics are optional.
            }

            int pageLimit = Math.max(1, batchSize);

            // FIX: keep a handle on the driver executor and shut it down after submitting,
            // otherwise every call to start() leaked one non-daemon thread.
            ExecutorService driver = Executors.newSingleThreadExecutor();
            driver.submit(() -> {
                try {
                    while (true) {
                        // Page of rows not yet classified for this version.
                        List<GoodsBack> batch = goodsBackMapper.selectUnclassified(version, pageLimit);
                        if (batch == null || batch.isEmpty()) break;

                        List<Callable<Void>> calls = new ArrayList<>();
                        int idx = 0;
                        while (idx < batch.size()) {
                            int remaining = batch.size() - idx;
                            int csize = Math.min(DEFAULT_CHUNK_SIZE, remaining);
                            List<GoodsBack> part = new ArrayList<>(batch.subList(idx, idx + csize));
                            List<String> details = part.stream().map(GoodsBack::getBackDetail).collect(Collectors.toList());
                            // Shrink the chunk until the prompt fits the token budget.
                            csize = TokenBudget.fitChunkSize(details, csize, CHUNK_TOKEN_BUDGET);
                            final List<GoodsBack> chunkItems = part.subList(0, csize);
                            final List<String> chunkDetails = details.subList(0, csize);

                            calls.add(() -> {
                                limiter.acquire();
                                // Batch LLM classification of raw return reasons, with retry
                                // and rule-based fallback; restricted to allowedCodes.
                                List<ClsResult> results = classifyWithRetry(chunkDetails, allowedCodes);
                                int succ = 0;
                                for (int i = 0; i < results.size(); i++) {
                                    ClsResult r = results.get(i);
                                    // Original row this result corresponds to (index-aligned).
                                    GoodsBack gb = chunkItems.get(i);
                                    // UPSERT: for the same version + SN keep the higher-confidence row;
                                    // this table feeds the dashboard visualisation.
                                    classifiedMapper.upsert(gb.getSn(), gb.getBackDetail(), r.getCategoryCode(),
                                            r.getConfidence(), r.getReason(), "dashscope", version);
                                    if (!"OTHER".equalsIgnoreCase(r.getCategoryCode())) {
                                        succ++;
                                    }
                                }
                                // Progress counters are shared across workers; guard the read-modify-write.
                                synchronized (BatchClassifierServiceImpl.this) {
                                    task.setProcessed(task.getProcessed() + results.size());
                                    task.setSuccess(task.getSuccess() + succ);
                                    task.setFailed(task.getFailed() + (results.size() - succ));
                                    classifyTaskService.updateById(task);
                                }
                                try {
                                    meterRegistry.counter("classify.throughput").increment(results.size());
                                } catch (Exception ignored2) {
                                    // Metrics failures must not affect classification.
                                }
                                return null;
                            });
                            idx += csize;
                        }
                        // FIX: inspect the Futures — invokeAll captures worker exceptions inside them,
                        // and the previous code silently discarded any failure (e.g. a failed upsert).
                        for (Future<Void> f : pool.invokeAll(calls)) {
                            try {
                                f.get();
                            } catch (ExecutionException ee) {
                                log.error("classification chunk failed (version={})", version, ee.getCause());
                            }
                        }
                    }

                    task.setStatus("SUCCEEDED");
                    task.setFinishedAt(LocalDateTime.now());
                    classifyTaskService.updateById(task);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    task.setStatus("FAILED");
                    task.setMsg("interrupted");
                    task.setFinishedAt(LocalDateTime.now());
                    classifyTaskService.updateById(task);
                } catch (Exception e) {
                    // FIX: log the stack trace; previously only getMessage() survived in the task row.
                    log.error("classification run failed (version={})", version, e);
                    task.setStatus("FAILED");
                    task.setMsg(e.getMessage());
                    task.setFinishedAt(LocalDateTime.now());
                    classifyTaskService.updateById(task);
                } finally {
                    // The run is over: release the worker pool and the version lock.
                    pool.shutdown();
                    classifyTaskMapper.releaseLock(lockName);
                }
            });
            // Lets the already-submitted run finish, then retires the driver thread.
            driver.shutdown();

            return task;
        } catch (Exception e) {
            // Setup failed before the async driver took ownership of the lock — release it here.
            classifyTaskMapper.releaseLock(lockName);
            throw e;
        }
    }

    /**
     * Classifies a chunk via the LLM, retrying up to {@value #MAX_LLM_ATTEMPTS} times with
     * exponential backoff. If every attempt fails, each item is classified by the
     * deterministic rule fallback instead, so the returned list always matches
     * {@code chunk} in size and order.
     *
     * @param chunk        raw return-reason texts, one per item
     * @param allowedCodes category codes the classifier may emit
     * @return one {@link ClsResult} per input, index-aligned with {@code chunk}
     */
    private List<ClsResult> classifyWithRetry(List<String> chunk, List<String> allowedCodes) {
        for (int attempt = 1; attempt <= MAX_LLM_ATTEMPTS; attempt++) {
            try {
                return llmClient.classifyBatch(chunk, allowedCodes);
            } catch (Exception e) {
                // FIX: previously the failure was swallowed with no trace at all.
                log.warn("LLM classify attempt {}/{} failed: {}", attempt, MAX_LLM_ATTEMPTS, e.getMessage());
                // FIX: stop retrying once the worker thread has been interrupted.
                if (Thread.currentThread().isInterrupted()) {
                    break;
                }
                // FIX: no pointless sleep after the final attempt — go straight to the fallback.
                if (attempt < MAX_LLM_ATTEMPTS) {
                    sleepBackoff(attempt);
                }
            }
        }
        // All LLM attempts failed: rule-based fallback per item.
        List<ClsResult> fb = new ArrayList<>(chunk.size());
        for (int i = 0; i < chunk.size(); i++) {
            fb.add(fallbackClassifier.classifyOne(chunk.get(i), i));
        }
        return fb;
    }

    /**
     * Exponential backoff: sleeps min(10, 2^attempts) seconds.
     * Re-asserts the interrupt flag if woken by interruption.
     *
     * @param attempts 1-based attempt number that just failed
     */
    private void sleepBackoff(int attempts) {
        long delay = (long) Math.min(10, Math.pow(2, attempts));
        try {
            TimeUnit.SECONDS.sleep(delay);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
    }
}
