package com.thinvent.recommend.manager.manager.impl;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import com.thinvent.recommend.manager.manager.JiebaManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

@Component
public class JiebaManagerImpl implements JiebaManager {
    private static final Logger log = LoggerFactory.getLogger(JiebaManagerImpl.class);

    private final JiebaSegmenter segmenter;
    private final Set<String> stopWords;

    // Strips ALL leading/trailing punctuation (ASCII \p{Punct} plus Unicode \p{IsPunctuation}).
    private static final Pattern TRIM_PUNCT =
            Pattern.compile("^[\\p{Punct}\\p{IsPunctuation}]+|[\\p{Punct}\\p{IsPunctuation}]+$");
    // Accepts only digits, ASCII letters and CJK unified ideographs; anything else
    // (residual punctuation, symbols, whitespace, emoji) is discarded as noise.
    private static final Pattern VALID_CHAR = Pattern.compile("^[0-9a-zA-Z\u4e00-\u9fa5]+$");

    /**
     * Tokenization service backed by the jieba segmenter, post-filtering tokens
     * through punctuation stripping, stop-word removal and a character whitelist.
     *
     * @param segmenter      shared jieba segmenter bean
     * @param jiebaStopWords stop-word set bean; tokens contained in it are dropped
     * @throws NullPointerException if either dependency is null (fail fast at wiring time)
     */
    @Autowired
    public JiebaManagerImpl(JiebaSegmenter segmenter, Set<String> jiebaStopWords) {
        this.segmenter = Objects.requireNonNull(segmenter, "segmenter");
        this.stopWords = Objects.requireNonNull(jiebaStopWords, "jiebaStopWords");
        log.info("JiebaManager 已初始化，停用词共 {} 条", stopWords.size());
    }

    /**
     * Precise-style cut.
     *
     * NOTE(review): this Java jieba port exposes only SEARCH and INDEX modes, so
     * "precise" maps to SEARCH and is currently identical to {@link #cutSearch} —
     * confirm this is the intended mapping.
     */
    @Override
    public List<String> cutPrecise(String text) {
        return segment(text, JiebaSegmenter.SegMode.SEARCH);
    }

    /** Search-engine-style cut (SEARCH mode). */
    @Override
    public List<String> cutSearch(String text) {
        return segment(text, JiebaSegmenter.SegMode.SEARCH);
    }

    /** Full/index-style cut (INDEX mode emits overlapping sub-tokens). */
    @Override
    public List<String> cutAll(String text) {
        return segment(text, JiebaSegmenter.SegMode.INDEX);
    }

    /**
     * Runs the segmenter in the given mode and applies the common token filter.
     * Guards against null/empty input, which the raw segmenter does not accept.
     */
    private List<String> segment(String text, JiebaSegmenter.SegMode mode) {
        if (text == null || text.isEmpty()) {
            // Mutable list, consistent with the Collectors.toList() result below.
            return new ArrayList<>();
        }
        return filter(segmenter.process(text, mode));
    }

    /**
     * Normalizes and filters raw segmenter tokens:
     * lower-cases (locale-independent), strips edge punctuation, then drops
     * empties, stop words and tokens containing non-whitelisted characters.
     */
    private List<String> filter(List<SegToken> tokens) {
        return tokens.stream()
                // 1) extract word + trim + locale-independent lower-case
                //    (Locale.ROOT avoids e.g. the Turkish dotless-i surprise)
                .map(t -> t.word == null ? "" : t.word.trim().toLowerCase(Locale.ROOT))
                // 2) strip leading/trailing punctuation
                .map(w -> TRIM_PUNCT.matcher(w).replaceAll(""))
                // 3) drop tokens that became empty
                .filter(w -> !w.isEmpty())
                // 4) drop stop words
                .filter(w -> {
                    boolean isStop = stopWords.contains(w);
                    if (isStop) log.debug("过滤停用词 → '{}'", w);
                    return !isStop;
                })
                // 5) keep only digits / ASCII letters / CJK ideographs
                .filter(w -> {
                    boolean ok = VALID_CHAR.matcher(w).matches();
                    if (!ok) log.debug("过滤杂符/标点/符号 → '{}'", w);
                    return ok;
                })
                // 6) final survivors (peek is debug-logging only, no side effects)
                .peek(w -> log.debug("保留 token → '{}'", w))
                .collect(Collectors.toList());
    }
}
