package io.github.wppli.domain.recommond.rservice.text;

import cn.hutool.core.io.resource.ClassPathResource;
import com.google.common.reflect.TypeToken;
import com.google.gson.Gson;
import com.hankcs.hanlp.HanLP;

import io.github.wppli.domain.recommond.model.entity.PostsContentEntity;
import lombok.AllArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
 * Text feature service.
 *
 * <p>Tokenizes post content (Chinese via HanLP keyword extraction, English via
 * Lucene's {@code StandardTokenizer}), maintains an inverted index of
 * lower-cased terms to content ids, and computes TF-IDF feature vectors.
 *
 * @author li--jiaqiang 2025−03−15
 */
@Slf4j
@Service
@AllArgsConstructor
public class TextFeatureService {

    /** Stop words loaded once from the classpath at class-initialization time. */
    private static final Set<String> STOP_WORDS;

    static {
        STOP_WORDS = loadStopWords();
        log.info("中文停用词加载完毕, 数量:{}", STOP_WORDS.size());
    }

    /**
     * Loads the stop-word list from {@code public/stopwords.txt} on the classpath.
     *
     * <p>Each line is trimmed (this also strips the trailing {@code '\r'} left by
     * CRLF line endings, which would otherwise corrupt every entry) and blank
     * lines are skipped.
     *
     * @return the set of stop words; never {@code null}
     * @throws RuntimeException if the resource cannot be read
     */
    public static Set<String> loadStopWords() {
        Set<String> stopWords = new HashSet<>();
        ClassPathResource resource = new ClassPathResource("public/stopwords.txt");
        // try-with-resources: the original leaked the reader when readLine() threw
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(resource.getStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String word = line.trim();
                if (!word.isEmpty()) {
                    stopWords.add(word);
                }
            }
        } catch (Exception e) {
            throw new RuntimeException("加载停用词文件失败", e);
        }
        return stopWords;
    }

    /**
     * @param word candidate term
     * @return {@code true} if {@code word} is in the stop-word list
     */
    public boolean isStopWord(String word) {
        return STOP_WORDS.contains(word);
    }

    /** Inverted index: lower-cased term -> ids of contents containing it. */
    private final Map<String, Set<Long>> invertedIndex = new ConcurrentHashMap<>();

    @PostConstruct
    public void init() {
        // NOTE(review): placeholder — the index is built from an EMPTY list here,
        // so every df is 0. Presumably all posts should be loaded from a
        // repository at startup; TODO confirm and wire that in.
        List<PostsContentEntity> allContents = new LinkedList<>();
        buildInvertedIndex(allContents);
    }

    /**
     * Builds the inverted index (term -> content ids) for the given contents.
     *
     * @param contents all contents to index
     */
    private void buildInvertedIndex(List<PostsContentEntity> contents) {
        contents.forEach(content -> {
            Set<String> terms; // tokenized terms of this content
            try {
                terms = extractTerms(content.getText());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            // keys are lower-cased; lookups elsewhere must lower-case too
            terms.forEach(term ->
                    invertedIndex.computeIfAbsent(term.toLowerCase(), k -> new HashSet<>())
                            .add(content.getId()));
        });
    }

    /**
     * Extracts the distinct terms of a text, dispatching to the Chinese or
     * English tokenizer based on language detection.
     *
     * @param text raw text
     * @return de-duplicated terms (frequency information is discarded)
     * @throws IOException propagated from tokenization
     */
    private Set<String> extractTerms(String text) throws IOException {
        List<String> keywords = isChinese(text)
                ? processChinese(text)
                : processEnglish(text);
        // debug, not info: this runs once per document and was far too chatty
        log.debug("keywords: {}", keywords);
        return new HashSet<>(keywords);
    }

    /**
     * Simple language detection: a text counts as Chinese if it contains at
     * least one Han-script character.
     *
     * <p>Fix: this was hard-coded to {@code true}, which made the English
     * tokenization path unreachable. The {@code throws IOException} clause is
     * kept for source compatibility with existing callers.
     *
     * @param text text to inspect (may be {@code null})
     * @return {@code true} if any character belongs to the Han script
     */
    public static boolean isChinese(String text) throws IOException {
        if (text == null) {
            return false;
        }
        for (char c : text.toCharArray()) {
            if (Character.UnicodeScript.of(c) == Character.UnicodeScript.HAN) {
                return true;
            }
        }
        return false;
    }

    /**
     * English tokenization via Lucene's StandardTokenizer; stop words removed,
     * terms lower-cased.
     */
    private List<String> processEnglish(String text) {
        List<String> tokens = new ArrayList<>();
        try (StandardTokenizer tokenizer = new StandardTokenizer()) {
            tokenizer.setReader(new StringReader(text));
            CharTermAttribute charTermAttribute = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                String term = charTermAttribute.toString().toLowerCase();
                if (!isStopWord(term)) { // drop English stop words
                    tokens.add(term);
                }
            }
            tokenizer.end(); // complete the Lucene consume cycle before close()
        } catch (IOException e) {
            // log instead of printStackTrace(); keep best-effort semantics
            log.warn("英文分词失败", e);
        }
        return tokens;
    }

    /**
     * Chinese tokenization: HanLP keyword extraction with stop-word filtering.
     */
    private List<String> processChinese(String text) {
        return HanLP.extractKeyword(text, text.length() / 2 + 1)
                .stream()
                .filter(word -> {
                    boolean stopWord = isStopWord(word);
                    if (stopWord) {
                        log.debug("停用词: {}", word);
                    }
                    return !stopWord;
                })
                .collect(Collectors.toList());
    }

    /**
     * Computes a TF-IDF vector for {@code text} against the given corpus.
     *
     * <p>Because {@link #extractTerms} de-duplicates terms, TF degenerates to
     * {@code 1/|terms|} for every term (the original groupingBy/counting
     * pipeline always produced exactly that value). IDF uses
     * {@code log(N / (df + 1))} smoothing.
     *
     * @param text        text to vectorize
     * @param allContents the corpus (its size is the N in the IDF formula)
     * @return term -> TF-IDF weight; empty map for empty/stop-word-only text
     * @throws IOException propagated from tokenization
     */
    public Map<String, Double> calculateTFIDF(String text, List<PostsContentEntity> allContents) throws IOException {
        Set<String> terms = extractTerms(text);
        if (terms.isEmpty()) {
            return new HashMap<>();
        }

        // Every term occurs exactly once in the set, so TF is uniform.
        double tf = 1.0 / terms.size();

        Map<String, Double> tfidf = new HashMap<>();
        for (String term : terms) {
            // Fix: the index is keyed by lower-cased terms (see buildInvertedIndex);
            // the original raw-term lookup always missed mixed-case terms (df = 0).
            long df = invertedIndex.getOrDefault(term.toLowerCase(), Collections.emptySet()).size();
            double idf = Math.log((double) allContents.size() / (df + 1));
            tfidf.put(term, tf * idf);
        }

        return tfidf;
    }

    /**
     * Deserializes a JSON object into a feature vector (term -> weight).
     */
    public Map<String, Double> parseFeatureVector(String json) {
        return new Gson().fromJson(json, new TypeToken<Map<String, Double>>(){}.getType());
    }

    /**
     * Serializes a feature vector (term -> weight) to JSON.
     */
    public String toJson(Map<String, Double> vec) {
        return new Gson().toJson(vec);
    }
}