package com.example.endusation.config;

import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.Properties;

@Configuration
public class CoreNlpConfig {

    /**
     * Builds a StanfordCoreNLP pipeline configured for Chinese word segmentation
     * only. The annotator list is restricted to {@code tokenize} and
     * {@code ssplit}; POS tagging, NER and other annotators are deliberately
     * omitted to keep the pipeline lightweight.
     *
     * @return a pipeline performing Chinese tokenization and sentence splitting
     */
    @Bean(name = "chineseCoreNlpPipeline")
    public StanfordCoreNLP chineseCoreNlpPipeline() {
        return new StanfordCoreNLP(segmentationOnlyProperties());
    }

    /** Assembles the minimal property set needed for Chinese segmentation. */
    private static Properties segmentationOnlyProperties() {
        Properties config = new Properties();

        // Only tokenization and sentence splitting — no tagging/NER annotators.
        config.setProperty("annotators", "tokenize,ssplit");

        // Target language for the pipeline and the tokenizer is Chinese.
        config.setProperty("language", "zh");
        config.setProperty("tokenize.language", "zh");

        // Segmenter resources: CTB model, SIGHAN corpora dictionary, serialized
        // dictionary, and SIGHAN post-processing enabled.
        config.setProperty("segment.model", "edu/stanford/nlp/models/segmenter/chinese/ctb.gz");
        config.setProperty("segment.sighanCorporaDict", "edu/stanford/nlp/models/segmenter/chinese");
        config.setProperty("segment.serDictionary", "edu/stanford/nlp/models/segmenter/chinese/dict-chris6.ser.gz");
        config.setProperty("segment.sighanPostProcessing", "true");

        return config;
    }
}
