package com.huaban.analysis.jieba;


import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.util.*;

public class POSTokenizer {

    /** Word -> part-of-speech tag mapping, loaded once from the bundled dictionary resource. */
    private final Map<String, String> posDictionary;

    public POSTokenizer() {
        this.posDictionary = new HashMap<>();
        loadPosDictionary(this.posDictionary, this.getClass().getResourceAsStream("/processed_dict2.txt"));
    }

    /**
     * Loads the part-of-speech dictionary from the given stream.
     * <p>
     * Each accepted line has exactly three whitespace-separated fields:
     * the word, a middle field that is ignored (presumably a frequency — TODO confirm
     * against the dictionary file format), and the POS tag. Malformed lines are skipped.
     *
     * @param posDictionary map to populate with word -> POS tag entries
     * @param inputStream   UTF-8 encoded dictionary stream; closed by this method
     * @throws IllegalArgumentException if {@code inputStream} is null (resource not found)
     * @throws UncheckedIOException     if reading the stream fails
     */
    private void loadPosDictionary(Map<String, String> posDictionary, InputStream inputStream) {
        if (inputStream == null) {
            throw new IllegalArgumentException("词性字典文件未找到");
        }

        // StandardCharsets.UTF_8 avoids the checked UnsupportedEncodingException
        // that the "UTF-8" string overload forces.
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // trim() first: leading whitespace would otherwise produce a leading
                // empty element from split("\\s+"), making parts.length == 4 and
                // silently dropping an otherwise valid entry.
                String[] parts = line.trim().split("\\s+");
                if (parts.length == 3) {
                    String word = parts[0];
                    String pos = parts[2];
                    posDictionary.put(word, pos);
                }
            }
        } catch (IOException e) {
            // Fail fast instead of swallowing the error: a half-loaded dictionary
            // would tag most words "unknown" with no indication of the cause.
            throw new UncheckedIOException("Failed to load POS dictionary", e);
        }
    }

    /**
     * Attaches a part-of-speech tag to each segmented token.
     *
     * @param content  original text (currently unused; kept for interface compatibility)
     * @param segments segmentation result to annotate
     * @return one {@link POSToken} per input segment, tagged from the dictionary,
     *         or {@code "unknown"} when the word is not in the dictionary
     */
    public List<POSToken> process(String content, List<SegToken> segments) {
        List<POSToken> postokens = new ArrayList<>(segments.size());

        for (SegToken segToken : segments) {
            String word = segToken.word;
            String pos = posDictionary.getOrDefault(word, "unknown");
            postokens.add(new POSToken(word, pos, segToken.startOffset, segToken.endOffset));
        }

        return postokens;
    }
}