package my.st.service.segment;

import com.huaban.analysis.jieba.SegToken;
import my.st.domain.repo.Word;
import my.st.domain.repo.WordRepository;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.Resource;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

/**
 * A custom segmentation service that tokenizes strictly against the word-root
 * dictionary ({@link WordRepository}), using greedy longest-match from left to right.
 */
@Service
@Qualifier("my")
public class MySegmentServiceImpl implements SegmentService{

    @Resource
    private WordRepository wordRepository;

    /**
     * Dictionary index: first character of a word -> all words starting with it.
     * Reassigned wholesale by {@link #reloadDict()}; volatile so a reload is
     * immediately visible to threads currently segmenting.
     */
    public static volatile Map<String, List<Word>> KEY_WORD_MAP = new HashMap<>();

    @PostConstruct
    @Override
    public void reloadDict() {
        // Group all dictionary words by their first character. Blank names are
        // skipped to avoid StringIndexOutOfBoundsException from substring(0, 1).
        KEY_WORD_MAP = wordRepository.findAll().stream()
                .filter(word -> word.getCnName() != null && !word.getCnName().isEmpty())
                .collect(Collectors.groupingBy(word -> word.getCnName().substring(0, 1)));
    }

    /**
     * Splits {@code sentence} into tokens by greedy longest dictionary match.
     * Runs of characters with no dictionary match are emitted as a single
     * "unrecognized" token covering the whole run.
     *
     * @param sentence the text to segment; must not be {@code null}
     * @return tokens in order of appearance, with [start, end) character offsets
     */
    public List<SegToken> doSegment(String sentence) {
        List<SegToken> res = new ArrayList<>();
        int len = sentence.length();

        StringBuilder unFind = new StringBuilder();
        for (int i = 0; i < len; ) {
            char c = sentence.charAt(i);

            // Candidate words whose first character matches the current one.
            List<Word> list = KEY_WORD_MAP.get(String.valueOf(c));

            // Longest word that matches at position i, if any.
            Word maxLenWord = null;
            if (list != null) {
                for (Word word : list) {
                    // BUGFIX: the original used sentence.indexOf(word) == i, which only
                    // matches the FIRST occurrence of a word in the sentence — a word
                    // appearing a second time was never matched — and is O(n) per probe.
                    // startsWith(word, i) tests the match at position i directly.
                    if (sentence.startsWith(word.getCnName(), i)) {
                        if (maxLenWord == null || word.getCnName().length() > maxLenWord.getCnName().length()) {
                            maxLenWord = word;
                        }
                    }
                }
            }

            if (maxLenWord != null) {
                // Flush any preceding unrecognized run as one token.
                if (unFind.length() > 0) {
                    res.add(new SegToken(unFind.toString(), i - unFind.length(), i));
                    unFind = new StringBuilder();
                }
                res.add(new SegToken(maxLenWord.getCnName(), i, i += maxLenWord.getCnName().length()));
            } else {
                // No full-word match starting here; accumulate and advance one char.
                unFind.append(c);
                i++;
            }
        }
        // Flush the trailing unrecognized run, if any.
        if (unFind.length() > 0) {
            res.add(new SegToken(unFind.toString(), len - unFind.length(), len));
        }
        return res;
    }
}
