package com.zzs.restweb.controller;


import com.zzs.restweb.entity.StanfordCoreNLPVO;
import com.zzs.restweb.entity.StanfordCoreNLPVOHelper;
import com.zzs.restweb.util.StanfordCoreNLPBean;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.util.ArrayList;
import java.util.List;

/**
 * <p>
 * Front-end controller exposing Stanford CoreNLP text analysis.
 * </p>
 *
 * @author zzs
 * @since 2020-06-23
 */
@RestController
@RequestMapping("/api")
@Slf4j
@RequiredArgsConstructor
public class RestLogController {

    private final StanfordCoreNLPBean stanfordCoreNLPBean;

    /**
     * Analyzes the given text with the configured Stanford CoreNLP pipeline and
     * returns, per detected sentence, the token-level annotations together with
     * the parse tree and the dependency graph.
     *
     * @param words the raw text to analyze
     * @return a list of {@link StanfordCoreNLPVO}, one entry per sentence
     *         (empty when no sentence annotation is produced)
     */
    @GetMapping("/nlp")
    public Object api(@RequestParam String words) {
        List<StanfordCoreNLPVO> resultList = new ArrayList<>();
        StanfordCoreNLP pipeline = stanfordCoreNLPBean.getStanfordCoreNLP();
        long startTime = System.currentTimeMillis();
        // Wrap the raw text in an empty Annotation and run the full pipeline on it.
        Annotation document = new Annotation(words);
        pipeline.annotate(document);
        // Retrieve the per-sentence analysis results.
        List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
        // Guard: the annotation is null when the pipeline lacks the ssplit annotator;
        // return an empty result rather than throwing an NPE.
        if (sentences != null) {
            for (CoreMap sentence : sentences) {
                resultList.add(toSentenceVO(sentence));
            }
        }
        long time = System.currentTimeMillis() - startTime;
        log.info("The analysis lasts {} ms", time);
        return resultList;
    }

    /** Converts one analyzed sentence into its transport object (tokens + parse tree + dependency graph). */
    private StanfordCoreNLPVO toSentenceVO(CoreMap sentence) {
        List<StanfordCoreNLPVOHelper> helperList = new ArrayList<>();
        // Traverse the tokens of the current sentence; a CoreLabel is a CoreMap
        // with additional token-specific accessors.
        for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
            helperList.add(toTokenHelper(token));
        }
        // Constituency parse tree of the sentence.
        Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
        // Collapsed, CC-processed dependency graph of the sentence.
        SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class);
        return StanfordCoreNLPVO.builder()
                .word(helperList)
                .tree(tree.pennString())
                .graph(graph.toString(SemanticGraph.OutputFormat.LIST)).build();
    }

    /** Maps a single token to its per-token annotation holder. */
    private StanfordCoreNLPVOHelper toTokenHelper(CoreLabel token) {
        // Surface form of the token (the segmented word).
        String word = token.get(CoreAnnotations.TextAnnotation.class);
        // Part-of-speech tag.
        String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
        // Named-entity tag and its normalized form (e.g. for dates and numbers).
        String original = token.get(CoreAnnotations.NamedEntityTagAnnotation.class);
        String normalized = token.get(CoreAnnotations.NormalizedNamedEntityTagAnnotation.class);
        // Lemma (dictionary form) of the token.
        String lema = token.get(CoreAnnotations.LemmaAnnotation.class);
        return StanfordCoreNLPVOHelper.builder()
                .word(word)
                .pos(pos)
                .original(original)
                .normalized(normalized)
                .lema(lema).build();
    }
}
