package query_understanding.query_understand.tree_generator;

import algorithm.config.HumanLanguage;
import algorithm.nlp.corenlp.QueryAnnotator;
import algorithm.nlp.corenlp.TextAnnotator;
import com.sun.tools.javac.code.AnnoConstruct;
import common.helper.json.JavaToJsonString;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.semgraph.SemanticGraphEdge;
import edu.stanford.nlp.util.CoreMap;
import javafx.util.Pair;
import org.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
import query_understanding.query_understand.tree_progress.DocumentQueryProgress;
import query_understanding.query_language.doc_entity_tree.QueryNodeTokenMap;
import scala.tools.cmd.gen.AnyVals;
import semantic_compute.concept_identify.yago_identify.YagoConceptIdentifier;
import semantic_compute.entity_linking.schema.EntityLinkingResult;
import semantic_compute.entity_linking.yago_linker.YagoQueryEntityLinker;

import java.util.*;

/**
 * Created by julianzliu on 4/22/2017.
 */
//@Component
public class QueryPreprocessor {

    static final Logger logger = LoggerFactory.getLogger(QueryPreprocessor.class);


    private YagoQueryEntityLinker entityLinker;
    private QueryAnnotator textAnnotator;
    private YagoConceptIdentifier conceptIdentifier;


    /*
     * Document-level pre-processing state. All pipeline stages read from and
     * write into this shared progress object.
     */
    private DocumentQueryProgress documentQueryProgress;


    public QueryPreprocessor(DocumentQueryProgress documentQueryProgress) {
        this.documentQueryProgress = documentQueryProgress;
    }


    /**
     * Runs the full pre-processing pipeline over
     * {@code documentQueryProgress.originalWholeQuery}: dependency parsing,
     * concept identification, entity linking, and alignment of linked entities
     * with dependency-tree nodes. Prints the alignment result for debugging.
     *
     * @param textAnnotator CoreNLP-based annotator used to parse the query
     * @return the shared {@link DocumentQueryProgress} with all results filled in
     */
    public DocumentQueryProgress process(QueryAnnotator textAnnotator) {

        getDependencyTrees(textAnnotator);

        getConcepts(new YagoConceptIdentifier());

        getEntityLinking(new YagoQueryEntityLinker());

        alignEntityWithTreeNode();

        printAlignResult();

        return this.documentQueryProgress;
    }


    /**
     * Annotates the whole query with CoreNLP and stores the resulting
     * {@link Annotation} document on the progress object.
     *
     * @param textAnnotator annotator wrapping the CoreNLP pipeline
     * @return the shared progress object, with {@code document} populated
     */
    private DocumentQueryProgress getDependencyTrees(QueryAnnotator textAnnotator) {
        this.textAnnotator = textAnnotator;
        this.documentQueryProgress.document = textAnnotator.getAnnotatedText(this.documentQueryProgress.originalWholeQuery);

        //this.textAnnotator.pipeline.prettyPrint( this.documentQueryProgress.document, System.out );
        return this.documentQueryProgress;
    }


    /**
     * Identifies candidate concepts for every noun (NN / NNS) in each sentence
     * and records a per-sentence map from concept string to the token it came
     * from in {@code sentConceptTokenMap}.
     *
     * NOTE(review): the actual identification call is currently commented out
     * (see the TODO below), so every {@code concepts} map ends up empty until
     * it is re-enabled.
     *
     * @param conceptIdentifier YAGO-backed concept identifier
     * @return the shared progress object, with {@code sentConceptTokenMap} populated
     */
    private DocumentQueryProgress getConcepts(YagoConceptIdentifier conceptIdentifier) {
        this.conceptIdentifier = conceptIdentifier;
        List<CoreMap> sentences = this.documentQueryProgress.document.get(CoreAnnotations.SentencesAnnotation.class);

        Map<CoreMap, Map<String, IndexedWord>> sentConceptTokenMap = new HashMap<>();

        for (CoreMap sentence : sentences) {
            // Collect all singular and plural common nouns of the sentence.
            Set<IndexedWord> nouns = new HashSet<>();  // NN  NNS
            Map<String, IndexedWord> concepts = new HashMap<>();
            SemanticGraph dependencies = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
            nouns.addAll(dependencies.getAllNodesByPartOfSpeechPattern("NN"));
            nouns.addAll(dependencies.getAllNodesByPartOfSpeechPattern("NNS"));

            for (IndexedWord word : nouns) {
                List<Pair<String, Double>> canditate_concepts = new ArrayList<>();

                /*
                TODO check this hot spot for performance before re-enabling
                 */
                //canditate_concepts =  conceptIdentifier.simpleConceptIdentify( word.lemma() );

                // Keep only the top-ranked candidate concept, if any.
                String concept = null;
                if (canditate_concepts.size() > 0)
                    concept = canditate_concepts.get(0).getKey();
                if (concept != null) {
                    concepts.put(concept, word);
                }
            }
            sentConceptTokenMap.put(sentence, concepts);
        }

        this.documentQueryProgress.sentConceptTokenMap = sentConceptTokenMap;
        return this.documentQueryProgress;
    }


    /**
     * Runs YAGO entity linking on the raw query and aligns the linking result
     * with the CoreNLP token/sentence indices of the annotated document.
     *
     * @param entityLinker YAGO query entity linker
     * @return the shared progress object, with {@code entityLinkingResult} populated
     */
    private DocumentQueryProgress getEntityLinking(YagoQueryEntityLinker entityLinker) {
        this.entityLinker = entityLinker;
        this.documentQueryProgress.entityLinkingResult = entityLinker.getYagoEntityLinkingResult(this.documentQueryProgress.originalWholeQuery);
        this.documentQueryProgress.entityLinkingResult.alignWithCoreNlp(this.documentQueryProgress.document);
        return this.documentQueryProgress;
    }


    /**
     * Aligns the dependency parse with the entity-linking result.
     *
     * For each linked entity mention we want:
     *   1) a pointer to a node in the dependency tree, and
     *   2) that node should be the "head" of the whole mention phrase,
     *      e.g. the head of a compound noun.
     * Simple heuristic: the root of the connected subtree covering the mention
     * is its head — i.e. the first mention token reached by a depth-first
     * traversal from the sentence root. (Mentions spanning several
     * disconnected subtrees are not specially handled.)
     *
     * @return the shared progress object, with {@code sentEntityTokenMap} populated
     */
    private DocumentQueryProgress alignEntityWithTreeNode() {
        // ( sentence -> entity -> the set of tokens it covers )
        Map<CoreMap, Map<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap>> sentEntityTokenMap = new HashMap<>();

        // Inverted index: sentence index -> token index -> linked entity.
        List<EntityLinkingResult.LinkedEntity> mentions = this.documentQueryProgress.entityLinkingResult.mentions;
        Map<Integer, Map<Integer, EntityLinkingResult.LinkedEntity>> sentTokenEntityMap = new HashMap<>();
        for (EntityLinkingResult.LinkedEntity mention : mentions) {
            // Bucket the mention under its sentence, then index every token it spans.
            Map<Integer, EntityLinkingResult.LinkedEntity> tokenEntityMap =
                    sentTokenEntityMap.computeIfAbsent(mention.sentIndex, k -> new HashMap<>());
            for (int i = mention.startIndex; i <= mention.endIndex; i++) {
                tokenEntityMap.put(i, mention);
            }
        }
        // sentTokenEntityMap may be empty
        System.out.println("[sentTokenEntityMap]" + JavaToJsonString.mapJavaToJson(sentTokenEntityMap));


        Set<IndexedWord> visited = new HashSet<>();
        List<CoreMap> sentences = this.documentQueryProgress.document.get(CoreAnnotations.SentencesAnnotation.class);
        int sentIndex = 0;
        for (CoreMap sentence : sentences) {
            /*
             * Enhanced dependencies may contain cycles, though they have other
             * benefits (e.g. clause subjects pointing at their antecedents);
             * use the basic (tree-shaped) dependencies here.
             */
            SemanticGraph dependencies = sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            //SemanticGraph dependencies = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
            IndexedWord firstRoot = dependencies.getFirstRoot();

            // The sentIndex key may be absent when no entity was linked in this sentence.
            Map<Integer, EntityLinkingResult.LinkedEntity> tokenEntityMap =
                    sentTokenEntityMap.getOrDefault(sentIndex, new HashMap<>());

            Map<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap> entityTokenMap = new HashMap<>();
            System.out.println("\n\n[parse dependency tree]");
            deepFirstTraversal(dependencies, firstRoot, tokenEntityMap, entityTokenMap, visited);

            for (Map.Entry<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap> entry : entityTokenMap.entrySet()) {
                QueryNodeTokenMap tokenMap = entry.getValue();
                System.out.println("entity = " + entry.getKey() + " mainToken = " + tokenMap.mainToken );
                for (IndexedWord word : tokenMap.tokenSet) System.out.print(word + " ");
                System.out.print("\n");
            }

            // Record this sentence's (entity -> tokens) mapping.
            sentEntityTokenMap.put(sentence, entityTokenMap);

            sentIndex += 1;
        }
        System.out.println(JavaToJsonString.mapJavaToJson(sentEntityTokenMap));
        this.documentQueryProgress.sentEntityTokenMap = sentEntityTokenMap;
        return this.documentQueryProgress;
    }

    /**
     * Depth-first traversal of the dependency graph that groups visited nodes
     * by the linked entity covering them. The first node of an entity reached
     * in DFS order becomes that entity's {@code mainToken} (the head of its
     * connected subtree).
     *
     * @param dependencies   dependency graph of the current sentence
     * @param root           node to visit
     * @param tokenEntityMap token index -> entity, for the current sentence only
     * @param entityTokenMap output: entity -> (main token + all covered tokens)
     * @param visited        guard set against cycles / repeated visits
     */
    private void deepFirstTraversal(SemanticGraph dependencies, IndexedWord root,
                                    Map<Integer, EntityLinkingResult.LinkedEntity> tokenEntityMap,
                                    Map<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap> entityTokenMap,
                                    Set<IndexedWord> visited) {
        if (visited.contains(root)) {
            logger.warn("[Tree Node has already been visited] \n{}", root);
            return;
        }
        visited.add(root);
        System.out.println(root);
        /*
         * On visiting a node:
         * 1) check whether it lies inside the span of some linked entity;
         * 2) if so, the first such node reached by the DFS becomes the
         *    representative ("main") token of that entity.
         */
        Integer tokenIndex = root.index();
        // If the (index -> entity) map covers this token, attach it to its entity.
        if (tokenEntityMap.containsKey(tokenIndex)) {
            EntityLinkingResult.LinkedEntity entity = tokenEntityMap.get(tokenIndex);
            QueryNodeTokenMap nodeTokenMap = entityTokenMap.computeIfAbsent(entity, e -> {
                // First DFS visit for this entity: this node is the subtree head.
                QueryNodeTokenMap m = new QueryNodeTokenMap();
                m.mainToken = root;
                return m;
            });
            nodeTokenMap.tokenSet.add(root);
        }
        for (SemanticGraphEdge edge : dependencies.getOutEdgesSorted(root)) {
            //GrammaticalRelation relation = edge.getRelation();
            deepFirstTraversal(dependencies, edge.getDependent(), tokenEntityMap, entityTokenMap, visited);
        }
    }


    /**
     * Debug helper: prints, per sentence, every aligned entity with its main
     * token and the full set of tokens it covers.
     */
    public void printAlignResult() {
        for (Map.Entry<CoreMap, Map<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap>> sentEntry
                : documentQueryProgress.sentEntityTokenMap.entrySet()) {
            System.out.println("\n\nsentence = -------------------->  \n" + sentEntry.getKey());
            for (Map.Entry<EntityLinkingResult.LinkedEntity, QueryNodeTokenMap> entityEntry
                    : sentEntry.getValue().entrySet()) {
                QueryNodeTokenMap map = entityEntry.getValue();
                System.out.println("[entity = " + entityEntry.getKey().entity + " ] , [mainToken = " + map.mainToken + " ]");
                for (IndexedWord token : map.tokenSet) {
                    System.out.println("  ->" + token);
                }
            }
        }
    }


    /*********************************************************
     * Ad-hoc manual test of the alignment pipeline.
     */
    public static void main(String[] args) {

        DocumentQueryProgress progress = new DocumentQueryProgress();
        progress.originalWholeQuery = "Obama's brother who voted Trump.";

        QueryPreprocessor preprocessor = new QueryPreprocessor(progress);
        preprocessor.getEntityLinking(new YagoQueryEntityLinker());
        //preprocessor.getDenpendencyTrees( new TextAnnotator(HumanLanguage.ENGLISH) );
        QueryAnnotator textAnnotator = new QueryAnnotator();
        preprocessor.getDependencyTrees(textAnnotator);
        preprocessor.alignEntityWithTreeNode();
        preprocessor.printAlignResult();

    }


}