package textprocessing.datasources.enwikipedia;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.elasticsearch.action.index.IndexResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import textprocessing.nlpanalysis.context_annotate.ContextAnnotation;
import textprocessing.nlpanalysis.context_annotate.ContextResult;
import textprocessing.nlpanalysis.context_annotate.ParagraphSplitor;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Created by common on 2017/3/30.
 */
public class TransformerEnWikiNews {

    private static final Logger logger = LoggerFactory.getLogger(TransformerEnWikiNews.class);

    private final ExtractorEnWikiNews extractorEnWikiNews;
    private final IndexerEnWikiNews indexerEnWikiNews;
    private final ContextAnnotation contextAnnotation;
    private final ObjectMapper mapper;

    public TransformerEnWikiNews() {
        this.extractorEnWikiNews = new ExtractorEnWikiNews();
        this.indexerEnWikiNews = new IndexerEnWikiNews();
        this.contextAnnotation = new ContextAnnotation();
        this.mapper = new ObjectMapper();
    }

    /**
     * Walks every record of the en-wiki-news index via the Elasticsearch scroll API:
     * opens an initial scroll, then keeps requesting continuation pages with the
     * returned scroll id until a page comes back empty, transforming each page's
     * documents as it goes.
     */
    public void traversalAllRecords() {
        boolean isInitial = true;
        int scrollSize = 0;
        String scrollId = "";
        while (isInitial || scrollSize > 0) {
            /*
            1) pull one page of documents
             */
            JsonNode scrollOutput;
            if (isInitial) {
                scrollOutput = this.extractorEnWikiNews.getEnwikiInitialScroll();
                isInitial = false;
            } else {
                scrollOutput = this.extractorEnWikiNews.getEnwikiContinueScroll(scrollId);
            }
            scrollSize = this.extractorEnWikiNews.getScrollSize(scrollOutput);
            scrollId = this.extractorEnWikiNews.getScrollId(scrollOutput);

            /*
            2) extract the hits array from the scroll response
             */
            ArrayNode documents = this.extractorEnWikiNews.getDocumentsArray(scrollOutput);

            /*
            3) process every document of this scroll page
             */
            traversalScrollRecords(documents);
        }
    }

    /**
     * Processes every record of a single scroll page.
     *
     * @param records the hits array of one scroll response
     */
    public void traversalScrollRecords(ArrayNode records) {
        // ArrayNode is Iterable<JsonNode>; no need for an index loop.
        for (JsonNode record : records) {
            processRecord(record);
        }
    }

    /**
     * Entry point for transforming one Elasticsearch hit: extracts the document id
     * and the "_source" payload, then delegates to {@link #processSingleRecord}.
     *
     * @param record one hit object; assumed to carry "_id" and "_source" fields
     */
    public void processRecord(JsonNode record) {
        String id = record.get("_id").asText();
        logger.info("[processRecord] id = {}", id);
        ObjectNode documentNode = (ObjectNode) record.get("_source");

        processSingleRecord(documentNode, id);
    }

    /**
     * Transforms and indexes a single wiki-news document: prepends the title to the
     * text, splits the result into paragraphs, indexes each paragraph, generates and
     * indexes the contexts found in each paragraph, then re-indexes the document with
     * the collected context ids and entity ids attached.
     *
     * @param documentNode the "_source" object of the document; must actually be an
     *                     ObjectNode so the "contexts"/"entities" arrays can be attached
     * @param id           the Elasticsearch document id
     */
    public void processSingleRecord(JsonNode documentNode, String id) {
        String text = documentNode.get("text").asText();
        String title = documentNode.get("title").asText();

        // Prepend the title so it also goes through entity/context extraction.
        text = title + ".  " + text;

        // Normalize double quotes; they interfere with downstream processing.
        text = text.replace("\"", "'");

        // Lists (not sets): duplicates across paragraphs are kept intentionally.
        List<String> docEntities = new ArrayList<>();
        List<String> docContexts = new ArrayList<>();

        /*
        The whole article is too long for named-entity recognition in one shot,
        so it is split into paragraphs/sentences first.
         */
        String[] paragraphs = ParagraphSplitor.articleToSentences(text);

        for (String para : paragraphs) {
            // Index the paragraph first so its contexts can reference the paragraph id.
            String paraId = loadParagraph(para, id);
            for (ContextResult context : generateContext(para)) {
                ObjectNode contextNode = genObjectNode(context, id, paraId);
                String contextId = loadContext(contextNode);
                docContexts.add(contextId);
                docEntities.addAll(context.entites);
            }
        }

        ArrayNode docContextsArr = this.mapper.createArrayNode();
        for (String contextId : docContexts) docContextsArr.add(contextId);
        ArrayNode docEntitiesArr = this.mapper.createArrayNode();
        for (String entityId : docEntities) docEntitiesArr.add(entityId);

        ObjectNode doc = (ObjectNode) documentNode;
        // set(): put(String, JsonNode) is deprecated in Jackson.
        doc.set("contexts", docContextsArr);
        doc.set("entities", docEntitiesArr);

        loadDocument(doc, id);
    }

    /**
     * Runs context annotation over one paragraph.
     *
     * @param para paragraph text
     * @return the contexts (each with its set of entity ids) found in the paragraph
     */
    public List<ContextResult> generateContext(String para) {
        return this.contextAnnotation.getContextsOfDocument(para);
    }

    /**
     * Builds the JSON object that will be indexed for one context.
     *
     * @param contextResult the annotated context (text plus entity ids)
     * @param docid         id of the owning document
     * @param para_id       id of the owning paragraph
     * @return the context node ready for indexing
     */
    public ObjectNode genObjectNode(ContextResult contextResult, String docid, String para_id) {
        ObjectNode contextNode = this.mapper.createObjectNode();
        contextNode.put("text", contextResult.context);
        contextNode.put("docid", docid);
        contextNode.put("paraid", para_id);
        ArrayNode entitiesArrayNode = this.mapper.createArrayNode();
        for (String entityId : contextResult.entites) {
            entitiesArrayNode.add(entityId);
        }
        // set(): put(String, JsonNode) is deprecated in Jackson.
        contextNode.set("entities", entitiesArrayNode);
        return contextNode;
    }

    /**
     * Indexes one context node.
     *
     * @param contextNode the context JSON to store
     * @return the context id assigned by Elasticsearch
     */
    public String loadContext(ObjectNode contextNode) {
        logger.debug("[loadContext] context = {}", contextNode);
        IndexResponse response = this.indexerEnWikiNews.saveContextNode(contextNode);
        logger.debug("[loadContext] response = {}", response);
        return response.getId();
    }

    /**
     * Re-indexes the (augmented) document under its original id.
     *
     * @param documentNode the document JSON to store
     * @param id           the original document id
     * @return the document id reported by Elasticsearch
     */
    public String loadDocument(ObjectNode documentNode, String id) {
        IndexResponse response = this.indexerEnWikiNews.saveDocumentNode(documentNode, id);
        return response.getId();
    }

    /**
     * Indexes one paragraph with a back-reference to its document.
     *
     * @param para  the paragraph text
     * @param docid id of the owning document
     * @return the paragraph id assigned by Elasticsearch
     */
    public String loadParagraph(String para, String docid) {
        ObjectNode paraNode = this.mapper.createObjectNode();
        paraNode.put("text", para);
        paraNode.put("docid", docid);
        IndexResponse response = this.indexerEnWikiNews.saveParaNode(paraNode);
        return response.getId();
    }

    /****************************************************************************************
     * Multi-threaded document indexing (not implemented yet):
     * 1) split the raw json documents into several files, each mapped to an es_context_type;
     * 2) each thread transforms the documents of one es_type.
     */
    public static class TransformerEnWikiNewsThread implements Runnable {
        @Override
        public void run() {
            // TODO: implement per-type transformation.
        }
    }

    /****************************************************************************************
     * Command-line entry point: transforms the whole index, then reports success.
     */
    public static void main(String[] args) {
        TransformerEnWikiNews convertor = new TransformerEnWikiNews();
        convertor.traversalAllRecords();
        logger.info("SUCCESS");
    }

}
