package main;

import edu.cn.nlsde.tmfst.EVA.CoherenceScore;
import edu.cn.nlsde.tmfst.mlda.GSLDA_without_background;
import edu.cn.nlsde.tmfst.model.Comment;
import edu.cn.nlsde.tmfst.model.ModelResult;
import edu.cn.nlsde.tmfst.processor.data_pre_process_Chinese;
import wntm.InferenceTopicsForOrgDocs;
import wntm.PrepareInput;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.regex.Pattern;

/**
 * Training pipeline for an RWNTM-style topic model over Chinese comments:
 * strip HTML tags, segment the text, build pseudo-documents via
 * {@link PrepareInput}, run Gibbs-sampling LDA without a background topic,
 * then fill a {@link ModelResult} with topic assignments and coherence scores.
 *
 * Created by dell on 2016/12/20.
 */
public class RWNTM {
    // Compiled once: strips HTML-style tags such as <br> from raw comment text.
    private static final Pattern TAG_PATTERN = Pattern.compile("<[^<>]+?>");

    // Global word sequence that PrepareInput appends as the LAST pseudo-doc;
    // detached in trainData and consumed by the per-document inference step.
    private List<String> word_sequence;
    List<List<String>> origin_doc; // segmented documents used for training/coherence
    List<String> comments;         // raw comment strings with tags removed
    List<List<String>> docs;       // segmentation-only view of the comments

    /**
     * Trains the topic model on the given comments and returns the filled result.
     *
     * @param data non-empty list of comments; the photo list of the first
     *             comment is copied onto the result
     * @return trained model result, including coherence scores at top-5 and top-10 words
     * @throws IllegalArgumentException if {@code data} is null or empty
     */
    public ModelResult trainData(List<Comment> data) {
        if (data == null || data.isEmpty()) {
            // Fail fast: data.get(0) below would otherwise surface as a bare
            // IndexOutOfBoundsException deep inside the pipeline.
            throw new IllegalArgumentException("data must contain at least one comment");
        }
        comments = CommentToString(data);
        origin_doc = preprocess_data(comments);
        docs = data_pre_process_Chinese.get_only_segment(comments);
        PrepareInput instance = new PrepareInput(origin_doc, 5);
        List<List<String>> pseudo_doc = instance.get_pseudo();
        // The last pseudo-doc is the global word sequence, not a real document;
        // List.remove(int) both detaches and returns it.
        word_sequence = pseudo_doc.remove(pseudo_doc.size() - 1);

        GSLDA_without_background process =
                new GSLDA_without_background(pseudo_doc, getTopicNum(origin_doc.size()));
        process.run();
        System.out.println("finished train");
        ModelResult result = fillResult(process);
        System.out.println("finished fill result");
        result.setPhotoList(data.get(0).getPhotoList());
        System.out.println("finished getData");
        CoherenceScore calCohe = new CoherenceScore(origin_doc);
        System.out.println("finished CoherenceScore");
        result.setCoherence_5(calCohe.CoherenceScores(process.wordMap, result.getT2W(), 5));
        System.out.println("finished CoherenceScores 5");
        result.setCoherence_10(calCohe.CoherenceScores(process.wordMap, result.getT2W(), 10));
        System.out.println("finished cal CoherenceScores 10");
        return result;
    }

    /**
     * Fills per-word corpus frequency ({@code result.wordCount}) and document
     * frequency ({@code result.wordIDF}), both indexed by the word's id in
     * {@code wordmap}. Words absent from the vocabulary map are skipped.
     */
    private void get_word_num(HashMap<String, Integer> wordmap, ModelResult result) {
        result.wordCount = new int[wordmap.size()];
        result.wordIDF = new int[wordmap.size()];

        for (List<String> line : this.origin_doc) {
            HashSet<String> seenInDoc = new HashSet<>();
            for (String word : line) {
                Integer id = wordmap.get(word); // single lookup instead of containsKey + 2x get
                if (id == null) {
                    continue; // word was filtered out of the vocabulary
                }
                result.wordCount[id]++;
                // HashSet.add returns true only on the first occurrence in this doc
                if (seenInDoc.add(word)) {
                    result.wordIDF[id]++;
                }
            }
        }
    }

    /**
     * Copies the trained model's counts and topic assignments into a new
     * {@link ModelResult}, then re-infers topics for the original documents
     * from the pseudo-document assignments.
     */
    private ModelResult fillResult(GSLDA_without_background process) {

        ModelResult result = new ModelResult(process.word_num_in_topic.length, process.id2Word);
        System.out.println("finished ModelResult");
        get_word_num(process.wordMap, result);
        System.out.println("finished get_word_num");
        result.setW2T(process.word_num_in_doc_topic);
        System.out.println("finished setW2T");
//        result.setD2W(get_assign(origin_doc, process.wordMap));
        result.setDocs(docs);
        System.out.println("finished setDocs");
//        result.setI2W(process.id2Word);
        result.setT2W(process.word_num_in_topic_word);
        System.out.println("finished setT2W");

        InferenceTopicsForOrgDocs infer_org = new InferenceTopicsForOrgDocs();
        // Seed D2T with the pseudo-doc assignments, then replace it with the
        // inference over the ORIGINAL documents.
        result.setD2T(process.word_num_in_doc_topic);
        System.out.println("finished setD2T");
        result.setD2T(infer_org.InferenceTopics(word_sequence, result.getD2T(), origin_doc));
        System.out.println("finished infer setD2T"); // distinct from the seed step above
        result.setW2T_Vec(process.word_num_in_doc_topic);
        System.out.println("finished setW2T_Vec");
        return result;
    }

    /**
     * Maps each document to the word-id sequence of its tokens, using -1 for
     * out-of-vocabulary words. Currently unused (see commented setD2W above),
     * kept for the disabled code path.
     */
    private static int[][] get_assign(List<List<String>> origin_doc, HashMap<String, Integer> wordMap) {
        int[][] assign = new int[origin_doc.size()][];
        for (int i = 0; i < assign.length; i++) {
            List<String> oneDoc = origin_doc.get(i);
            assign[i] = new int[oneDoc.size()];
            for (int k = 0; k < assign[i].length; k++) {
                Integer id = wordMap.get(oneDoc.get(k));
                assign[i][k] = (id != null) ? id : -1;
            }
        }
        return assign;
    }

    /**
     * Heuristic topic count: log(num) + 5, clamped to at least 1.
     * The clamp guards num == 0, where Math.log(0) is -Infinity and the
     * original cast produced Integer.MIN_VALUE topics.
     */
    private static int getTopicNum(int num) {
        return Math.max(1, (int) (Math.log(num) + 5));
    }

    /** Extracts comment text, stripping HTML-style tags with the precompiled pattern. */
    private static List<String> CommentToString(List<Comment> data) {
        List<String> result = new ArrayList<>(data.size());
        for (Comment item : data) {
            result.add(TAG_PATTERN.matcher(item.comment_content).replaceAll(""));
        }
        return result;
    }

    /** Segments the raw comment strings into token lists (delegates to the Chinese pre-processor). */
    public static List<List<String>> preprocess_data(List<String> data) {
        return data_pre_process_Chinese.get_segment_Corpus(data);
    }
}
