package com.jd.jdnlp;

import com.jd.jdnlp.lda.Corpus;
import com.jd.jdnlp.lda.LdaGibbsSampler;
import com.jd.jdnlp.lda.LdaUtil;
import org.ansj.domain.Term;
import org.ansj.splitWord.Analysis;
import org.ansj.splitWord.analysis.*;

import java.io.*;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

/**
 * Created by liteng5 on 2016/11/3.
 */
public class Analyzer {

    /**
     * Standard segmentation (ToAnalysis); the default mode.
     */
    public static final int TOMODLE = 0;

    /**
     * Basic segmentation (BaseAnalysis): only the n-gram model and numeral
     * discovery, nothing else.
     */
    public static final int BASEMODLE = 1;

    /**
     * Segmentation that gives priority to user-defined dictionary entries
     * (DicAnalysis).
     */
    public static final int DICMODLE = 2;

    /**
     * Segmentation mode intended for search indexing (IndexAnalysis).
     */
    public static final int INDEXMODLE = 3;

    /**
     * NLP segmentation (NlpAnalysis) with out-of-vocabulary word discovery.
     * Recommended for natural-language understanding; do not use for search.
     */
    public static final int NLPMODLE = 4;

    /**
     * Not instantiable from outside; use {@link #doAnalyze(Reader, int)}.
     */
    private Analyzer() {
    }

    /**
     * Builds the segmenter matching {@code model}, then runs LDA over the text
     * read from {@code reader}.
     *
     * @param reader source text to analyze; this method does not close it
     * @param model  one of {@link #TOMODLE}, {@link #BASEMODLE},
     *               {@link #DICMODLE}, {@link #INDEXMODLE}, {@link #NLPMODLE};
     *               any other value falls back to {@link #TOMODLE}
     * @return topic words mapped to their probabilities (see {@link #lda})
     * @throws IOException if reading the input fails
     */
    public static Map<String, Double> doAnalyze(Reader reader, int model) throws IOException {
        Analysis analysis;
        switch (model) {
            case BASEMODLE:
                analysis = new BaseAnalysis(reader);
                break;
            case DICMODLE:
                // Buffered input for the dictionary-priority segmenter,
                // matching the original implementation.
                analysis = new DicAnalysis(new BufferedReader(reader));
                break;
            case INDEXMODLE:
                analysis = new IndexAnalysis(reader);
                break;
            case NLPMODLE:
                analysis = new NlpAnalysis(reader);
                break;
            case TOMODLE:
            default:
                analysis = new ToAnalysis(reader);
                break;
        }
        return new Analyzer().lda(analysis);
    }

    /**
     * Drains the segmenter and collects every term of length &gt;= 2 into a
     * single-document {@link Corpus}.
     *
     * @param analysis segmenter positioned at the start of the text
     * @return corpus containing exactly one document
     * @throws IOException if the underlying reader fails
     */
    public Corpus getCorpus(Analysis analysis) throws IOException {
        Corpus corpus = new Corpus();
        List<String> wordList = new ArrayList<String>();
        Term term;
        while ((term = analysis.next()) != null) {
            String word = term.getName().trim();
            // Single-character tokens carry little topical signal; skip them.
            if (word.length() < 2) {
                continue;
            }
            wordList.add(word);
        }
        corpus.addDocument(wordList);
        return corpus;
    }

    /**
     * Runs LDA (Gibbs sampling, 10 topics) over the segmented text and returns
     * the document's inferred topic expressed as its top words with
     * probabilities. Also prints the topic via {@code LdaUtil.explain}.
     *
     * @param analysis segmenter over the document to analyze
     * @return top topic words mapped to their probabilities
     * @throws IOException if reading the input fails
     */
    public Map<String, Double> lda(Analysis analysis) throws IOException {
        // 1. Build the single-document corpus from the segmented input.
        Corpus corpus = this.getCorpus(analysis);
        // 2. Create the Gibbs sampler over the corpus.
        LdaGibbsSampler ldaGibbsSampler =
                new LdaGibbsSampler(corpus.getDocument(), corpus.getVocabularySize());
        // 3. Train with a target of 10 topics.
        ldaGibbsSampler.gibbs(10);
        // 4. The phi matrix is the trained LDA model.
        double[][] phi = ldaGibbsSampler.getPhi();
        // 5. Infer this document's topic distribution from the trained model.
        // NOTE(review): getDocument(corpus) passes the corpus to itself — this
        // looks odd; presumably it flattens the corpus into one word-id array.
        // Confirm against the Corpus API.
        int[] document = corpus.getDocument(corpus);
        double[] tp = LdaGibbsSampler.inference(phi, document);
        Map<String, Double> topic = LdaUtil.translate(tp, phi, corpus.getVocabulary(), 10);
        LdaUtil.explain(topic);
        return topic;
    }

    /**
     * Demo entry point: segments a local file with the standard mode and
     * prints its LDA topic.
     *
     * @param args ignored
     */
    public static void main(String[] args) {
        String source = "d://ldatest//source//论文.txt";
        // NOTE(review): FileReader decodes with the platform default charset;
        // confirm the input file's encoding matches it (consider an explicit
        // charset if the file is UTF-8).
        try (Reader reader = new FileReader(source)) {
            Analyzer.doAnalyze(reader, Analyzer.TOMODLE);
        } catch (IOException e) {
            // FileNotFoundException is an IOException, so one handler suffices.
            e.printStackTrace();
        }
    }
}
