package Entity;

import Main.Constraint;
import util.FileUtil;
import util.GeneralUtil;
import util.RegexJudge;
import util.StringUtil;

import java.io.BufferedWriter;
import java.io.IOException;
import java.util.*;

/**
 * Created by zxp on 2016/11/15.
 */
/**
 * Accumulates (time, conference, term) occurrence counts and writes the
 * resulting sparse tensor, dictionaries, and filtered documents to files.
 * <p>
 * Typical call order: {@link #addTriple} for every occurrence, then
 * {@link #obverse_termsFre_and_docNumPerTerms} (which builds {@code dic}),
 * then {@link #generateTensorFile}. {@code generateTensorFile} requires the
 * dictionaries created by {@code obverse_termsFre_and_docNumPerTerms};
 * calling it earlier throws a {@code NullPointerException}.
 * <p>
 * Not thread-safe.
 * <p>
 * Created by zxp on 2016/11/15.
 */
public class Tensor {

    /** &lt;time, &lt;conference, &lt;term, count&gt;&gt;&gt; — raw 3-way tensor counts. */
    Map<String, Map<String, Map<String, Integer>>> tensor = new HashMap<>();

    /** &lt;term, frequency&gt; — global occurrence count of each term. */
    Map<String, Integer> termsFre = new HashMap<>();

    /** &lt;term, &lt;"time_conference", count&gt;&gt; — how often each term occurs in each doc. */
    Map<String, Map<String, Integer>> docNumPerTerms_useless = new HashMap<>();

    /** &lt;term, set of docs ("time_conference") the term occurs in&gt;. */
    Map<String, Set<String>> docNumPerTerms = new HashMap<>();

    /**
     * Dictionary of terms that survived filtering.
     * {@code null} until {@link #obverse_termsFre_and_docNumPerTerms} runs.
     */
    Set<String> dic = null;

    /** Conferences seen while generating the tensor file; initialized together with {@code dic}. */
    Set<String> conferenceDic = null;

    /** Time values seen while generating the tensor file; initialized together with {@code dic}. */
    Set<String> timeDic = null;

    /** Documents ("time_conference") seen while generating the tensor file. */
    Set<String> docDic = null;

    /**
     * Documents: &lt;"time_conference", "term1 term2 term3 ..."&gt;.
     * NOTE: terms stored here are UNfiltered (low-frequency terms, terms with too
     * few related documents, purely numeric terms are all still present), so any
     * output code must re-filter against {@code dic}.
     */
    Map<String, String> docs = new LinkedHashMap<>();


    /**
     * Writes every term whose global frequency lies in {@code [min, max]} to
     * {@code Constraint.termsFreFile} (one "term\tfrequency" line each, sorted
     * by frequency) and logs the term counts before and after filtering.
     *
     * @param min minimum frequency, inclusive
     * @param max maximum frequency, inclusive
     */
    public void obverseTermsFre(int min, int max) {
        int len = termsFre.size();
        System.out.println("termsFre：总term数量（未经过滤）：" + len);

        Map<String, Integer> termsFreSorted = GeneralUtil.sortMapByValue(termsFre);

        int countFiltered = 0; // number of terms that passed the [min, max] filter
        // try-with-resources guarantees the writer is closed even if write() throws
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.termsFreFile)) {
            for (Map.Entry<String, Integer> e : termsFreSorted.entrySet()) {
                if (e.getValue() >= min && e.getValue() <= max) {
                    bw.write(e.getKey() + "\t" + e.getValue());
                    bw.newLine();
                    countFiltered++;
                }
            }
            bw.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
        System.out.println("termsFre：总term数量（过滤之后的）：" + countFiltered);
    }


    /**
     * Writes every term whose related-document count lies in {@code [min, max]}
     * to {@code Constraint.docNumPerTermsFile} (one "term\tdocCount" line each,
     * sorted by document count).
     *
     * @param min minimum document count, inclusive
     * @param max maximum document count, inclusive
     */
    public void obverseDocNumPerTerms(int min, int max) {
        int len = docNumPerTerms.size();
        System.out.println("docNumPerTerms：总term数量（未经过滤）：" + len);

        // collapse <term, docSet> into <term, number of documents containing the term>
        Map<String, Integer> term_docNum = new HashMap<>();
        for (Map.Entry<String, Set<String>> e : docNumPerTerms.entrySet()) {
            term_docNum.put(e.getKey(), e.getValue().size());
        }

        Map<String, Integer> term_docNum_sorted = GeneralUtil.sortMapByValue(term_docNum);

        int countFiltered = 0; // number of terms that passed the [min, max] filter
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.docNumPerTermsFile)) {
            for (Map.Entry<String, Integer> e : term_docNum_sorted.entrySet()) {
                if (e.getValue() >= min && e.getValue() <= max) {
                    bw.write(e.getKey() + "\t" + e.getValue());
                    bw.newLine();
                    countFiltered++;
                }
            }
            bw.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }
        System.out.println("docNumPerTerms：总term数量（经过最大、最小值过滤后的过滤之后的）：" + countFiltered);
    }


    /**
     * Double filter: a term survives only if its global frequency is in
     * {@code [minTermNum, maxTermNum]}, its related-document count is in
     * {@code [minDocNum, maxDocNum]}, and it is not purely numeric
     * (per {@code RegexJudge.isNumber}). Surviving terms are added to
     * {@code dic}; the "term\tfrequency\tdocCount" triples go to
     * {@code Constraint.termsFreAndDocNumPerTermsFile} and the dictionary
     * itself to {@code Constraint.dicFile}.
     * <p>
     * Also lazily creates {@code dic}, {@code conferenceDic}, {@code timeDic}
     * and {@code docDic}, which {@link #generateTensorFile} depends on.
     */
    public void obverse_termsFre_and_docNumPerTerms(int minTermNum, int maxTermNum, int minDocNum, int maxDocNum) {
        int len = termsFre.size();
        System.out.println("总term数量（未经过滤）：" + len);

        if (dic == null) { // first call: create all dictionaries together
            dic = new TreeSet<>();
            conferenceDic = new TreeSet<>();
            timeDic = new TreeSet<>();
            docDic = new TreeSet<>();
        }

        Map<String, Integer> termsFreSorted = GeneralUtil.sortMapByValue(termsFre);

        int countFiltered = 0; // number of terms that passed both filters
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.termsFreAndDocNumPerTermsFile)) {
            for (Map.Entry<String, Integer> e : termsFreSorted.entrySet()) {
                int fre = e.getValue();
                if (fre < minTermNum || fre > maxTermNum) {
                    continue; // filter on global term frequency
                }
                int docNumCurTerm = docNumPerTerms.get(e.getKey()).size(); // docs containing this term
                if (docNumCurTerm < minDocNum || docNumCurTerm > maxDocNum) {
                    continue; // filter on per-term document count
                }
                if (RegexJudge.isNumber(e.getKey())) {
                    continue; // drop purely numeric terms
                }
                dic.add(e.getKey()); // term survived all filters
                bw.write(e.getKey() + "\t" + fre + "\t" + docNumCurTerm);
                bw.newLine();
                countFiltered++;
            }
            bw.flush();
        } catch (IOException e) {
            e.printStackTrace();
        }

        // persist the dictionary
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.dicFile)) {
            writeLines(bw, dic);
        } catch (IOException e) {
            e.printStackTrace();
        }
        System.out.println("总term数量（双重过滤之后的）：" + countFiltered);
    }


    /**
     * Generates all final output files: the tensor tuples, the sparse D matrix,
     * the conference/time/doc dictionaries, and the filtered documents.
     * <p>
     * Precondition: {@link #obverse_termsFre_and_docNumPerTerms} must have run
     * first so that {@code dic} and the other dictionaries exist.
     */
    public void generateTensorFile() {
        try {
            // writeTensorTuples also POPULATES timeDic/conferenceDic/docDic,
            // so it must run before the dictionaries are written out.
            writeTensorTuples();
            writeDMatrix();
            try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.conferenceDicFile)) {
                writeLines(bw, conferenceDic);
            }
            try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.timeDicFile)) {
                writeLines(bw, timeDic);
            }
            try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.docDicFile)) {
                writeLines(bw, docDic);
            }
            writeFilteredDocs();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Writes one "time conference term count" line per surviving term to
     * {@code Constraint.tensorFile}, collecting the time/conference/doc
     * dictionaries as a side effect. No trailing newline is emitted: the
     * separator newline is written BEFORE every line but the first.
     */
    private void writeTensorTuples() throws IOException {
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.tensorFile)) {
            boolean isFirst = true;
            for (Map.Entry<String, Map<String, Map<String, Integer>>> timeEntry : tensor.entrySet()) {
                String time = timeEntry.getKey();
                timeDic.add(time);
                for (Map.Entry<String, Map<String, Integer>> confEntry : timeEntry.getValue().entrySet()) {
                    String conference = confEntry.getKey();
                    conferenceDic.add(conference);
                    docDic.add(time + "_" + conference);
                    for (Map.Entry<String, Integer> termEntry : confEntry.getValue().entrySet()) {
                        String term = termEntry.getKey();
                        if (!dic.contains(term)) {
                            continue; // term did not survive filtering
                        }
                        if (isFirst) {
                            isFirst = false;
                        } else {
                            bw.newLine();
                        }
                        bw.write(time + " " + conference + " " + term + " " + termEntry.getValue());
                    }
                }
            }
            bw.flush();
        }
    }

    /**
     * Writes the sparse representation of the D matrix to
     * {@code Constraint.DFile}: one "doc term count" line per (term, doc)
     * pair whose term survived filtering. Same no-trailing-newline convention
     * as {@link #writeTensorTuples}.
     */
    private void writeDMatrix() throws IOException {
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.DFile)) {
            boolean isFirst = true;
            for (Map.Entry<String, Map<String, Integer>> termEntry : docNumPerTerms_useless.entrySet()) {
                String term = termEntry.getKey();
                if (!dic.contains(term)) {
                    continue; // filter hoisted out of the inner loop: it does not depend on the doc
                }
                for (Map.Entry<String, Integer> docEntry : termEntry.getValue().entrySet()) {
                    if (isFirst) {
                        isFirst = false;
                    } else {
                        bw.newLine();
                    }
                    bw.write(docEntry.getKey() + " " + term + " " + docEntry.getValue());
                }
            }
            bw.flush();
        }
    }

    /**
     * Writes each document to {@code Constraint.docsFile}, one per line,
     * keeping only terms present in {@code dic} (the terms stored in
     * {@code docs} are unfiltered). Document order follows the insertion
     * order of {@code docs} (a {@code LinkedHashMap}).
     */
    private void writeFilteredDocs() throws IOException {
        try (BufferedWriter bw = FileUtil.initBufferedWriter(Constraint.docsFile)) {
            int count = 0;
            for (Map.Entry<String, String> curDoc : docs.entrySet()) {
                boolean curIsFirst = true; // controls the single-space separator between kept terms
                for (String curTerm : curDoc.getValue().split(" ")) {
                    if (dic.contains(curTerm)) {
                        if (curIsFirst) {
                            curIsFirst = false;
                        } else {
                            bw.write(" ");
                        }
                        bw.write(curTerm);
                    }
                }
                bw.newLine();
                count++;
                System.out.println("第" + count + "个文档：" + curDoc.getKey());
            }
            bw.flush();
        }
    }

    /**
     * Writes each string on its own line (every entry gets a trailing newline)
     * and flushes. The caller owns — and closes — the writer.
     */
    private void writeLines(BufferedWriter bw, Iterable<String> lines) throws IOException {
        for (String line : lines) {
            bw.write(line);
            bw.newLine();
        }
        bw.flush();
    }


    /**
     * Records one (time, conference, term) occurrence in every bookkeeping
     * structure, after applying the dataset-specific exclusions and the AAAI
     * year shift.
     *
     * @param time       publication year, e.g. "2007"
     * @param conference conference name, e.g. "AAAI"
     * @param term       a single term (no spaces expected)
     */
    public void addTriple(String time, String conference, String term) {

        if (time.equals("2005")) { // 2005 data is not needed
            return;
        }

        if (time.equals("2006") && !conference.equals("AAAI")) { // 2006 data (except AAAI) is not needed either
            return;
        }

        if (!Constraint.conferenceSet.contains(conference)) { // skip conferences outside the predefined set
            return;
        }

        // Shift AAAI years forward by one (2006->2007, 2007->2008, 2008->2009).
        // The switch evaluates `time` once, so a shifted year is never re-shifted.
        if (conference.equals("AAAI")) {
            switch (time) {
                case "2006":
                    time = "2007";
                    break;
                case "2007":
                    time = "2008";
                    break;
                case "2008":
                    time = "2009";
                    break;
                default:
                    break;
            }
        }


        // global term frequency (single lookup instead of get-then-get)
        Integer fre = termsFre.get(term);
        termsFre.put(term, fre == null ? 1 : fre + 1);


        String time_conference = time + "_" + conference; // document id


        // per-document term count
        Map<String, Integer> perDoc = docNumPerTerms_useless.get(term);
        if (perDoc == null) {
            perDoc = new HashMap<>();
            docNumPerTerms_useless.put(term, perDoc);
        }
        Integer perDocCount = perDoc.get(time_conference);
        perDoc.put(time_conference, perDocCount == null ? 1 : perDocCount + 1);


        // set of documents containing the term
        Set<String> docSet = docNumPerTerms.get(term);
        if (docSet == null) {
            docSet = new HashSet<>();
            docNumPerTerms.put(term, docSet);
        }
        docSet.add(time_conference);


        // tensor tuple counts: tensor[time][conference][term]++
        Map<String, Map<String, Integer>> conAndTerm = tensor.get(time);
        if (conAndTerm == null) {
            conAndTerm = new HashMap<>();
            tensor.put(time, conAndTerm);
        }
        Map<String, Integer> curTerm = conAndTerm.get(conference);
        if (curTerm == null) {
            curTerm = new HashMap<>();
            conAndTerm.put(conference, curTerm);
        }
        Integer tensorCount = curTerm.get(term);
        curTerm.put(term, tensorCount == null ? 1 : tensorCount + 1);


        // document text: terms appended in arrival order, space-separated
        String existing = docs.get(time_conference);
        docs.put(time_conference, existing == null ? term : existing + " " + term);
    }

    /** @return the raw &lt;time, &lt;conference, &lt;term, count&gt;&gt;&gt; tensor (live view, not a copy) */
    public Map<String, Map<String, Map<String, Integer>>> getTensor() {
        return tensor;
    }

    /** @return the global &lt;term, frequency&gt; map (live view, not a copy) */
    public Map<String, Integer> getTermsFre() {
        return termsFre;
    }

    /** @return the &lt;term, &lt;doc, count&gt;&gt; map (live view, not a copy) */
    public Map<String, Map<String, Integer>> getDocNumPerTerms_useless() {
        return docNumPerTerms_useless;
    }


}
