package com.example.nlp_clusteringproject.util;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringBufferInputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Computes a TF-IDF weight matrix for a corpus of documents and writes
 * per-document feature vectors (plus a category label) to train/test text
 * files consumed by a downstream classifier.
 *
 * <p>Not thread-safe: {@link #tf_idfMatrix} is static mutable state shared
 * across all instances — kept as-is because external callers may read it
 * statically.
 */
public class tf_idfAlgorithms extends Algorithm {

    // Output paths consumed by the classifier code.
    // NOTE(review): hard-coded Windows paths — consider making these configurable.
    private static final String TRAIN_FILE = "E:\\bishe\\train.txt";
    private static final String TEST_FILE = "E:\\bishe\\test.txt";

    List<String> constructCodeWords;   // vocabulary (feature words) in first-seen order
    List<String> wholeDataset;         // all documents, one raw string per document
    static double[][] tf_idfMatrix;    // [document][word] tf-idf weights (raw counts during construction)
    double[][] wordnumMatrix;          // [document][word] raw term counts (kept for topic())
    double[][] tfMatrix;               // [document][word] term frequencies
    double[] idfMatrix;                // [word] inverse document frequencies
    NlpPreProcess nlp;

    // word -> column index; replaces O(n) List.contains/indexOf scans with O(1) lookups.
    private Map<String, Integer> wordIndex;

    /**
     * @param data the whole dataset: one string per document
     */
    tf_idfAlgorithms(List<String> data) {
        wholeDataset = data; // total number of documents = data.size()
    }

    /**
     * Builds the vocabulary: every distinct token occurring anywhere in the
     * dataset, in order of first appearance. Also populates the word-to-index
     * map used by {@link #constructTfidf}.
     *
     * @return the vocabulary list (also stored in {@link #constructCodeWords})
     */
    public List<String> constructCodeWords() {
        nlp = new NlpPreProcess();
        List<String> codeWords = new ArrayList<>();
        wordIndex = new HashMap<>();
        for (String document : wholeDataset) {
            for (String token : nlp.textTokenization(document)) {
                // putIfAbsent returns null only on first sight of the token,
                // so each word is added exactly once with a stable column index.
                if (wordIndex.putIfAbsent(token, codeWords.size()) == null) {
                    codeWords.add(token);
                }
            }
        }
        constructCodeWords = codeWords;
        return codeWords;
    }

    /**
     * Computes the full tf-idf matrix and writes the feature files.
     *
     * <p>tf = (occurrences of a word in a document) / (total words in that document);
     * idf = log(documentCount / (documentFrequency + 1)) with Laplace smoothing.
     *
     * <p>The first {@code fileNumber * categoryNumber} documents are written to
     * the train file ({@code fileNumber} consecutive documents per category);
     * the remainder go to the test file.
     *
     * @param fileNumber      training documents per category
     * @param categoryNumber  number of categories
     * @param testFileNumber  total number of test documents (assumed >= categoryNumber,
     *                        otherwise the test-label division by zero would throw)
     * @return the tf-idf matrix, indexed [document][word]
     */
    public double[][] constructTfidf(int fileNumber, int categoryNumber, int testFileNumber) {
        constructCodeWords();

        int codeSize = constructCodeWords.size();
        int documentSize = wholeDataset.size();
        // Java zero-initializes arrays; no explicit clearing loop needed.
        tfMatrix = new double[documentSize][codeSize];
        tf_idfMatrix = new double[documentSize][codeSize];
        wordnumMatrix = new double[documentSize][codeSize];

        // Pass 1: raw counts into tf_idfMatrix, then tf = count / document length.
        for (int i = 0; i < documentSize; i++) {
            List<String> tokens = nlp.textTokenization(wholeDataset.get(i));
            for (String token : tokens) {
                Integer index = wordIndex.get(token); // O(1) vs original indexOf scan
                if (index != null) {
                    tf_idfMatrix[i][index] += 1.0;
                }
            }
            double documentWordSize = tokens.size();
            if (documentWordSize > 0) { // guard: an empty document would otherwise yield NaN tf values
                for (int j = 0; j < codeSize; j++) {
                    tfMatrix[i][j] = tf_idfMatrix[i][j] / documentWordSize;
                }
            }
        }

        // Pass 2: idf = log(documentSize / (docFreq + 1)) — Laplace smoothing.
        idfMatrix = new double[codeSize];
        for (int j = 0; j < codeSize; j++) {
            int docFreq = 0;
            for (int i = 0; i < documentSize; i++) {
                if (tf_idfMatrix[i][j] != 0.0) {
                    docFreq++;
                }
            }
            idfMatrix[j] = Math.log(documentSize / (docFreq + 1.0));
        }

        // Pass 3: snapshot raw counts for topic(), then overwrite with tf * idf.
        for (int i = 0; i < documentSize; i++) {
            for (int j = 0; j < codeSize; j++) {
                wordnumMatrix[i][j] = tf_idfMatrix[i][j];
                tf_idfMatrix[i][j] = tfMatrix[i][j] * idfMatrix[j];
            }
        }

        // Persist per-document tf-idf vectors plus a category label
        // (the classifier code reads these txt files).
        double category = 0.0; // running train-set category label
        try (BufferedWriter trainOut = new BufferedWriter(new FileWriter(TRAIN_FILE));
             BufferedWriter testOut = new BufferedWriter(new FileWriter(TEST_FILE))) {
            int trainSize = fileNumber * categoryNumber;
            for (int i = 0, k = 1; i < documentSize; i++) {
                if (i < trainSize) {
                    writeRow(trainOut, tf_idfMatrix[i]);
                    // Train label written as a double string, e.g. "0.0".
                    trainOut.write("" + category);
                    if (k == fileNumber) { // advance to the next category every fileNumber docs
                        category = category + 1.0;
                        k = 0;
                    }
                    trainOut.write("\n");
                    k++;
                } else {
                    writeRow(testOut, tf_idfMatrix[i]);
                    // NOTE(review): test label is an int string (e.g. "0") while the train
                    // label is a double string ("0.0") — confirm the classifier accepts both.
                    // Integer division; assumes testFileNumber >= categoryNumber.
                    testOut.write("" + (i - trainSize) / (testFileNumber / categoryNumber));
                    testOut.write("\n");
                }
            }
            System.out.println("The train data‘s tfidf matrix after dimension reduction  has been wrote into the train.txt");
        } catch (IOException e) {
            // Previously swallowed silently; at least report the failure.
            System.err.println("Failed to write tf-idf feature files: " + e);
        }

        return tf_idfMatrix;
    }

    /** Writes one feature row as space-separated values (trailing space preserved). */
    private static void writeRow(BufferedWriter out, double[] row) throws IOException {
        for (double v : row) {
            out.write("" + v);
            out.write(" ");
        }
    }

    /**
     * Prints and returns, for each category, the words whose summed raw count
     * over that category's training documents reaches the n-th highest count
     * (ties included, so a list may contain more than n words).
     *
     * <p>Must be called after {@link #constructTfidf}, which fills
     * {@link #wordnumMatrix}.
     *
     * @param n              number of top words requested per category
     * @param categoryNumber number of categories
     * @param fileNumber     training documents per category
     * @return one word list per category
     */
    public List<List<String>> topic(int n, int categoryNumber, int fileNumber) {
        List<List<String>> result = new ArrayList<>();
        int codeSize = constructCodeWords.size();
        double[][] topicArray = new double[categoryNumber][codeSize];
        // Sum raw term counts over each category's block of training documents.
        for (int i = 0; i < categoryNumber; i++) {
            for (int j = 0; j < fileNumber; j++) {
                for (int k = 0; k < codeSize; k++) {
                    topicArray[i][k] += wordnumMatrix[i * fileNumber + j][k];
                }
            }
        }
        for (int i = 0; i < categoryNumber; i++) {
            double[] sorted = Arrays.copyOf(topicArray[i], codeSize);
            Arrays.sort(sorted);
            // Threshold = n-th largest count. Guard: the original indexed
            // codeSize - n directly and threw when n > codeSize.
            double topicThreshold = sorted[Math.max(codeSize - n, 0)];
            List<String> top = new ArrayList<>();
            for (int j = 0; j < codeSize; j++) {
                if (topicArray[i][j] >= topicThreshold) {
                    top.add(constructCodeWords.get(j));
                    System.out.print(constructCodeWords.get(j));
                    System.out.print(" ");
                }
            }
            result.add(top);
            System.out.println(" ");
        }
        return result;
    }
}
