package com.example.nlp_clusteringproject.util;

import edu.stanford.nlp.simple.Sentence;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class NlpPreProcess {

    // Fields intentionally stay package-private to preserve the original
    // external interface for any same-package callers.
    String fullText;            // raw text of the last file loaded via readText()
    Sentence sentPre;           // most recent CoreNLP Sentence wrapper
    List<String> lemmaWords;    // lemmas produced by textStemAndLem()
    List<String> stopWords;     // stop-word list loaded by removeStopWords()
    List<String> listNerString; // NER tags produced by textNer()
    String fileName;            // path of the document to process
    String stopFileName;        // path of the stop-word file

    /**
     * Reads the whole file into a single string, one "\n" before each line
     * (so the result starts with a newline — quirk kept for compatibility).
     * <p>
     * Fixes over the original: try-with-resources (no reader leak),
     * UTF-8 via {@link Files#newBufferedReader} instead of the
     * platform-default charset, and StringBuilder instead of O(n^2)
     * string concatenation.
     *
     * @param file file to read
     * @return file contents, or whatever was read before an I/O error
     */
    private String txt2String(File file) {
        StringBuilder result = new StringBuilder();
        try (BufferedReader br = Files.newBufferedReader(file.toPath())) {
            String line;
            while ((line = br.readLine()) != null) {
                result.append('\n').append(line);
            }
        } catch (IOException e) {
            // Best-effort, matching the original: log and return what we have.
            e.printStackTrace();
        }
        return result.toString();
    }

    /** Sets the path of the document to process. */
    public void setFile(String FileName) {
        fileName = FileName;
    }

    /** Sets the path of the stop-word file (one word per line). */
    public void setStopFile(String FileName) {
        stopFileName = FileName;
    }

    /**
     * Loads the given file and caches its contents in {@link #fullText}.
     *
     * @param fileName path of the file to load
     * @return the file contents as a single string
     */
    public String readText(String fileName) {
        File file = new File(fileName);
        fullText = txt2String(file);
        return fullText;
    }

    /**
     * Tokenizes the given text with CoreNLP. Also caches the Sentence
     * wrapper in {@link #sentPre}.
     *
     * @param text text to tokenize
     * @return the list of tokens
     */
    public List<String> textTokenization(String text) {
        sentPre = new Sentence(text);
        return sentPre.words();
    }

    /**
     * Stemming and lemmatization: lemmatizes {@link #fullText} and caches
     * the lemmas in {@link #lemmaWords}.
     * <p>
     * NOTE(review): returns the ORIGINAL {@link #fullText}, not the
     * lemmatized text — preserved as-is because callers may rely on it.
     *
     * @return the unmodified {@link #fullText}
     */
    public String textStemAndLem() {
        sentPre = new Sentence(fullText);
        lemmaWords = sentPre.lemmas();
        return fullText;
    }

    /**
     * Removes stop words from {@link #lemmaWords}.
     * Loads the stop-word list from {@link #stopFileName}, then joins all
     * lemmas not in that list, each followed by a single space.
     * <p>
     * Fix over the original: if the stop-word file cannot be read,
     * {@link #stopWords} is set to an empty list instead of remaining
     * {@code null} and throwing an NPE in the loop below.
     *
     * @return the filtered text as a StringBuilder
     */
    public StringBuilder removeStopWords() {
        try {
            stopWords = Files.readAllLines(Paths.get(stopFileName));
        } catch (IOException e) {
            e.printStackTrace();
            stopWords = new ArrayList<>(); // BUG FIX: avoid NPE on read failure
        }

        StringBuilder builder = new StringBuilder();
        for (String word : lemmaWords) {
            if (!stopWords.contains(word)) {
                builder.append(word);
                builder.append(' ');
            }
        }
        return builder;
    }

    /**
     * Named-entity recognition pass: loads the file, lemmatizes, removes
     * stop words, then re-joins the tokens so that consecutive tokens
     * tagged ORGANIZATION (or consecutive tokens tagged PERSON) are glued
     * together without a separating space. NER tags are cached in
     * {@link #listNerString}.
     * <p>
     * Fix over the original: the final token is no longer dropped (the
     * pairwise loop only ever emitted tokens 0..size-2).
     *
     * @return the rebuilt text with multi-token entities fused
     */
    public String textNer() {
        readText(fileName);
        textStemAndLem();
        StringBuilder preText = removeStopWords();
        sentPre = new Sentence(preText.toString());

        List<String> nerTags = sentPre.nerTags();
        List<String> tokens = sentPre.words();
        final String org = "ORGANIZATION";
        final String person = "PERSON";
        StringBuilder builder = new StringBuilder();
        for (int i = 0; i < nerTags.size() - 1; i++) {
            String cur = nerTags.get(i);
            String next = nerTags.get(i + 1);
            // Glue this token to the next when both carry the same
            // entity tag of interest (ORG-ORG or PERSON-PERSON).
            boolean fuse = (cur.equals(org) && next.equals(org))
                    || (cur.equals(person) && next.equals(person));
            builder.append(tokens.get(i));
            if (!fuse) {
                builder.append(' ');
            }
        }
        // BUG FIX: emit the last token, which the original loop skipped.
        if (!tokens.isEmpty()) {
            builder.append(tokens.get(tokens.size() - 1));
            builder.append(' ');
        }
        listNerString = nerTags;
        return builder.toString();
    }

    /**
     * Counts every contiguous n-gram of {@code wholeText} (tokens
     * concatenated without separator) and returns, in first-seen order,
     * those occurring at least {@code tokenThresHold} times.
     * <p>
     * Fix over the original: returns an empty list when the text has
     * fewer than {@code n} tokens instead of throwing
     * {@code NegativeArraySizeException}; the redundant
     * {@code contains} + {@code indexOf} double scan is collapsed into
     * a single {@code indexOf}.
     *
     * @param n              n-gram size
     * @param tokenThresHold minimum occurrence count to keep an n-gram
     * @param wholeText      token list to scan
     * @return n-grams meeting the threshold, in order of first appearance
     */
    public List<String> textNGramKeyWords(int n, int tokenThresHold, List<String> wholeText) {
        List<String> keyWords = new ArrayList<>();
        int windows = wholeText.size() - n + 1;
        if (windows <= 0) {
            return keyWords; // BUG FIX: text shorter than n -> no n-grams
        }
        // keyWords.size() can never exceed the number of windows,
        // so this array is large enough to count every distinct gram.
        int[] counts = new int[windows];
        StringBuilder gram = new StringBuilder();
        for (int i = 0; i < windows; i++) {
            gram.setLength(0);
            for (int j = 0; j < n; j++) {
                gram.append(wholeText.get(i + j));
            }
            String temp = gram.toString();
            int index = keyWords.indexOf(temp);
            if (index < 0) {
                keyWords.add(temp);
                index = keyWords.size() - 1;
            }
            counts[index]++;
        }
        List<String> cleanKeyWords = new ArrayList<>();
        for (int i = 0; i < keyWords.size(); i++) {
            if (counts[i] >= tokenThresHold) {
                cleanKeyWords.add(keyWords.get(i));
            }
        }
        return cleanKeyWords;
    }

    /**
     * Rewrites {@code wholeText} as a space-joined string in which any
     * window of {@code n} tokens whose concatenation is in
     * {@code keyWords} is emitted as that single fused gram (and the
     * cursor skips past it); all other tokens are emitted individually.
     * <p>
     * Fix over the original: a single position cursor replaces the
     * fragile loop-bound + tail-flush logic, which could silently drop
     * the final word(s) when a keyword matched near the end of the text,
     * and could loop forever for {@code n <= 0}.
     *
     * @param n         n-gram size
     * @param wholeText token list to rewrite
     * @param keyWords  fused n-grams to keep together
     * @return the rebuilt text, each emitted unit followed by one space
     */
    public String textNGram(int n, List<String> wholeText, List<String> keyWords) {
        StringBuilder gramWholeText = new StringBuilder();
        StringBuilder window = new StringBuilder();
        int i = 0;
        while (i < wholeText.size()) {
            String gram = null;
            if (n > 0 && i + n <= wholeText.size()) {
                window.setLength(0);
                for (int j = 0; j < n; j++) {
                    window.append(wholeText.get(i + j));
                }
                gram = window.toString();
            }
            if (gram != null && keyWords.contains(gram)) {
                gramWholeText.append(gram);
                gramWholeText.append(" ");
                i += n; // consume the whole matched window
            } else {
                gramWholeText.append(wholeText.get(i));
                gramWholeText.append(" ");
                i++;
            }
        }
        return gramWholeText.toString();
    }
}
