package tf_idf;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import nlpir.NLPIR_String;
import batching.TextBatch;

public class ReadFiles {
    /** Absolute paths of every regular file found by the most recent directory scan. */
    private static List<String> fileList = new ArrayList<String>();
    /** file path -> (word -> normalized term frequency, i.e. count / total tokens). */
    private static HashMap<String, HashMap<String, Float>> allTheTf = new HashMap<String, HashMap<String, Float>>();
    /** file path -> (word -> raw occurrence count). */
    private static HashMap<String, HashMap<String, Integer>> allTheNormalTF = new HashMap<String, HashMap<String, Integer>>();

    /**
     * Recursively collects the absolute paths of all regular files under
     * {@code filepath} into {@link #fileList} and returns that list.
     *
     * <p>Fix: the list is cleared on each top-level call. Previously every call
     * kept appending to the static list, so call chains such as
     * {@code tfidf -> tfOfAll -> readDirs} duplicated every entry.
     *
     * @param filepath directory to scan
     * @return list of absolute file paths (the shared static list)
     * @throws FileNotFoundException declared for source compatibility with callers
     * @throws IOException declared for source compatibility with callers
     */
    public static List<String> readDirs(String filepath) throws FileNotFoundException, IOException {
        fileList.clear(); // avoid duplicate entries on repeated invocations
        collectFiles(filepath);
        return fileList;
    }

    /** Recursive helper: appends all regular files under {@code filepath} to {@link #fileList}. */
    private static void collectFiles(String filepath) {
        File file = new File(filepath);
        if (!file.isDirectory()) {
            System.out.println("输入的参数应该为[文件夹名]");
            System.out.println("filepath: " + file.getAbsolutePath());
            return;
        }
        String[] children = file.list();
        if (children == null) {
            // File.list() returns null on an I/O error; previously this NPE'd.
            return;
        }
        for (int i = 0; i < children.length; i++) {
            // File.separator instead of a hard-coded "\\" keeps this portable.
            File child = new File(filepath + File.separator + children[i]);
            if (child.isDirectory()) {
                collectFiles(child.getPath());
            } else {
                fileList.add(child.getAbsolutePath());
            }
        }
    }

    /**
     * Reads the file into a string, segments it with NLPIR, and returns the
     * individual tokens.
     *
     * @param file file path
     * @return the segmented words
     * @throws IOException if the file cannot be read
     */
    public static String[] cutWord(String file) throws IOException {
        TextBatch tb = new TextBatch();
        NLPIR_String nlpir = new NLPIR_String();
        String text = tb.fileToString(file);
        // NOTE(review): assumes stringSplit(text, 1, 0) yields space-separated
        // tokens — confirm against the NLPIR wrapper's contract.
        String segmented = nlpir.stringSplit(text, 1, 0);
        return segmented.split(" ");
    }

    /**
     * Computes the normalized term frequency (count / total tokens) of every
     * word in {@code cutWordResult}.
     *
     * <p>Fixes over the previous version: words were compared with {@code !=}
     * against the string literal {@code " "} (identity, not equality), the
     * caller's array was destructively overwritten with sentinel values, and
     * counting was O(n&sup2;). This version counts in a single O(n) pass and
     * leaves the input untouched.
     *
     * @param cutWordResult segmented words of one document
     * @return word -&gt; term frequency; empty map for an empty input
     */
    public static HashMap<String, Float> tf(String[] cutWordResult) {
        HashMap<String, Float> tf = new HashMap<String, Float>();
        int wordNum = cutWordResult.length;
        if (wordNum == 0) {
            return tf; // guard: avoids division by zero below
        }
        HashMap<String, Integer> counts = countWords(cutWordResult);
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            tf.put(e.getKey(), e.getValue() / (float) wordNum);
        }
        return tf;
    }

    /**
     * Computes the raw occurrence count of every word in {@code cutWordResult}.
     *
     * <p>Same fixes as {@link #tf(String[])}: equality instead of identity
     * comparison, no destructive mutation of the input, O(n) counting.
     *
     * @param cutWordResult segmented words of one document
     * @return word -&gt; occurrence count
     */
    public static HashMap<String, Integer> normalTF(String[] cutWordResult) {
        return countWords(cutWordResult);
    }

    /** Single-pass word counting; does not modify the input array. */
    private static HashMap<String, Integer> countWords(String[] words) {
        HashMap<String, Integer> counts = new HashMap<String, Integer>();
        for (int i = 0; i < words.length; i++) {
            Integer c = counts.get(words[i]);
            counts.put(words[i], c == null ? 1 : c + 1);
        }
        return counts;
    }

    /**
     * Builds {@link #allTheTf}: for every file under {@code dir}, maps the file
     * path to its word -&gt; term-frequency table.
     *
     * <p>Fix: the map is cleared first so results from a previously processed
     * directory do not linger.
     *
     * @param dir directory containing the corpus
     * @return file path -&gt; (word -&gt; tf)
     * @throws IOException if a file cannot be read
     */
    public static Map<String, HashMap<String, Float>> tfOfAll(String dir) throws IOException {
        allTheTf.clear(); // drop results from any earlier directory
        List<String> files = readDirs(dir);
        for (String file : files) {
            allTheTf.put(file, tf(cutWord(file)));
        }
        return allTheTf;
    }

    /**
     * Builds {@link #allTheNormalTF}: for every file under {@code dir}, maps the
     * file path to its word -&gt; raw-count table.
     *
     * @param dir directory containing the corpus
     * @return file path -&gt; (word -&gt; count)
     * @throws IOException if a file cannot be read
     */
    public static Map<String, HashMap<String, Integer>> NormalTFOfAll(String dir) throws IOException {
        allTheNormalTF.clear(); // drop results from any earlier directory
        List<String> files = readDirs(dir);
        for (String file : files) {
            allTheNormalTF.put(file, normalTF(cutWord(file)));
        }
        return allTheNormalTF;
    }

    /**
     * Computes IDF = log10((1 + |D|) / |Dt|) for every word, where |D| is the
     * total number of documents and |Dt| the number of documents containing the
     * word.
     *
     * <p>Relies on {@link #NormalTFOfAll} having populated
     * {@link #allTheNormalTF} beforehand; {@code dir} itself is not re-scanned
     * (matching the original behavior). Fixes: iterates the map directly
     * instead of the possibly stale static {@code fileList}, and counts the
     * document frequency in one pass over all documents rather than the former
     * O(D&sup2;&middot;V) scan that also pushed duplicate words into a tracking list.
     *
     * @param dir unused; kept for interface compatibility
     * @return word -&gt; idf value
     */
    public static Map<String, Float> idf(String dir) throws FileNotFoundException, UnsupportedEncodingException, IOException {
        Map<String, Float> idf = new HashMap<String, Float>();
        float D = allTheNormalTF.size(); // total number of documents
        // word -> number of documents containing it (Dt)
        Map<String, Integer> docFreq = new HashMap<String, Integer>();
        for (HashMap<String, Integer> wordCounts : allTheNormalTF.values()) {
            for (String word : wordCounts.keySet()) {
                Integer c = docFreq.get(word);
                docFreq.put(word, c == null ? 1 : c + 1);
            }
        }
        for (Map.Entry<String, Integer> e : docFreq.entrySet()) {
            idf.put(e.getKey(), Log.log((1 + D) / e.getValue(), 10));
        }
        return idf;
    }

    /**
     * Computes TF*IDF for every word of every document under {@code dir}.
     *
     * <p>Fix: previously {@code idf.get(word)} was unboxed unchecked and threw
     * a NullPointerException whenever a word was absent from the idf map (e.g.
     * when {@link #allTheNormalTF} was built from a different directory); such
     * words now keep their plain tf value and are skipped.
     *
     * @param dir directory containing the corpus
     * @return file path -&gt; (word -&gt; tf*idf)
     * @throws IOException if a file cannot be read
     */
    public static Map<String, HashMap<String, Float>> tfidf(String dir) throws IOException {
        Map<String, Float> idf = ReadFiles.idf(dir);
        Map<String, HashMap<String, Float>> tf = ReadFiles.tfOfAll(dir);
        for (HashMap<String, Float> singleFile : tf.values()) {
            for (Map.Entry<String, Float> e : singleFile.entrySet()) {
                Float wordIdf = idf.get(e.getKey());
                if (wordIdf != null) { // guard against missing idf entries
                    e.setValue(e.getValue() * wordIdf);
                }
            }
        }
        return tf;
    }

}
