package org.simtube.preprocess;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.PorterStemFilter;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.simtube.util.TermDF;
import org.simtube.util.TermTFIDF;

public class FilePreprocess {
	// Total number of documents in the corpus; used as N in idf = log(N / df).
	// Declared double so that FILE_NUMBER / df is floating-point division.
	public final static double FILE_NUMBER = 14035;
	private ArrayList<String> stopwords;           // stop words loaded from data/stopwords.txt
	private ArrayList<TermDF> termDF;              // one (term, document-frequency) entry per known term, positional
	private HashMap<String, Integer> termIndex;    // term string -> its index in termDF
	private ArrayList<Integer> docIDs;             // document ids collected by getFilesTFIDF()
	private ArrayList<ArrayList<TermTFIDF>> tfidf; // per-document sparse tf-idf vectors (parallel to docIDs)
	private HashMap<Integer, Integer> docIndex;    // document id -> position (1-based in makeDocIndex, 0-based in getFilesTFIDF)
	
	/**
	 * Initializes all collections, loads the stop-word list from
	 * data/stopwords.txt, and indexes the documents found under data/text/.
	 * Both helper calls tolerate missing paths (they just print a warning).
	 */
	public FilePreprocess(){
		termDF = new ArrayList<TermDF>();
		termIndex = new HashMap<String, Integer>();
		stopwords = new ArrayList<String>();
		loadStopWordsFromFile("data/stopwords.txt");
		docIndex = new HashMap<Integer, Integer>();
		docIDs = new ArrayList<Integer>();
		tfidf = new ArrayList<ArrayList<TermTFIDF>>();
		makeDocIndex("data/text/");
	}
	
	/**
	 * Builds the docId -&gt; position index from the file names in the given
	 * directory. File names are expected to be numeric document ids.
	 * Positions are 1-based; getDocIndexById() subtracts 1 to recover the
	 * 0-based offset.
	 * @param text path of the directory holding one file per document
	 */
	public void makeDocIndex(String text){
		File textDir = new File(text);
		if(!textDir.exists()){
			System.out.println("Files don't exist.");
			return;
		}
		File[] files = textDir.listFiles();
		// listFiles() returns null when the path exists but is not a
		// directory (or an I/O error occurs); the old code would NPE here
		if(files == null){
			System.out.println("Files don't exist.");
			return;
		}
		for(int i = 0; i < files.length; i++){
			// map each document id to its 1-based position in listing order
			docIndex.put(Integer.parseInt((files[i].getName())), i+1);
		}
	}
	/**
	 * Reads the content of the document with the given id from data/text/.
	 * Every line is prefixed with '\n', so a non-empty result starts with a
	 * newline (this preserves the original concatenation behavior).
	 * @param docId numeric document id (also the file name)
	 * @return the document text; null if the file does not exist; on an I/O
	 *         error, whatever was read before the failure
	 */
	public String getDocText(int docId){
		File file = new File("data/text/"+docId);
		
		if(!file.exists()){
			System.out.println("File "+docId+" doesn't exist.");
			return null;
		}
		// StringBuilder avoids the O(n^2) cost of repeated String concatenation
		StringBuilder text = new StringBuilder();
		try{
			BufferedReader br = new BufferedReader(new FileReader(file));
			try{
				String line = br.readLine();
				while(line != null){
					text.append('\n').append(line);
					line = br.readLine();
				}
			}
			finally{
				// was only closed on the success path; close unconditionally
				br.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
		return text.toString();
	}
	/**
	 * Fetches the text of several documents at once.
	 * @param docId array of numeric document ids
	 * @return one text entry per id, in the same order (an entry may be null
	 *         when the corresponding file is missing)
	 */
	public String[] getDocsText(int[] docId){
		String[] texts = new String[docId.length];
		int pos = 0;
		for(int id : docId){
			texts[pos++] = getDocText(id);
		}
		return texts;
	}
	/**
	 * Looks up the surface form of a term by its position in termDF.
	 * @param termId 0-based term index
	 * @return the term string
	 */
	public String getTermById(int termId){
		TermDF entry = termDF.get(termId);
		return entry.getTerm();
	}
	/**
	 * Loads the stop-word list (one word per line) from the given file into
	 * the stopwords field. A missing file is reported and skipped.
	 * @param fileName path of the stop-word file
	 */
	public void loadStopWordsFromFile(String fileName){
		File file = new File(fileName);
		if(!file.exists()){
			System.out.println("The stopwords file "+fileName+" doesn't exists");
			return;
		}
		try{
			BufferedReader br = new BufferedReader(new FileReader(file));
			try{
				String line = br.readLine();
				while(line != null){
					stopwords.add(line);
					line = br.readLine();
				}
				System.out.println("Stop Words Number "+stopwords.size());
			}
			finally{
				// was leaked when readLine() threw; close unconditionally
				br.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * Tokenizes one document and writes each distinct term together with its
	 * relative frequency (term count / total count) to dstPath, one
	 * "term tf" pair per line. The analysis pipeline is: SimpleAnalyzer
	 * tokenization -&gt; stop-word removal -&gt; Porter stemming -&gt; shingles
	 * (word n-grams).
	 * @param srcPath source document file
	 * @param dstPath destination term-frequency file (parent dirs are created)
	 * @return true on success; false when the source is missing or an I/O
	 *         error occurs (the old code reported success after a failure)
	 */
	public boolean wordSegmentForFile(String srcPath, String dstPath){
		File srcFile = new File(srcPath);
		if(!srcFile.exists()){
			System.out.println("Source File "+srcPath+" doesn't exist.");
			return false;
		}
		
		Analyzer analyzer = new SimpleAnalyzer();
		try{
			TokenStream ts = analyzer.tokenStream(null, new FileReader(srcFile));
			//ts = new StandardFilter(ts);
			ts = new StopFilter(false, ts, StopFilter.makeStopSet(stopwords));
			ts = new PorterStemFilter(ts);
			ts = new ShingleFilter(ts);
			TermAttribute termAttr = (TermAttribute)(ts.getAttribute(TermAttribute.class));
			
			String str;
			int index;
			int diffTermCount = 0; // number of distinct terms in the file
			int totalTermCount = 0; // total number of terms in the file
			ArrayList<String> fileTerms = new ArrayList<String>();
			ArrayList<Integer> termFrequency = new ArrayList<Integer>();
			HashMap<String, Integer> fileTermIndex = new HashMap<String, Integer>();
			while(ts.incrementToken()){
				str = termAttr.term();
				if(fileTermIndex.containsKey(str)){
					index = fileTermIndex.get(str);
					termFrequency.set(index, termFrequency.get(index) + 1);
				}
				else{
					fileTerms.add(str);
					termFrequency.add(1);
					fileTermIndex.put(str, diffTermCount);
					diffTermCount++;
				}
				totalTermCount++;
				//System.out.println(termAttr.term()); // debug only, matches getFeature()
			}
			ts.close();
			
			File parentFile = (new File(dstPath)).getParentFile();
			// getParentFile() is null for bare file names
			if(parentFile != null && !parentFile.exists()){
				parentFile.mkdirs();
			}
			BufferedWriter bw = new BufferedWriter(new FileWriter(dstPath));
			try{
				for(int i = 0; i < diffTermCount; i++){
					bw.write(fileTerms.get(i));
					bw.write(" "+(((double)termFrequency.get(i)) / totalTermCount));
					bw.newLine();
				}
			}
			finally{
				bw.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
			// was: fell through to "return true" and claimed success
			return false;
		}
		return true;
	}
	/**
	 * Computes the tf-idf feature vector of an arbitrary piece of text.
	 * @param text raw text to analyze
	 * @return a dense feature vector of length termDF.size()
	 */
	public double[] getStringFeature(String text){
		StringReader reader = new StringReader(text);
		return getFeature(reader);
	}
	/**
	 * Computes the tf-idf feature vector of an on-disk document.
	 * NOTE(review): the FileReader is handed to getFeature(), which closes
	 * the Lucene TokenStream; confirm that also closes the underlying reader.
	 * @param fileName path of the document file
	 * @return the feature vector, or null when the file does not exist
	 */
	public double[] getDocFeature(String fileName){
		double[] feature = null;
		
		File file = new File(fileName);
		if(!file.exists()){
			// was: "The stopwords file ... doesn't exists" — a copy-paste
			// from loadStopWordsFromFile; this method reads a document
			System.out.println("The file "+fileName+" doesn't exist.");
			return null;
		}
		try{
			feature = getFeature(new FileReader(file));
		}
		catch(IOException e){
			e.printStackTrace();
		}
		return feature;
	}
	/**
	 * Computes the dense tf-idf feature vector for the text supplied by the
	 * reader. The analysis pipeline mirrors wordSegmentForFiles():
	 * SimpleAnalyzer tokenization -> stop-word removal -> Porter stemming ->
	 * shingles. Only terms present in termIndex get a non-zero weight:
	 * feature[i] = tf * log(FILE_NUMBER / df).
	 * @param reader source of the text; may be null (returns null)
	 * @return vector of length termDF.size(), or null when reader is null
	 */
	public double[] getFeature(Reader reader){
		if(reader == null)
			return null;
		
		double[] feature = new double[termDF.size()];
		
		try{
			Analyzer analyzer = new SimpleAnalyzer();
			TokenStream ts = analyzer.tokenStream(null, reader);
			ts = new StopFilter(false, ts, StopFilter.makeStopSet(stopwords));
			ts = new PorterStemFilter(ts);
			ts = new ShingleFilter(ts);
	        TermAttribute termAttr = (TermAttribute)(ts.getAttribute(TermAttribute.class));
	        
        	String str;
        	int index;
	        int diffTermCount = 0; // number of distinct terms in the text
        	int totalTermCount = 0; // total number of terms in the text
        	ArrayList<String> fileTerms = new ArrayList<String>();
        	ArrayList<Integer> termFrequency = new ArrayList<Integer>();
        	HashMap<String, Integer> fileTermIndex = new HashMap<String, Integer>();
	        while(ts.incrementToken()){
	        	str = termAttr.term();
	        	if(fileTermIndex.containsKey(str)){
	        		// seen before in this text: bump its local frequency
	        		index = fileTermIndex.get(str);
	        		termFrequency.set(index, termFrequency.get(index) + 1);
	        	}
	        	else{
	        		// first occurrence: register it with local index diffTermCount
	        		fileTerms.add(str);
	        		termFrequency.add(1);
	        		fileTermIndex.put(str, diffTermCount);
	        		diffTermCount++;
	        	}
	        	totalTermCount++;
        		//System.out.println(termAttr.term());
        	}
        	ts.close();
        	
        	String s;
        	int in;
        	// project the local counts onto the global term space as tf * idf
        	for(int i = 0; i < diffTermCount; i++){
        		s = fileTerms.get(i);
        		if(termIndex.containsKey(s)){
        			in = termIndex.get(s);
        			feature[in] = (((double)termFrequency.get(fileTermIndex.get(s)))/totalTermCount) * Math.log(FILE_NUMBER / termDF.get(in).getDF());
        		}
        	}
		}
        catch(IOException e){
        	e.printStackTrace();
        }
        /*
        System.out.print(""+feature[0]);
        for(int i = 1; i < feature.length; i++){
        	System.out.print(" "+feature[i]);
        }
        System.out.println();*/
        return feature;
	}
	/**
	 * Tokenizes every file in textDir, writes a per-document term-frequency
	 * file ("term tf" per line) into tfDir, and accumulates the global term
	 * statistics: termDF gains one entry per distinct term (with its document
	 * frequency) and termIndex maps each term to its position in termDF.
	 * @param textDir directory of source documents (file names = numeric ids)
	 * @param tfDir directory that receives one term-frequency file per document
	 */
	public void wordSegmentForFiles(String textDir, String tfDir){
		File srcPath = new File(textDir);
		File dstPath = new File(tfDir);
		if(!srcPath.exists()){
			System.out.println("source files "+textDir+" don't exist.");
			// was missing: fell through and crashed on listFiles() below
			return;
		}
		if(!dstPath.exists()){
			dstPath.mkdirs();
		}
		File[] files = srcPath.listFiles();
		// null when textDir exists but is not a directory
		if(files == null){
			System.out.println("source files "+textDir+" don't exist.");
			return;
		}

		Analyzer analyzer = new SimpleAnalyzer();
		try{
			int allTermCount = 0;
			for(int i = 0; i < files.length; i++){
				System.out.println("=================="+files[i].getName()+" starts =====================");
				// pipeline: tokenize -> drop stop words -> Porter stem -> shingles
				TokenStream ts = analyzer.tokenStream(null, new FileReader(files[i]));
			
				ts = new StopFilter(false, ts, StopFilter.makeStopSet(stopwords));
				
				ts = new PorterStemFilter(ts);
				ts = new ShingleFilter(ts);
				TermAttribute termAttr = (TermAttribute)(ts.getAttribute(TermAttribute.class));
				
				String str;
				int index;
				int diffTermCount = 0; // number of distinct terms in this file
				int totalTermCount = 0; // total number of terms in this file
				ArrayList<String> fileTerms = new ArrayList<String>();
				ArrayList<Integer> termFrequency = new ArrayList<Integer>();
				HashMap<String, Integer> fileTermIndex = new HashMap<String, Integer>();
				while(ts.incrementToken()){
					str = termAttr.term();
					if(fileTermIndex.containsKey(str)){
						index = fileTermIndex.get(str);
						termFrequency.set(index, termFrequency.get(index) + 1);
					}
					else{
						fileTerms.add(str);
						termFrequency.add(1);
						fileTermIndex.put(str, diffTermCount);
						diffTermCount++;
						
						// first occurrence in this file: update the global df
						if(termIndex.containsKey(str)){
							index = termIndex.get(str);
							termDF.get(index).incrementDF();
						}
						else{
							termDF.add(new TermDF(str));
							termIndex.put(str, allTermCount);
							allTermCount++;
						}
					}
					totalTermCount++;
				}
				ts.close();
				
				BufferedWriter bw = new BufferedWriter(new FileWriter(tfDir.concat(files[i].getName())));
				try{
					for(int j = 0; j < diffTermCount; j++){
						bw.write(fileTerms.get(j));
						bw.write(" "+(((double)termFrequency.get(j)) / totalTermCount));
						bw.newLine();
					}
				}
				finally{
					// was leaked when a write threw; close unconditionally
					bw.close();
				}
			}
			System.out.println("All term count "+allTermCount);
			System.out.println("TermDF count "+termDF.size());
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * Removes terms whose document frequency is at or below the threshold.
	 * termIndex is rebuilt so its values stay consistent with the compacted
	 * termDF positions. Single pass; the old in-place ArrayList.remove loop
	 * was O(n^2).
	 * @param dfThreshold df threshold; terms with df &lt;= dfThreshold are dropped
	 */
	public void removeLessFrequecyTerms(int dfThreshold){
		ArrayList<TermDF> kept = new ArrayList<TermDF>();
		HashMap<String, Integer> hash = new HashMap<String, Integer>();
		for(int i = 0; i < termDF.size(); i++){
			TermDF t = termDF.get(i);
			if(t.getDF() > dfThreshold){
				// position in the compacted list becomes the new index
				hash.put(t.getTerm(), kept.size());
				kept.add(t);
			}
		}
		termDF = kept;
		termIndex = hash;
		System.out.println("TermDF count "+termDF.size());
	}
	/**
	 * Keeps only terms whose document frequency lies strictly between the two
	 * thresholds. termIndex is rebuilt afterwards so its values match the
	 * compacted termDF positions — the old code removed entries but left
	 * stale indexes behind for every term after a removed one (unlike its
	 * sibling removeLessFrequecyTerms), and was O(n^2) to boot.
	 * @param downThreshold lower df bound (exclusive)
	 * @param upThreshold upper df bound (exclusive)
	 */
	public void removeTerms(int downThreshold, int upThreshold){
		ArrayList<TermDF> kept = new ArrayList<TermDF>();
		HashMap<String, Integer> hash = new HashMap<String, Integer>();
		for(int i = 0; i < termDF.size(); i++){
			TermDF t = termDF.get(i);
			int df = t.getDF();
			if(df > downThreshold && df < upThreshold){
				hash.put(t.getTerm(), kept.size());
				kept.add(t);
			}
		}
		termDF = kept;
		termIndex = hash;
		System.out.println("Total Term Number is "+termDF.size());
	}
	/**
	 * Scans termDF and reports the largest document frequency seen.
	 * @return the maximum df over all known terms
	 */
	public int getMaxDF(){
		int max = termDF.get(0).getDF();
		for(TermDF entry : termDF){
			if(entry.getDF() > max){
				max = entry.getDF();
			}
		}
		System.out.println("The maximum df is "+max);
		return max;
	}
	/**
	 * Writes every known term and its document frequency ("term df" per
	 * line) to the given file, creating parent directories as needed.
	 * @param termFileName destination term file
	 */
	public void saveTermsIntoFile(String termFileName){
		File parentFile = (new File(termFileName)).getParentFile();
		// getParentFile() is null for bare file names; the old code NPE'd there
		if(parentFile != null && !parentFile.exists()){
			parentFile.mkdirs();
		}
		
		System.out.println("start to save terms into "+termFileName);
		try{
			BufferedWriter bw = new BufferedWriter(new FileWriter(termFileName));
			try{
				for(int i = 0; i < termDF.size(); i++){
					bw.write(termDF.get(i).getTerm().concat(" "+termDF.get(i).getDF()));
					bw.newLine();
				}
			}
			finally{
				// was leaked when a write threw; close unconditionally
				bw.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
		System.out.println("there are "+termDF.size()+" terms.");
	}
	/**
	 * Reads terms and their document frequencies back from a term file
	 * produced by saveTermsIntoFile(), repopulating termDF and termIndex.
	 * Splitting on the LAST space keeps shingled terms (which themselves
	 * contain spaces) intact.
	 * @param termFileName term file to load
	 */
	public void readTermsFromFile(String termFileName){
		File termFile = new File(termFileName);
		if(!termFile.exists()){
			System.out.println("Term File "+termFileName+" doesn't exist.");
			return;
		}
		try{
			BufferedReader br = new BufferedReader(new FileReader(termFileName));
			try{
				int count = 0;
				String line = br.readLine();
				while(line != null){
					int t = line.lastIndexOf(' ');
					String term = line.substring(0, t);
					String df = line.substring(t+1);
					termDF.add(new TermDF(term, Integer.parseInt(df)));
					termIndex.put(term, count);
					line = br.readLine();
					count++;
				}
			}
			finally{
				// was leaked when readLine()/parseInt threw; close unconditionally
				br.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * Builds the sparse tf-idf vector of one document from its
	 * term-frequency file ("term tf" per line). Terms no longer present in
	 * termIndex (e.g. pruned by removeLessFrequecyTerms) are skipped.
	 * @param fileName term-frequency file of the document
	 * @return sparse list of (term index, tf, idf) entries, or null when the
	 *         file does not exist
	 */
	public ArrayList<TermTFIDF> getDocTFIDF(String fileName){
		File file = new File(fileName);
		if(!file.exists()){
			System.out.println("File "+fileName+" doesn't exist.");
			return null;
		}
		
		ArrayList<TermTFIDF> termtfidf = new ArrayList<TermTFIDF>();
		try{
			BufferedReader br = new BufferedReader(new FileReader(file));
			try{
				String line = br.readLine();
				while(line != null){
					// split on the last space: shingled terms contain spaces
					int t = line.lastIndexOf(' ');
					String term = line.substring(0, t);
					String tf = line.substring(t+1);
					
					if(termIndex.containsKey(term)){
						int index = termIndex.get(term);
						double idf = Math.log(FILE_NUMBER / termDF.get(index).getDF());
						termtfidf.add(new TermTFIDF(index, Double.parseDouble(tf), idf));
					}
					line = br.readLine();
				}
			}
			finally{
				// was leaked when parsing threw; close unconditionally
				br.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
		return termtfidf;
	}
	/**
	 * Loads the per-document tf files from tfDir and builds the in-memory
	 * tf-idf table (tfidf / docIDs / docIndex) used by saveFeatureIntoFile().
	 * NOTE(review): this stores 0-based positions in docIndex while
	 * makeDocIndex() stores 1-based ones, so getDocIndexById() is off by one
	 * after this runs — confirm intended usage.
	 * @param tfDir directory containing one term-frequency file per document
	 */
	public void getFilesTFIDF(String tfDir){
		File srcPath = new File(tfDir);
		if(!srcPath.exists()){
			System.out.println("data path "+tfDir+" doesn't exist.");
			return;
		}
		int docId;
		File[] files = srcPath.listFiles();
		for(int i = 0; i < files.length; i++){
			docId = Integer.parseInt(files[i].getName());
			tfidf.add(new ArrayList<TermTFIDF>());
			docIDs.add(docId);
			docIndex.put(docId, i);
		}
		for(int i = 0; i < files.length; i++){
			System.out.println("================="+files[i].getName()+" starts ====================");	
			// Store at slot i, reserved for this file in the loop above.
			// The old code indexed by the raw document id
			// (Integer.parseInt(name)), which is out of bounds whenever ids
			// exceed the number of files and fills the wrong slot otherwise.
			tfidf.set(i, getDocTFIDF(files[i].getAbsolutePath()));
		}
		System.out.println("=================TFIDF Finished====================");
	}
	/**
	 * Writes the in-memory tf-idf table (built by getFilesTFIDF()) to a
	 * file, one "docId termIndex tfidf" triple per line.
	 * @param featureFileName destination feature file
	 */
	public void saveFeatureIntoFile(String featureFileName){
		try{
			BufferedWriter bw = new BufferedWriter(new FileWriter(featureFileName));
			try{
				ArrayList<TermTFIDF> feature; 
				TermTFIDF termTFIDF;
				for(int i = 0; i < docIDs.size(); i++){
					feature = tfidf.get(docIndex.get(docIDs.get(i)));
					for(int j = 0; j < feature.size(); j++){
						termTFIDF = feature.get(j);
						bw.write(""+docIDs.get(i)+" "+termTFIDF.getTermIndex()+" "+termTFIDF.getTFIDF());
						bw.newLine();
					}
				}
			}
			finally{
				// was leaked when a write threw; close unconditionally
				bw.close();
			}
			System.out.println("=================TFIDF Finished====================");
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * TODO: unimplemented stub. Currently only opens and immediately closes
	 * the feature file (so a missing file surfaces as a printed
	 * FileNotFoundException); no data is read into memory yet.
	 * @param featureFileName feature file to load
	 */
	public void readFeatureFromFile(String featureFileName){
		try{
			BufferedReader br = new BufferedReader(new FileReader(featureFileName));
			br.close();
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * Computes the tf-idf vector of every document in tfDir and writes it to
	 * the feature file, one entry per line as
	 * "(termIndex+1) docPosition tfidf".
	 * NOTE(review): the original comment claimed the order "docId, dim,
	 * tf-idf", but the code writes the dimension first — kept as-is since
	 * downstream consumers may depend on the actual layout.
	 * @param tfDir directory of per-document term-frequency files
	 * @param featureFileName destination feature file
	 */
	public void getFeatureFile(String tfDir, String featureFileName){
		File srcPath = new File(tfDir);
		if(!srcPath.exists()){
			System.out.println("data path "+tfDir+" doesn't exist.");
			return;
		}
		File featurePath = (new File(featureFileName)).getParentFile();
		// getParentFile() is null for bare file names; the old code NPE'd there
		if(featurePath != null && !featurePath.exists()){
			featurePath.mkdirs();
		}
		
		try{
			BufferedWriter bw = new BufferedWriter(new FileWriter(featureFileName));
			try{
				File[] files = srcPath.listFiles();
				ArrayList<TermTFIDF> feature; 
				TermTFIDF termTFIDF;
				for(int i = 0; i < files.length; i++){
					System.out.println("================="+files[i].getName()+" starts ====================");
					feature = getDocTFIDF(files[i].getAbsolutePath());
					for(int j = 0; j < feature.size(); j++){
						termTFIDF = feature.get(j);
						bw.write((termTFIDF.getTermIndex()+1)+" "+docIndex.get(Integer.parseInt(files[i].getName()))+" "+termTFIDF.getTFIDF());
						bw.newLine();
					}
				}
			}
			finally{
				// was leaked when a write threw; close unconditionally
				bw.close();
			}
			System.out.println("=================TFIDF Finished====================");
		}
		catch(IOException e){
			e.printStackTrace();
		}
	}
	/**
	 * Builds an inverted index (term index -&gt; list of document ids that
	 * contain the term) from the per-document tf files and writes it out,
	 * one line per term: "termIndex docId docId ...".
	 * @param tfDir directory of per-document term-frequency files
	 * @param indexFileName destination index file (parent dirs are created)
	 */
	public void saveInvertIndex(String tfDir, String indexFileName){
		File tfPath = new File(tfDir);
		if(!tfPath.exists()){
			System.out.println("TF File "+tfDir+" doesn't exist.");
			return;
		}
		
		System.out.println("=======================start to construct invert index========================");
		File indexPath = (new File(indexFileName)).getParentFile();
		// getParentFile() is null for bare file names; the old code NPE'd there
		if(indexPath != null && !indexPath.exists()){
			indexPath.mkdirs();
		}
		
		// docs.get(t) collects the ids of all documents containing term t
		ArrayList<ArrayList<Integer>> docs = new ArrayList<ArrayList<Integer>>();
		for(int i = 0; i < termDF.size(); i++){
			docs.add(new ArrayList<Integer>());
		}
		try{
			File[] files = tfPath.listFiles();
			for(int i = 0; i < files.length; i++){
				BufferedReader br = new BufferedReader(new FileReader(files[i]));
				try{
					String str;
					String line = br.readLine();
					while(line != null){
						// term is everything before the last space (tf follows it)
						str = line.substring(0, line.lastIndexOf(' '));
						if(termIndex.containsKey(str)){
							docs.get(termIndex.get(str)).add(Integer.parseInt(files[i].getName()));
						}
						line = br.readLine();
					}
				}
				finally{
					// was leaked when parsing threw; close unconditionally
					br.close();
				}
			}
			
			BufferedWriter bw = new BufferedWriter(new FileWriter(indexFileName));
			try{
				for(int i = 0; i < docs.size(); i++){
					bw.write(""+i);
					for(int j = 0; j < docs.get(i).size(); j++){
						bw.write(" "+docs.get(i).get(j));
					}
					bw.newLine();
				}
			}
			finally{
				bw.close();
			}
		}
		catch(IOException e){
			e.printStackTrace();
		}
		System.out.println("======================= invert index finished ========================");
	}
	
	/**
	 * Runs the full offline pipeline: segment all documents under
	 * data/text/, prune rare terms (df <= 10), then emit the tf-idf feature
	 * file, the inverted index, and the term list under data/.
	 */
	public void preprocess(){
		wordSegmentForFiles("data/text/", "data/tf/");
		removeLessFrequecyTerms(10);
		getFeatureFile("data/tf/", "data/tfidf.txt");
		saveInvertIndex("data/tf/", "data/index.txt");
		saveTermsIntoFile("data/terms.txt");
	}
	
	/**
	 * Converts a document id into the 0-based position recorded by
	 * makeDocIndex() (which stores 1-based positions).
	 * @param id numeric document id
	 * @return 0-based document position
	 */
	public int getDocIndexById(int id){
		int oneBased = docIndex.get(id);
		return oneBased - 1;
	}
	/**
	 * Entry point: runs the complete preprocessing pipeline over data/text/.
	 */
	public static void main(String args[]) {
		FilePreprocess filePreprocess = new FilePreprocess();
		filePreprocess.preprocess();
	}
	
	private class TermDFIndex{
		int df;
		int index;
		
		public TermDFIndex(int df, int index){
			this.df = df;
			this.index = index;
		}
	}
	
	/**
	 * Reorders termDF by descending document frequency.
	 * NOTE(review): termIndex is NOT updated here, so term -&gt; position
	 * lookups are stale after sorting — confirm callers rebuild it.
	 */
	public void sortTermDF(){
		ArrayList<TermDFIndex> termDfIndex = new ArrayList<TermDFIndex>();
		for(int i = 0; i < termDF.size(); i++){
			termDfIndex.add(new TermDFIndex(termDF.get(i).getDF(), i));
		}
		// Typed comparator (the old one was Comparator<Object> with casts);
		// explicit comparison avoids the int-overflow hazard of "b.df - a.df".
		Collections.sort(termDfIndex, new Comparator<TermDFIndex>(){
			public int compare(TermDFIndex a, TermDFIndex b){
				if(b.df > a.df) return 1;
				if(b.df < a.df) return -1;
				return 0;
			}
		});
		ArrayList<TermDF> termdf = new ArrayList<TermDF>();
		for(int i = 0; i < termDfIndex.size(); i++){
			termdf.add(termDF.get(termDfIndex.get(i).index));
		}
		termDF = termdf;
	}
}
