package org.clockwise.util;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;

import org.clockwise.driver.LinkerDriver;
import org.clockwise.feature.Account;
import org.clockwise.nlpir.TestNLPIR;

public class GlobalTFIDF {
	
	/** Number of documents buffered before each NLPIR batch-segmentation call. */
	public static int cacheSize = 3000;
	
	/**
	 * Computes per-word TF and document frequency over DATA_DIR/cleanDoc.txt
	 * (documents delimited by "doc"/"/doc" markers, one tag per line), derives
	 * IDF from the document counts, and writes the vocabulary via recordTFIDF.
	 */
	public static void computeTFIDF() {
		HashMap<String, Word> dict = new HashMap<String, Word>();
		
		String sourceFile = LinkerDriver.DATA_DIR + "cleanDoc.txt";
		MyFileReader mfr = new MyFileReader(sourceFile);
		String temp = null;
		StringBuilder sb = new StringBuilder();
		int docNum = 0;
		
		ArrayList<String> docPool = new ArrayList<String>();
		while ((temp = mfr.getNextLine()) != null) {
			if (temp.contains("<doc>")) {
				sb = new StringBuilder();
			}
			else if (temp.contains("</doc>")) {
				docNum++;
				docPool.add(sb.toString());
				
				if (docPool.size() == cacheSize) {
					String[] docs = new String[cacheSize];
					docPool.toArray(docs);
					docPool.clear();
					// Each result is a space-separated list of "word/POS-tag"
					// tokens produced by the segmenter.
					String[] results = TestNLPIR.batchTest(docs);
					for (String result : results) {
						String[] seg = result.split(" ");
						HashMap<String, Integer> docStat = new HashMap<String, Integer>();
						for (int i = 0; i < seg.length; i++) {
							String[] pair = seg[i].split("/");
							if (pair.length != 2) {
								continue; // skip malformed tokens
							}
							// NOTE(review): the full "word/tag" token is kept as
							// the key (not pair[0]), so dictionary keys carry the
							// POS tag — confirm this is intended.
							String word = seg[i];
							countWord(word, docStat);
						}
						// Fold this document's counts into the global dictionary:
						// tf accumulates count/docLength, appearDoc counts the
						// documents containing the word.
						for (String key : docStat.keySet()) {
							if (dict.containsKey(key)) {
								Word w = dict.get(key);
								w.tf += (double) docStat.get(key) / seg.length;
								w.appearDoc++;
							}
							else {
								Word w = new Word(key);
								w.tf = (double) docStat.get(key) / seg.length;
								w.appearDoc = 1;
								dict.put(key, w);
							}
						}
					}
				}
				// NOTE(review): this stops after the first cacheSize documents
				// (it fires exactly when the pool has just been flushed). Looks
				// like a debugging cap — confirm whether the whole corpus should
				// be scanned. Also note documents still sitting in docPool when
				// the loop exits are never segmented.
				if (docNum % cacheSize == 0) {
					System.out.println(docNum + " documents are scanned");
					break;
				}
			} 
			else {
				if (temp.length() > 0) {
					// FIX: the original called temp.replace(...) and discarded
					// the result (String is immutable), so spaces were never
					// actually stripped before appending.
					sb.append(temp.replace(" ", ""));
				}
			}
		}
		
		mfr.close();
		
		for (Word w : dict.values()) {
			w.calTFIDF(docNum);
		}
		
		recordTFIDF(dict);
	}

	/**
	 * Computes per-word TF and user (document) frequency over the profiles in
	 * DATA_DIR/Labeled_hot_users — one Account per line, using its name and
	 * info word segments — then derives IDF and writes the result.
	 */
	public static void computeUserTFIDF() {
		HashMap<String, Word> dict = new HashMap<String, Word>();
		
		String sourceFile = LinkerDriver.DATA_DIR + "Labeled_hot_users";
		MyFileReader mfr = new MyFileReader(sourceFile);
		String temp = null;
		int docNum = 0;
		
		while ((temp = mfr.getNextLine()) != null) {
			docNum++;
			Account a = new Account(temp);
			HashMap<String, Integer> perStat = new HashMap<String, Integer>();
			
			for (String word : a.nameSegWord) {
				countWord(word, perStat);
			}
			for (String word : a.infoSegWord) {
				countWord(word, perStat);
			}
			
			// FIX: normalize TF by the user's total token count. The original
			// divided by seg.length where seg still pointed at infoSegWord
			// only, skewing TF whenever the name segment was non-empty.
			int totalWords = a.nameSegWord.length + a.infoSegWord.length;
			for (String key : perStat.keySet()) {
				if (dict.containsKey(key)) {
					Word w = dict.get(key);
					w.tf += (double) perStat.get(key) / totalWords;
					w.appearDoc++;
				}
				else {
					Word w = new Word(key);
					w.tf = (double) perStat.get(key) / totalWords;
					w.appearDoc = 1;
					dict.put(key, w);
				}
			}
		}
		mfr.close();
		
		for (Word w : dict.values()) {
			w.calTFIDF(docNum);
		}
		
		recordTFIDF(dict);
	}
	
	/** Adds one occurrence of word to the given count map. */
	private static void countWord(String word, HashMap<String, Integer> stat) {
		Integer c = stat.get(word);
		// autoboxing instead of the deprecated new Integer(...)
		stat.put(word, c == null ? 1 : c + 1);
	}
	
	/**
	 * Sorts the vocabulary by descending IDF and appends one
	 * "word\tidf\ttf\tappearDoc" line per word to DATA_DIR/userTfidf.txt,
	 * skipping single-character words and stop words.
	 */
	private static void recordTFIDF(HashMap<String, Word> dict) {
		Word[] vocabulary = dict.values().toArray(new Word[dict.size()]);
		Arrays.sort(vocabulary);
		
		HashSet<String> stopWordDict = getStopWords();
		
		String resultFile = LinkerDriver.DATA_DIR + "userTfidf.txt";
		MyFileWriter mfw = new MyFileWriter(resultFile, true); // append mode
		for (Word w : vocabulary) {
			if (w.str.length() < 2 || stopWordDict.contains(w.str)) {
				continue;
			}
			mfw.write(w.str + "\t" + w.idf + "\t" + w.tf + "\t" + w.appearDoc + "\n");
		}
		mfw.close();
	}
	
	/** Reads DATA_DIR/StopWords.txt into a set, one trimmed word per line. */
	private static HashSet<String> getStopWords() {
		String file = LinkerDriver.DATA_DIR + "StopWords.txt";
		HashSet<String> set = new HashSet<String>();
		
		MyFileReader mfr = new MyFileReader(file);
		String temp = null;
		while ((temp = mfr.getNextLine()) != null) {
			set.add(temp.trim());
		}
		
		mfr.close();
		return set;
	}
	
	/**
	 * Loads the word-to-IDF map previously written by recordTFIDF.
	 *
	 * @return map from word to its IDF value (column 2 of userTfidf.txt)
	 */
	public static HashMap<String, Double> getUserTFIDF() {
		HashMap<String, Double> dict = new HashMap<String, Double>();
		MyFileReader mfr = new MyFileReader(LinkerDriver.DATA_DIR + "userTfidf.txt");
		String temp = null;
		while ((temp = mfr.getNextLine()) != null) {
			String[] seg = temp.trim().split("\t");
			if (seg.length < 2) {
				continue; // tolerate blank or malformed lines
			}
			dict.put(seg[0], Double.parseDouble(seg[1]));
		}
		mfr.close();
		return dict;
	}
}


/**
 * A vocabulary entry: accumulates term frequency and document frequency,
 * from which IDF is derived. Natural ordering is by DESCENDING idf, so
 * sorting puts the rarest (highest-idf) words first.
 */
class Word implements Comparable<Word> {

	public String str;    // the word itself
	public int appearDoc; // number of documents that contain the word
	public double tf, idf; // accumulated term frequency; computed inverse document frequency

	public Word(String word) {
		this.str = word;
		tf = 0.0;
		appearDoc = 0;
		idf = 0.0;
	}

	/**
	 * Computes idf = ln(totalDoc / appearDoc).
	 *
	 * @param totalDoc total number of documents scanned
	 */
	public void calTFIDF(int totalDoc) {
		idf = Math.log((double) totalDoc / appearDoc);
	}

	/** Descending order by idf: higher idf compares as "smaller". */
	@Override
	public int compareTo(Word another) {
		// FIX: typed Comparable<Word> instead of the raw interface, and
		// Double.compare instead of ==/< on doubles (safe, total ordering).
		return Double.compare(another.idf, this.idf);
	}

}
