package clustering;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;


import thesis.FSModule;
import thesis.InternTweetInfoExtractor;
import thesis.WordsFeatureExtractor;


import com.aliasi.cluster.KMeansClusterer;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.util.FeatureExtractor;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;

import experiment.Dimension;


/**
 * Partitions a set of {@link ClusterItem}s into a fixed number of clusters
 * with LingPipe's k-means, using as features the stemmed words whose
 * document frequency across the tweet dataset exceeds a threshold.
 *
 * Side effects: reads the tweet collection via {@link FSModule} and fills
 * {@link #clusterDictionary} with one entry per produced cluster.
 */
public class ClusteringAlgorithm {
	/** Number of clusters (k) requested from k-means. */
	private static final int NUMBEROFCLUSTERS = 10;
	/** Maximum number of k-means iterations. */
	private static final int NUMBEROFITERATIONS = 20;
	
	/**
	 * Minimum document frequency (fraction of the dataset's tweets that
	 * contain the word) for a stemmed word to be kept as a feature.
	 */
	private static final double frequencyThreshold = 0.005;
	
	/** Items to be clustered; supplied by the caller, not copied. */
	private Set<ClusterItem> clusterItems;
	/** clusterId -> (tweetId -> 1.0) for every cluster produced by {@link #cluster()}. */
	private Dimension clusterDictionary = new Dimension();
	
	public ClusteringAlgorithm(Set<ClusterItem> clusterItems) {
		this.clusterItems = clusterItems;
	}
	
	/**
	 * Runs the full pipeline: counts per-word document frequencies over the
	 * whole tweet dataset, selects the feature vocabulary, clusters
	 * {@link #clusterItems} with k-means, and records each cluster's tweet
	 * ids into {@link #clusterDictionary}.
	 *
	 * @return one {@link Cluster} per k-means partition, ids "clus1".."clusN"
	 */
	public Set<Cluster> cluster(){
		Map<String, Integer> wordsCountTable = countWordDocumentFrequencies();
		long datasetDimension = FSModule.getDatasetDimension();
		
		// NOTE(review): if datasetDimension is 0 the frequency below is
		// Infinity/NaN; assumed non-zero whenever tweets exist — confirm.
		List<String> featureWords = selectFeatureWords(wordsCountTable, datasetDimension);
		
		System.out.println("CLUSTERING FEATURE WORDS: " + featureWords.size());
		
		FeatureExtractor<ClusterItem> fExtractor = new WordsFeatureExtractor<ClusterItem>(featureWords);
		KMeansClusterer<ClusterItem> clustering = new KMeansClusterer<ClusterItem>(fExtractor, NUMBEROFCLUSTERS, NUMBEROFITERATIONS, true, 0);
		
		Set<Set<ClusterItem>> clusters = clustering.cluster(clusterItems);
		
		Set<Cluster> result = new HashSet<Cluster>();
		
		int clusterCount = 1;
		// FIX: loop variable renamed from "clusterItems", which shadowed the
		// field of the same name.
		for (Set<ClusterItem> members : clusters){
			String clusterId = "clus" + clusterCount;
			result.add(new Cluster(clusterId, members));
			
			HashMap<Long, Double> tweetsContainedInCluster = new HashMap<Long, Double>();
			for (ClusterItem member : members){
				// Weight is always 1.0: membership is binary.
				tweetsContainedInCluster.put(member.getTweetId(), 1.0);
			}
			clusterDictionary.value2obj.put(clusterId, tweetsContainedInCluster);
			clusterCount++;
		}
		
		return result;
	}
	
	/**
	 * Scans every tweet once and counts, for each stemmed word, the number
	 * of distinct tweets containing it (document frequency — a tweet
	 * contributes at most 1 per word thanks to the per-tweet set).
	 */
	private Map<String, Integer> countWordDocumentFrequencies(){
		// FIX: local, single-threaded map — plain HashMap instead of the
		// legacy synchronized Hashtable.
		Map<String, Integer> wordsCountTable = new HashMap<String, Integer>();
		
		DBCursor tweets = FSModule.readTweetsInfoData();
		try {
			while (tweets.hasNext()){
				DBObject tweet = tweets.next();
				
				InternTweetInfoExtractor infoExtr = new InternTweetInfoExtractor(tweet);
				String preprocessedText = infoExtr.getPreprocessedText();
				
				// Deduplicate stemmed words within this tweet.
				Set<String> words = new HashSet<String>();
				StringTokenizer tk = new StringTokenizer(preprocessedText);
				while (tk.hasMoreTokens()){
					words.add(PorterStemmerTokenizerFactory.stem(tk.nextToken()));
				}
				
				for (String word : words){
					Integer wordCount = wordsCountTable.get(word);
					wordsCountTable.put(word, wordCount == null ? 1 : wordCount + 1);
				}
			}
		} finally {
			// FIX: the cursor was never closed — server-side resource leak.
			tweets.close();
		}
		
		return wordsCountTable;
	}
	
	/**
	 * Keeps the words whose document frequency (count / datasetDimension)
	 * reaches {@link #frequencyThreshold}.
	 *
	 * @param wordsCountTable  per-word document counts
	 * @param datasetDimension total number of tweets in the dataset
	 */
	private List<String> selectFeatureWords(Map<String, Integer> wordsCountTable, long datasetDimension){
		List<String> featureWords = new ArrayList<String>();
		
		// entrySet() avoids a second hash lookup per key.
		for (Map.Entry<String, Integer> entry : wordsCountTable.entrySet()){
			double frequency = (double) entry.getValue() / datasetDimension;
			
			if (frequency >= frequencyThreshold){
				featureWords.add(entry.getKey());
			}
		}
		
		return featureWords;
	}
	
	/** @return the clusterId -> tweet-weights mapping built by {@link #cluster()}. */
	public Dimension getClusterDictionary() {
		return clusterDictionary;
	}
}
