package tz.preprocessing;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import tz.data.Article;
import tz.data.Constantes;
import tz.preprocessing.bagOfWords.BagOfWords;
import tz.preprocessing.bagOfWords.TfIdf;
import tz.preprocessing.lexical.WordExtract;
import tz.preprocessing.stopwords.StopWordsExtract;
import tz.preprocessing.suffixStripping.Stemmer;
import tz.util.TzHelper;

/**
 * Preprocessing pipeline:
 * 1. read all test articles; 2. remove stop words from the articles;
 * 3. run the stemmer over the remaining words; 4. build a word list
 * (bag of words) per article, compute TF-IDF, and write the topic
 * clusters to a file.
 *
 * @author dejvid
 */
public class Article2WordsCount
{
	// Bag of words (word -> occurrence count) computed for each kept article.
	public static Map<Article, BagOfWords>	articleCountWords	= null;
	// word -> number of documents the word appears in (document frequency).
	public static TreeMap<String, Integer>	commonwords			= null;
	// topic string -> ids of the articles assigned to that topic cluster.
	public static TreeMap<String, LinkedList<Integer>> clustersTopics = null;

	
	public static void main(String[] args)
	{
		articleCountWords	= new HashMap<Article, BagOfWords>();
		commonwords			= new TreeMap<String, Integer>();
		clustersTopics = new TreeMap<String, LinkedList<Integer>>();
		
		Choose10Topics chose10Topics = new Choose10Topics();
		StopWordsExtract stopWords = new StopWordsExtract(new File(
				"data/common-english-words.txt"));
		Map<Article, List<String>> articleMap = new HashMap<Article, List<String>>();
		// 1. Read all articles.
		System.out.print("Reading all articles...");
		TzHelper.tic();
		List<Article> lstArticles = WordExtract.getAllArticles();
		// BUG FIX: the original hard-coded subList(0, 5000) and a fixed loop
		// bound of 5000, which throws IndexOutOfBoundsException whenever the
		// corpus holds fewer than 5000 articles. Clamp to the actual size.
		int limit = Math.min(5000, lstArticles.size());
		lstArticles = lstArticles.subList(0, limit);
		List<Article> tmp = new ArrayList<Article>();

		int numOfArticlesAdded = 0;
		for (int i = 0; i < limit; i++)
		{
			Article art = lstArticles.get(i);
			if (!art.hasTopics())
			{
				continue;
			}

			// Stop once the requested number of examples has been collected
			// (tune Constantes.NUMOFARTICLES to change the sample size).
			if (numOfArticlesAdded >= Constantes.NUMOFARTICLES)
			{
				break;
			}

			// Topic string without its trailing separator character.
			String articleT = art.getArticleTopics().substring(0,
					art.getArticleTopics().length() - 1);
			if (chose10Topics.contains(articleT)
					&& chose10Topics.checkOccurrence(articleT) < Constantes.NUMOfTOPICOCCURRENCE)
			{
				numOfArticlesAdded++;
				tmp.add(art);
				// Append the article id to its topic cluster, creating the
				// id list on first use. (The original duplicated the add/put
				// logic across both branches; the list is mutated in place,
				// so no re-put is needed.)
				LinkedList<Integer> idTopics = clustersTopics.get(articleT);
				if (idTopics == null)
				{
					idTopics = new LinkedList<Integer>();
					clustersTopics.put(articleT, idTopics);
				}
				idTopics.add(art.getNewId());
			}
		}
		
		lstArticles = tmp;
		System.out.println(TzHelper.toc() + "ms");
		System.out.println("Num articles: " + lstArticles.size());

		// 2.-4. Convert every article into words, drop the stop words,
		// stem the remainder and build a bag of words per article.
		System.out.print("Calculating BagOfWords...");
		TzHelper.tic();
		for (Article article : lstArticles)
		{
			LinkedList<String> lstWords = new LinkedList<String>();
			article.getWordsFromArticle(lstWords);
			removeStopWords(lstWords, stopWords);
			lstWords = Stemmer.stemAListofWords(lstWords);
			articleMap.put(article, lstWords);
			BagOfWords bagOfWords = new BagOfWords(lstWords);
			articleCountWords.put(article, bagOfWords);
			// Counts documents-per-word (not raw occurrences), which is what
			// the TF-IDF step below needs.
			addcommonwordFromBag(commonwords, bagOfWords);
		}
		System.out.println(TzHelper.toc() + "ms");

		// Report candidate corpus-specific stop words at several repetition
		// thresholds, for manual inspection.
		int[] numWords = { 20, 15, 10, 5 };
		for (int numWordsRepeat : numWords)
		{
			List<String> lstStopWordLlist = getInterestingStopWords(articleCountWords, numWordsRepeat);
			System.out.print("StopWords (" + numWordsRepeat + ")");
			for (String s : lstStopWordLlist)
			{
				System.out.print(s + ",");
			}
			System.out.println();
		}
		
		System.out.print("Calculating Tf_idf...");
		TzHelper.tic();
		for (BagOfWords bag : articleCountWords.values())
		{
			TfIdf.calcTfIdf(bag, commonwords, articleCountWords.size());
		}
		System.out.println(TzHelper.toc() + "ms");
		wrriteClustersToFile("data/Clusters.txt");
	}
	
	/**
	 * Collects every word that occurs at least {@code numRepeat} times in at
	 * least one article; such words are candidates for corpus-specific stop
	 * words.
	 *
	 * PERF FIX: uses a LinkedHashSet for de-duplication instead of the
	 * original O(n^2) List.contains scan; insertion order is preserved, so
	 * the returned list is identical to the original's.
	 *
	 * @param articleCountWords bag of words per article
	 * @param numRepeat minimum in-article occurrence count
	 * @return de-duplicated list of candidate stop words
	 */
	private static List<String> getInterestingStopWords(Map<Article, BagOfWords> articleCountWords, int numRepeat)
	{
		LinkedHashSet<String> seen = new LinkedHashSet<String>();
		for (BagOfWords bag : articleCountWords.values())
		{
			seen.addAll(bag.getListOfWordsThatOccurrenceIsGreatherEqualThan(numRepeat));
		}
		return new LinkedList<String>(seen);
	}
	
	/**
	 * Increments {@code commonwords} once for every word occurrence in
	 * {@code words} (raw occurrence counting, duplicates included).
	 * Currently unused — kept because {@code main} still references it in a
	 * commented-out call.
	 */
	private static void addcommonword(TreeMap<String, Integer> commonwords,
			List<String> words)
	{
		for (String s : words)
		{
			Integer count = commonwords.get(s);
			commonwords.put(s, count == null ? 1 : count + 1);
		}
	}
	
	/**
	 * From an already-computed bag of words, increments the document count of
	 * every distinct word in the bag. Unlike {@link #addcommonword} this
	 * counts the number of documents each word appears in (fewer iterations,
	 * since only distinct words are visited).
	 * 
	 * @param commonwords word -> document-frequency accumulator
	 * @param bag bag of words of a single article
	 */
	private static void addcommonwordFromBag(TreeMap<String, Integer> commonwords, BagOfWords bag)
	{
		for (String s : bag.getBag().keySet())
		{
			Integer count = commonwords.get(s);
			commonwords.put(s, count == null ? 1 : count + 1);
		}
	}
	
	/** Prints every article id followed by its (processed) word list. */
	public static void printMap(Map<Article, List<String>> articleMap)
	{
		Set<Article> keys = articleMap.keySet();
		for (Article article : keys)
		{
			System.out.print(article.getArticleID() + ": ");
			printWords(articleMap.get(article));
			System.out.println();
		}
	}
	
	/** Prints every article id followed by its "word: count;" pairs. */
	public static void printMapCount(Map<Article, BagOfWords> articleMap)
	{
		for (Article article : articleMap.keySet())
		{
			System.out.println(article.getArticleID() + ":");
			BagOfWords bag = articleMap.get(article);
			for (String word : bag.getBag().keySet())
			{
				System.out.print(word + ": ");
				System.out.print(bag.getBag().get(word) + "; ");
			}
			System.out.println("");
			
		}
	}
	
	/** Prints the words of {@code list} separated by single spaces. */
	public static void printWords(List<String> list)
	{
		for (String s : list)
		{
			System.out.print(s + " ");
		}
	}
	
	/**
	 * Removes from {@code words}, in place, every word that is a stop word.
	 *
	 * PERF FIX: iterator-based removal is O(n) for LinkedList inputs (the
	 * lists built in main are LinkedLists), unlike the original index-based
	 * remove(i) which is O(n^2).
	 * 
	 * @param words list to filter in place
	 * @param stop stop-word lookup
	 */
	public static void removeStopWords(List<String> words, StopWordsExtract stop)
	{
		Iterator<String> it = words.iterator();
		while (it.hasNext())
		{
			if (stop.isStopWord(it.next()))
			{
				it.remove();
			}
		}
	}
	
	/**
	 * Writes one "articleId&lt;TAB&gt;topic" line per clustered article to
	 * {@code filename}. The method name keeps its historical typo ("wrrite")
	 * so existing callers are not broken.
	 *
	 * BUG FIX: the original leaked the BufferedWriter whenever a write threw
	 * before out.close() was reached; the writer is now closed in a finally
	 * block.
	 *
	 * @param filename destination path, e.g. "data/Clusters.txt"
	 */
	public static void wrriteClustersToFile(String filename)
	{
		BufferedWriter out = null;
		try
		{
			out = new BufferedWriter(new FileWriter(filename));
			for (Map.Entry<String, LinkedList<Integer>> entry : clustersTopics.entrySet())
			{
				for (Integer id : entry.getValue())
				{
					out.write(id.toString() + "\t" + entry.getKey() + "\n");
				}
			}
		} catch (Exception e)
		{ // Catch exception if any
			System.err.println("Error: " + e.getMessage());
			e.printStackTrace();
		} finally
		{
			if (out != null)
			{
				try
				{
					out.close();
				} catch (Exception ignored)
				{
					// best-effort close; nothing more we can do here
				}
			}
		}
	}
}

