package thesis;
import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;

import com.aliasi.classify.NaiveBayesClassifier;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.tokenizer.TokenizerFactory;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;


/**
 * Builds Naive Bayes classifiers for the BBC News concepts.
 *
 * Feature words are selected in the constructor from two corpora:
 * (1) the BBC dataset, stored as a sparse term/document matrix on disk
 * ({@code bbc_matrix.mtx}, {@code bbc_terms.terms}, {@code bbc_classes.classes}),
 * and (2) labelled tweet samples read from MongoDB via {@code FSModule}.
 * A word is kept when it is frequent enough on one side (>= thresholdFrequency)
 * and sufficiently concentrated on that side (>= thresholdRatio of its mass).
 *
 * NOTE(review): file-format assumptions — matrix lines are "wordId docId freq",
 * classes lines are "docId classId", terms lines start with the term; word ids
 * are 1-based line numbers of the terms file. Confirm against the data files.
 */
public class BBCNewsDatasetClassifierFactory {
	private static final String CLASSESFILE = "bbc_classes.classes";
	private static final String MATRIXFILE = "bbc_matrix.mtx";
	private static final String TERMSFILE = "bbc_terms.terms";

	// Minimum relative frequency a word must reach on its own side.
	private static final double thresholdFrequency = 0.002;
	// Minimum share of a word's combined frequency that must fall on its own side.
	private static final double thresholdRatio = 0.7;

	private final Hashtable<String, Integer> conceptIds = new Hashtable<String, Integer>();
	private final TreeSet<String> featureWords = new TreeSet<String>();

	private final FilteringAlgorithm fAlg;

	/**
	 * Selects the feature-word vocabulary from the BBC dataset files and from
	 * the tweet samples in MongoDB. Aborts the JVM on any file error
	 * (preserved from the original behaviour).
	 *
	 * @param fAlg preprocessing algorithm applied to every tweet text
	 */
	public BBCNewsDatasetClassifierFactory(FilteringAlgorithm fAlg) {
		this.fAlg = fAlg;

		conceptIds.put("Business", 0);
		conceptIds.put("Entertainment", 1);
		conceptIds.put("Politics", 2);
		conceptIds.put("Sport", 3);
		conceptIds.put("Tech", 4);
		conceptIds.put("Crime", 5);

		// Phase 1: feature words from the BBC dataset (positive direction only,
		// as in the original). The totals are derived from the aggregated word-id
		// tables instead of re-reading the matrix file (same sum, half the I/O).
		for (String concept : conceptIds.keySet()) {
			int conceptId = conceptIds.get(concept);

			Hashtable<Integer, Integer> positiveWordIds =
					getWordIdsTable(getRelatedDocumentIds(conceptId));
			Hashtable<Integer, Integer> negativeWordIds =
					getWordIdsTable(getUnrelatedDocumentIds(conceptId));

			selectFeatureWords(concept, "positive",
					getWordsFromWordIds(positiveWordIds),
					getWordsFromWordIds(negativeWordIds),
					sumValues(positiveWordIds),
					sumValues(negativeWordIds));
		}
		System.out.println("FEATURE WORDS: " + featureWords.size());

		// Phase 2: feature words from the tweet samples, in both directions.
		for (String concept : conceptIds.keySet()) {
			Hashtable<String, Integer> positiveWordsTable = new Hashtable<String, Integer>();
			Hashtable<String, Integer> negativeWordsTable = new Hashtable<String, Integer>();

			int positiveTotal =
					countStemmedWords(FSModule.readPositiveSamples(concept), positiveWordsTable);
			int negativeTotal =
					countStemmedWords(FSModule.readNegativeSamples(concept), negativeWordsTable);

			selectFeatureWords(concept, "positive", positiveWordsTable, negativeWordsTable,
					positiveTotal, negativeTotal);
			selectFeatureWords(concept, "negative", negativeWordsTable, positiveWordsTable,
					negativeTotal, positiveTotal);
		}
		System.out.println("FEATURE WORDS: " + featureWords.size());
	}

	/**
	 * Trains a two-category ("positive"/"negative") Naive Bayes classifier for
	 * the given concept from the BBC word frequencies, the concept's tweet
	 * samples, and — as extra negatives — the positive samples of every other
	 * concept.
	 *
	 * @param concept one of the concept names registered in the constructor
	 * @return a trained classifier restricted to the selected feature words
	 */
	public NaiveBayesClassifier getTrainedClassifier(String concept) {
		int conceptId = conceptIds.get(concept);

		Hashtable<String, Integer> positiveWordsTable =
				getWordsFromWordIds(getWordIdsTable(getRelatedDocumentIds(conceptId)));
		Hashtable<String, Integer> negativeWordsTable =
				getWordsFromWordIds(getWordIdsTable(getUnrelatedDocumentIds(conceptId)));

		String[] categories = { "negative", "positive" };

		// Tokenizer that only emits the selected feature words.
		TokenizerFactory tf = new WordsTokenizerFactory(featureWords);

		NaiveBayesClassifier classifier = new NaiveBayesClassifier(categories, tf);

		// Seed the model with the BBC dataset word frequencies.
		for (Map.Entry<String, Integer> entry : positiveWordsTable.entrySet()) {
			classifier.train("positive", entry.getKey(), entry.getValue());
		}
		for (Map.Entry<String, Integer> entry : negativeWordsTable.entrySet()) {
			classifier.train("negative", entry.getKey(), entry.getValue());
		}

		// Add the tweet samples of this concept.
		trainSamples(classifier, FSModule.readPositiveSamples(concept), "positive");
		trainSamples(classifier, FSModule.readNegativeSamples(concept), "negative");

		// Positive samples of every other concept act as additional negatives.
		for (String otherConcept : conceptIds.keySet()) {
			if (!otherConcept.equals(concept)) {
				trainSamples(classifier, FSModule.readPositiveSamples(otherConcept), "negative");
			}
		}

		return classifier;
	}

	/**
	 * Adds to {@link #featureWords} every word of {@code primaryTable} whose
	 * relative frequency on the primary side reaches {@code thresholdFrequency}
	 * and whose share of the combined frequency reaches {@code thresholdRatio}.
	 * {@code label} is only used for the diagnostic output ("positive"/"negative").
	 */
	private void selectFeatureWords(String concept, String label,
			Hashtable<String, Integer> primaryTable, Hashtable<String, Integer> otherTable,
			int primaryTotal, int otherTotal) {
		for (Map.Entry<String, Integer> entry : primaryTable.entrySet()) {
			String word = entry.getKey();
			double primaryFrequency = (double) entry.getValue() / primaryTotal;

			if (primaryFrequency < thresholdFrequency) {
				continue;
			}

			Integer otherValue = otherTable.get(word);
			if (otherValue == null) {
				// Word never occurs on the other side: always a feature.
				featureWords.add(word);
				System.out.println(concept + " " + label + " word: " + word);
			} else {
				double otherFrequency = (double) otherValue / otherTotal;
				double ratio = primaryFrequency / (otherFrequency + primaryFrequency);

				if (ratio >= thresholdRatio) {
					featureWords.add(word);
					System.out.println(concept + " " + label + " word: " + word);
				}
			}
		}
	}

	/**
	 * Tokenizes, stems, and counts every word of every sample in the cursor,
	 * accumulating the per-word counts into {@code wordsTable}.
	 *
	 * @return the total number of tokens seen (equals the sum of the counts)
	 */
	private int countStemmedWords(DBCursor samples, Hashtable<String, Integer> wordsTable) {
		int numberOfWords = 0;

		while (samples.hasNext()) {
			DBObject tweet = samples.next();
			String text = tweet.get("text").toString();
			String preprocessedText = fAlg.getPreprocessedText(text);

			StringTokenizer tk = new StringTokenizer(preprocessedText);
			while (tk.hasMoreTokens()) {
				String stemmedWord = PorterStemmerTokenizerFactory.stem(tk.nextToken());
				numberOfWords++;

				Integer wordCount = wordsTable.get(stemmedWord);
				wordsTable.put(stemmedWord, wordCount == null ? 1 : wordCount + 1);
			}
		}

		return numberOfWords;
	}

	/** Trains the classifier on the preprocessed text of every sample in the cursor. */
	private void trainSamples(NaiveBayesClassifier classifier, DBCursor samples, String category) {
		while (samples.hasNext()) {
			DBObject tweet = samples.next();
			String text = tweet.get("text").toString();
			classifier.train(category, fAlg.getPreprocessedText(text), 1);
		}
	}

	/** Sums the values of a frequency table (total token count of the collection). */
	private static int sumValues(Hashtable<?, Integer> table) {
		int total = 0;
		for (int value : table.values()) {
			total += value;
		}
		return total;
	}

	/**
	 * Aggregates, per word id, the frequencies of all matrix entries whose
	 * document id belongs to {@code documentIds}. Each matrix line is
	 * "wordId documentId frequency"; frequencies are rounded to int.
	 */
	private Hashtable<Integer, Integer> getWordIdsTable(List<Integer> documentIds) {
		// HashSet turns the per-line membership test from O(n) into O(1).
		Set<Integer> documentIdSet = new HashSet<Integer>(documentIds);
		Hashtable<Integer, Integer> wordIdsTable = new Hashtable<Integer, Integer>();

		try {
			BufferedReader brMatrix = new BufferedReader(new FileReader(MATRIXFILE));
			try {
				String line;
				while ((line = brMatrix.readLine()) != null) {
					StringTokenizer tk = new StringTokenizer(line);

					int wordId = Integer.parseInt(tk.nextToken());
					int documentId = Integer.parseInt(tk.nextToken());
					int frequency = Math.round(Float.parseFloat(tk.nextToken()));

					if (documentIdSet.contains(documentId)) {
						Integer oldFrequency = wordIdsTable.get(wordId);
						wordIdsTable.put(wordId,
								oldFrequency == null ? frequency : frequency + oldFrequency);
					}
				}
			} finally {
				brMatrix.close(); // closed even when a line fails to parse
			}
		} catch (NumberFormatException e) {
			die(e);
		} catch (IOException e) {
			die(e);
		}

		return wordIdsTable;
	}

	/**
	 * Translates word ids into words using the terms file, where the word id of
	 * a term is its 1-based line number. Ids with no matching term are skipped
	 * (the old code would have thrown a NullPointerException on them).
	 */
	private Hashtable<String, Integer> getWordsFromWordIds(
			Hashtable<Integer, Integer> wordIdsTable) {
		Hashtable<Integer, String> fromWordIdToWord = new Hashtable<Integer, String>();

		try {
			BufferedReader brTerms = new BufferedReader(new FileReader(TERMSFILE));
			try {
				int termsCount = 1;
				String line;
				while ((line = brTerms.readLine()) != null) {
					StringTokenizer tk = new StringTokenizer(line);
					fromWordIdToWord.put(termsCount, tk.nextToken());
					termsCount++;
				}
			} finally {
				brTerms.close();
			}
		} catch (IOException e) {
			die(e);
		}

		Hashtable<String, Integer> wordsTable = new Hashtable<String, Integer>();
		for (Map.Entry<Integer, Integer> entry : wordIdsTable.entrySet()) {
			String word = fromWordIdToWord.get(entry.getKey());
			if (word != null) {
				wordsTable.put(word, entry.getValue());
			}
		}

		return wordsTable;
	}

	/** Ids of the documents labelled with {@code conceptId} in the classes file. */
	private List<Integer> getRelatedDocumentIds(int conceptId) {
		return getDocumentIds(conceptId, true);
	}

	/** Ids of the documents labelled with any other concept. */
	private List<Integer> getUnrelatedDocumentIds(int conceptId) {
		return getDocumentIds(conceptId, false);
	}

	/**
	 * Scans the classes file ("documentId classId" per line) and collects the
	 * document ids whose class matches ({@code related == true}) or does not
	 * match ({@code related == false}) {@code conceptId}.
	 */
	private List<Integer> getDocumentIds(int conceptId, boolean related) {
		List<Integer> documentIds = new ArrayList<Integer>();

		try {
			BufferedReader brClasses = new BufferedReader(new FileReader(CLASSESFILE));
			try {
				String line;
				while ((line = brClasses.readLine()) != null) {
					StringTokenizer tk = new StringTokenizer(line);
					int documentId = Integer.parseInt(tk.nextToken());
					int cid = Integer.parseInt(tk.nextToken());

					if ((cid == conceptId) == related) {
						documentIds.add(documentId);
					}
				}
			} finally {
				brClasses.close();
			}
		} catch (NumberFormatException e) {
			die(e);
		} catch (IOException e) {
			die(e);
		}

		return documentIds;
	}

	/**
	 * Prints the stack trace and aborts the JVM — the original error-handling
	 * policy of this class, kept for backward compatibility.
	 */
	private static void die(Exception e) {
		e.printStackTrace();
		System.exit(-1);
	}

}
