package ClassifierTraining;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.GregorianCalendar;
import java.util.Hashtable;
import java.util.List;
import java.util.StringTokenizer;
import java.util.TreeSet;

import thesis.BBCNewsDatasetClassifierFactory;
import thesis.FSModule;
import thesis.FilteringAlgorithm;
import thesis.InputTweetInfoExtractor;
import thesis.LocationManager;


import com.aliasi.classify.JointClassification;
import com.aliasi.classify.NaiveBayesClassifier;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.util.Pair;
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.Mongo;


public class TweetsClassifiers {
	// Shared connection to the local MongoDB instance (see connectToLocalMongoDB).
	private static Mongo m;
	
	// BBC dataset files: document->concept assignments, the term-document
	// frequency matrix, and the term list (term ids are 1-based line numbers).
	private static final String CLASSESFILE = "bbc_classes.classes";
	private static final String MATRIXFILE = "bbc_matrix.mtx";
	private static final String TERMSFILE = "bbc_terms.terms";
	
	// Mongo locations: ingested tweets come in from INPUTDB/INPUTCOLL,
	// manually labelled samples go out to collections inside CLASSIFIERDB.
	private static final String INPUTDB = "Test";
	private static final String INPUTCOLL = "OneDayNews";
	private static final String CLASSIFIERDB = "Classifier";
	
	// Tweet pre-filtering/pre-processing helper; initialized by getBBCNewsClassifier.
	private static FilteringAlgorithm fAlg;
	
	//private static final double thresholdFrequency = 0.05;
	//private static final double thresholdRatio = 0.7;
	
	// Concept currently being trained and the directory holding its sample articles.
	private static final String concept = "Business";
	private static final String DIRNAME = "C:\\Users\\hp\\Desktop\\articoli general news";
	
	/**
	 * Entry point: opens the Mongo connections, trains a BBC-news Naive Bayes
	 * classifier for the configured concept and uses it to pre-select tweets
	 * for manual ground-truth labelling.
	 */
	public static void main(String args[]){
		// Both this class and the feature-selection module need a live
		// connection before anything else runs.
		connectToLocalMongoDB();
		FSModule.connectToLocalMongoDB();
		
		NaiveBayesClassifier conceptClassifier = getBBCNewsClassifier(concept);
		refineGroundKnowledge(conceptClassifier);
	}

	
	//ADD NEW CONCEPT TO BBC DATASET
	/**
	 * Appends every article found in DIRNAME to the BBC dataset files as a
	 * brand new concept: each file becomes one new document, all of them are
	 * assigned the next free concept id, and their stemmed word frequencies
	 * are appended to the term-document matrix (reordered afterwards).
	 */
	public static void addNewConceptToNewsDataset() {

		// (wordId, documentId) -> raw term frequency, to be appended to MATRIXFILE.
		Hashtable<Pair<Integer, Integer>, Double> matrixUpdates = new Hashtable<Pair<Integer, Integer>, Double>();
		
		int lastDocumentId = getLastDocumentId();
		int lastConceptId = getLastConceptId();

		// All files in the directory belong to the same, newly allocated concept.
		int conceptId = lastConceptId+1;
		
		File directory = new File(DIRNAME);
		File[] articles = directory.listFiles();
		if (articles == null){
			// listFiles() returns null when DIRNAME is missing or not a directory;
			// the old code would have thrown a NullPointerException here.
			System.err.println("Cannot read directory " + DIRNAME);
			System.exit(-1);
		}
		
		LocationManager lMng = new LocationManager(null);
		// Deliberately a local instance: the static fAlg field is reserved for
		// the classification flow started from main() (was shadowed before).
		FilteringAlgorithm filter = new FilteringAlgorithm(lMng);
		
		for (File file : articles){
			lastDocumentId++;
			int documentId = lastDocumentId;
			
			updateClassesFile(documentId, conceptId);
			
			// Stemmed word -> number of occurrences inside this single file.
			Hashtable<String, Double> wordFrequencies = new Hashtable<String, Double>();
			
			BufferedReader br = null;
			try {
				br = new BufferedReader(new FileReader(file));
			} catch (FileNotFoundException e) {
				e.printStackTrace();
				System.exit(-1);
			}
			
			try {
				String line;
				while ((line = br.readLine()) != null){
					line = line.toLowerCase();
					
					String preprocLine = filter.getPreprocessedText(line);
					StringTokenizer tk = new StringTokenizer(preprocLine);
					
					while (tk.hasMoreTokens()){
						String word = tk.nextToken();
						String stemmedWord = PorterStemmerTokenizerFactory.stem(word);
						
						Double wordFrequency = wordFrequencies.get(stemmedWord);
						if (wordFrequency == null){
							wordFrequencies.put(stemmedWord, 1.0);
						}
						else{
							wordFrequencies.put(stemmedWord, wordFrequency + 1.0);
						}
					}
				}
			} catch (IOException e) {
				e.printStackTrace();
				System.exit(-1);
			}
			
			// The reader was leaked before: close it once the file is consumed.
			try {
				br.close();
			} catch (IOException e) {
				e.printStackTrace();
				System.exit(-1);
			}
			
			for (java.util.Map.Entry<String, Double> entry : wordFrequencies.entrySet()){
				int wordId = getWordId(entry.getKey());
				Pair<Integer, Integer> wordAndDocument = new Pair<Integer, Integer>(wordId, documentId);
				
				matrixUpdates.put(wordAndDocument, entry.getValue());
			}
		}
		
		updateMatrixFile(matrixUpdates);
		
		// Keep the matrix file sorted by (wordId, documentId).
		reorderMatrixFile();
	}

	/**
	 * Rewrites MATRIXFILE with its "wordId documentId frequency" rows sorted
	 * by word id and then by document id.
	 *
	 * Fixes: frequencies are now parsed with Double.parseDouble — the old
	 * Float.parseFloat narrowed the value and corrupted the printed digits on
	 * every rewrite; end of file is detected via readLine() == null, since
	 * ready() only reports whether a read would block.
	 */
	private static void reorderMatrixFile() {
		// (wordId, documentId) -> frequency, plus a sorted view of the keys.
		Hashtable<Pair<Integer, Integer>, Double> matrix = new Hashtable<Pair<Integer, Integer>, Double>();
		TreeSet<ComparablePair<Integer,Integer>> pairs = new TreeSet<ComparablePair<Integer, Integer>>();
		
		BufferedReader brMatrix = null;
		try {
			brMatrix = new BufferedReader(new FileReader(MATRIXFILE));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		try{
			String line;
			while((line = brMatrix.readLine()) != null){
				StringTokenizer tk = new StringTokenizer(line);

				int wordId = Integer.parseInt(tk.nextToken());
				int documentId = Integer.parseInt(tk.nextToken());
				// Parse straight to double: the old Float.parseFloat round trip
				// silently lost precision (e.g. 0.1 -> 0.10000000149011612).
				double frequency = Double.parseDouble(tk.nextToken());

				ComparablePair<Integer, Integer> wordAndDocument = new ComparablePair<Integer, Integer>(wordId, documentId);
				matrix.put(wordAndDocument, frequency);
				
				pairs.add(wordAndDocument);
			}
		} catch (NumberFormatException e) {
			e.printStackTrace();
			System.exit(-1);
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}

		try {
			brMatrix.close();
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		// Overwrite the file with the rows in sorted (wordId, documentId) order.
		PrintWriter pw = null;
		try {
			pw = new PrintWriter(new FileWriter(MATRIXFILE));
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		boolean firstWriting = true;
		for (Pair<Integer, Integer> wordAndDocument : pairs){
			int wordId = wordAndDocument.a();
			int documentId = wordAndDocument.b();
			
			double frequency = matrix.get(wordAndDocument);
			
			// Newline goes before every row except the first, so the file
			// never ends with a trailing blank line.
			if(firstWriting){
				pw.print("" + wordId + " " + documentId + " " + frequency);
				firstWriting = false;
			}
			else{
				pw.print("\n" + wordId + " " + documentId + " " + frequency);
			}
		}

		pw.close();
	}

	/**
	 * Appends the given (wordId, documentId) -> frequency entries to the
	 * matrix file, one whitespace-separated row per entry.
	 */
	private static void updateMatrixFile(Hashtable<Pair<Integer, Integer>, Double> matrixUpdates) {
		PrintWriter writer = null;
		try {
			// Append mode: existing matrix rows must be preserved.
			writer = new PrintWriter(new FileWriter(MATRIXFILE, true));
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		for (java.util.Map.Entry<Pair<Integer, Integer>, Double> entry : matrixUpdates.entrySet()){
			Pair<Integer, Integer> wordAndDocument = entry.getKey();
			int wordId = wordAndDocument.a();
			int documentId = wordAndDocument.b();
			double frequency = entry.getValue();
			
			// Leading newline: every appended row starts on a fresh line.
			writer.print("\n" + wordId + " " + documentId + " " + frequency);
		}
		
		writer.close();
	}

	/**
	 * Appends a single "documentId conceptId" row to the classes file.
	 */
	private static void updateClassesFile(int documentId, int conceptId) {
		PrintWriter out = null;
		try {
			// Append mode keeps all previously recorded assignments.
			out = new PrintWriter(new FileWriter(CLASSESFILE, true));
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		// Leading newline: the new row starts on a fresh line after the last one.
		out.print("\n" + documentId + " " + conceptId);
		out.close();
	}

	/**
	 * Returns the 1-based id of word w in the terms file. Ids are simply line
	 * numbers: if the word is not present yet, it is appended on a new line
	 * and the number of that new line is returned.
	 *
	 * NOTE(review): this does a full linear scan of TERMSFILE on every call;
	 * fine for small vocabularies, consider caching the file in a map otherwise.
	 */
	private static int getWordId(String w) {
		// Line counter — term ids start at 1.
		int termsCount = 1;
		
		BufferedReader brTerms = null;
		try {
			brTerms = new BufferedReader(new FileReader(TERMSFILE));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		try {
			String line;
			while((line = brTerms.readLine()) != null){
				StringTokenizer tk = new StringTokenizer(line);
				if (!tk.hasMoreTokens()){
					// Blank line: count it so ids stay aligned with line numbers
					// (the old code crashed here with NoSuchElementException).
					termsCount++;
					continue;
				}
				String word = tk.nextToken();
				if (word.equals(w)){
					try {
						brTerms.close();
					} catch (IOException e) {
						e.printStackTrace();
						System.exit(-1);
					}
					
					return termsCount;
				}
				termsCount++;
			}
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		try {
			brTerms.close();
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		// Word not found: append it, so the number of the new last line is its id.
		PrintWriter pw = null;
		try {
			pw = new PrintWriter(new FileWriter(TERMSFILE, true));
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		pw.print("\n" + w);
		
		pw.close();
		
		return termsCount;
	}

	/**
	 * Returns the concept id stored on the last well-formed line of the
	 * classes file (each line is "documentId conceptId"), or 0 if the file
	 * contains no such line.
	 */
	private static int getLastConceptId() {
		int cid = 0;
		BufferedReader brClasses = null;
		try {
			brClasses = new BufferedReader(new FileReader(CLASSESFILE));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		try {
			String line;
			// readLine() == null is the reliable end-of-stream test; the old
			// ready() check only says whether a read would block.
			while((line = brClasses.readLine()) != null){
				StringTokenizer tk = new StringTokenizer(line);
				if (tk.countTokens() < 2){
					// Skip blank/malformed lines instead of crashing on them.
					continue;
				}
				tk.nextToken();	// documentId — not needed here
				cid = Integer.parseInt(tk.nextToken());
			}
		} catch (NumberFormatException e) {
			e.printStackTrace();
			System.exit(-1);
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		try {
			brClasses.close();
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		return cid;
	}

	/**
	 * Returns the document id stored on the last well-formed line of the
	 * classes file (each line is "documentId conceptId"), or 0 if the file
	 * contains no such line.
	 */
	private static int getLastDocumentId() {
		int documentId = 0;
		BufferedReader brClasses = null;
		try {
			brClasses = new BufferedReader(new FileReader(CLASSESFILE));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		try {
			String line;
			// readLine() == null is the reliable end-of-stream test; the old
			// ready() check only says whether a read would block.
			while((line = brClasses.readLine()) != null){
				StringTokenizer tk = new StringTokenizer(line);
				if (tk.countTokens() < 2){
					// Skip blank/malformed lines instead of crashing on them.
					continue;
				}
				documentId = Integer.parseInt(tk.nextToken());
			}
		} catch (NumberFormatException e) {
			e.printStackTrace();
			System.exit(-1);
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		try {
			brClasses.close();
		} catch (IOException e) {
			e.printStackTrace();
			System.exit(-1);
		}
		
		return documentId;
	}


	//CLASSIFICATION ALGORITHM
	/*
	public static NaiveBayesClassifier getTrainedClassifier(String concept) {
		FilteringAlgorithm fAlg = new FilteringAlgorithm();

		DBCursor positiveSamples = readPositiveSamples(concept);
		DBCursor negativeSamples = readNegativeSamples(concept);

		int numberOfPositiveSamples = positiveSamples.size();
		int numberOfNegativeSamples = negativeSamples.size();

		Hashtable<String, Integer> positiveWordsTable = new Hashtable<String, Integer>();
		Hashtable<String, Integer> negativeWordsTable = new Hashtable<String, Integer>();

		while (positiveSamples.hasNext()){
			DBObject tweet = positiveSamples.next();

			String text = tweet.get("text").toString();

			String preprocessedText = fAlg.getPreprocessedText(text);

			StringTokenizer tk = new StringTokenizer(preprocessedText);

			while (tk.hasMoreTokens()){
				String word = tk.nextToken();

				Integer wordCount = positiveWordsTable.get(word);

				if(wordCount == null){
					positiveWordsTable.put(word, 1);
				}
				else{
					positiveWordsTable.put(word, wordCount+1);
				}
			}
		}

		while (negativeSamples.hasNext()){
			DBObject tweet = negativeSamples.next();

			String text = tweet.get("text").toString();

			String preprocessedText = fAlg.getPreprocessedText(text);

			StringTokenizer tk = new StringTokenizer(preprocessedText);

			while (tk.hasMoreTokens()){
				String word = tk.nextToken();

				Integer wordCount = negativeWordsTable.get(word);

				if(wordCount == null){
					negativeWordsTable.put(word, 1);
				}
				else{
					negativeWordsTable.put(word, wordCount+1);
				}
			}	
		}

		List<String> featureWords = new ArrayList<String>();

		for (String word : positiveWordsTable.keySet()){
			int positiveValue = positiveWordsTable.get(word);

			double frequency = (double)positiveValue/numberOfPositiveSamples;

			if (frequency >= thresholdFrequency){	
				Integer negativeValue = negativeWordsTable.get(word);

				if(negativeValue == null){
					System.out.println(word);
					featureWords.add(word);
				}
				else{
					double negativeFrequency = (double)negativeValue / numberOfNegativeSamples;

					double ratio = frequency / negativeFrequency;

					if (ratio >= thresholdRatio){
						featureWords.add(word);
					}
				}
			}
		}

		for (String word : negativeWordsTable.keySet()){
			int negativeValue = negativeWordsTable.get(word);

			double frequency = (double)negativeValue/numberOfNegativeSamples;

			if (frequency >= thresholdFrequency){	
				Integer positiveValue = positiveWordsTable.get(word);

				if(positiveValue == null){
					System.out.println(word);
					featureWords.add(word);
				}
				else{
					double positiveFrequency = (double)positiveValue / numberOfPositiveSamples;

					double ratio = frequency / positiveFrequency;

					if (ratio >= thresholdRatio){
						System.out.println(word);
						featureWords.add(word);
					}
				}
			}
		}

		System.out.println(featureWords.size());

		String categories[] = new String[2];
		categories[0] = "negative";
		categories[1] = "positive";

		TokenizerFactory tf = new WordsTokenizerFactory(featureWords);

		NaiveBayesClassifier classifier = new NaiveBayesClassifier(categories, tf); 

		positiveSamples = readPositiveSamples(concept);
		negativeSamples = readNegativeSamples(concept);

		while (positiveSamples.hasNext()){
			DBObject tweet = positiveSamples.next();

			String text = tweet.get("text").toString();

			String preprocessedText = fAlg.getPreprocessedText(text);

			classifier.train("positive", preprocessedText, 1);
		}

		while (negativeSamples.hasNext()){
			DBObject tweet = negativeSamples.next();

			String text = tweet.get("text").toString();

			String preprocessedText = fAlg.getPreprocessedText(text);

			classifier.train("negative", preprocessedText, 1);
		}

		return classifier;
	}*/

	// GET INITIAL GROUND KNOWLEDGE
	/*
	public static void getInitialGroundKnowledge() {
		GregorianCalendar beginData = new GregorianCalendar(2012, 9, 30, 20, 00);
		long beginTimestamp = beginData.getTimeInMillis();
		int numberOfTweets = 50000;
		int numberOfTtbc = 300;
		
		List<String> keywords = getKeywords(concept);
		
		DBCursor inputTweets = readIngestedData(beginTimestamp, numberOfTweets);
		
		List<DBObject> relevantTweets = new ArrayList<DBObject>();
		FilteringAlgorithm fAlg = new FilteringAlgorithm();
		
		here : while (inputTweets.hasNext()){
			DBObject tweet = inputTweets.next();
			InputTweetInfoExtractor infoExtr = new InputTweetInfoExtractor(tweet);
			
			if (fAlg.mustBeFiltered(tweet)) {
				continue;
			}
			String preprocessedText = fAlg.getPreprocessedText(infoExtr.getText());
			
			for (String keyword : keywords){
				if (preprocessedText.contains(" " + keyword + " ")){

					relevantTweets.add(tweet);
					
					if(relevantTweets.size() == numberOfTtbc){
						break here;
					}
				}
			}
		}
		
		List<DBObject> positiveSamples = new ArrayList<DBObject>();
		List<DBObject> negativeSamples = new ArrayList<DBObject>();
		
		while (!relevantTweets.isEmpty()){
			int nftSize = relevantTweets.size();
			List<DBObject> tweetsToBeClassified = null;
			
			if(nftSize < 10){
				tweetsToBeClassified = relevantTweets.subList(0, nftSize);
			}
			else{
				tweetsToBeClassified = relevantTweets.subList(0, 10);
			}
			
			ClassifierDialog f = new ClassifierDialog(concept, tweetsToBeClassified);
			
			f.setVisible(true);
			
			positiveSamples.addAll(f.getPositiveSamples());
			negativeSamples.addAll(f.getNegativeSamples());
			
			if(nftSize < 10){
				for (int i=0; i<nftSize; i++){
					relevantTweets.remove(0);
				}
			}
			else {
				for (int i=0; i<10; i++){
					relevantTweets.remove(0);
				}
			}
		}
		
		writePositiveSamples(concept, positiveSamples);
		writeNegativeSamples(concept, negativeSamples);
	}*/
	
	
	//REFINE GROUND KNOWLEDGE
	/**
	 * Builds a Naive Bayes classifier for the given concept, trained on the
	 * BBC news dataset. Side effect: (re)initializes the shared fAlg field
	 * that refineGroundKnowledge uses afterwards.
	 */
	private static NaiveBayesClassifier getBBCNewsClassifier(String conc) {
		fAlg = new FilteringAlgorithm(new LocationManager(null));
		
		BBCNewsDatasetClassifierFactory factory = new BBCNewsDatasetClassifierFactory(fAlg);
		return factory.getTrainedClassifier(conc);
	}
	
	/**
	 * Uses the trained classifier to pre-select up to numberOfTtbc ingested
	 * tweets that look positive for the configured concept (conditional
	 * probability above 0.8), presents them to a human in batches of 10
	 * through ClassifierDialog, and stores the confirmed positive/negative
	 * samples in the classifier database.
	 *
	 * @param classifier Naive Bayes classifier trained for {@code concept}
	 */
	public static void refineGroundKnowledge(NaiveBayesClassifier classifier){
		GregorianCalendar beginData = new GregorianCalendar(2013, 2, 2, 14, 10);
		long beginTimestamp = beginData.getTimeInMillis();
		int numberOfTweets = 15000;
		int numberOfTtbc = 100;	// max tweets forwarded to manual labelling
		
		DBCursor inputTweets = readIngestedData(beginTimestamp, numberOfTweets);
		
		System.out.println(inputTweets.size());
		
		List<DBObject> bestTweetsAccordingToClassifier = new ArrayList<DBObject>();
		
		while (inputTweets.hasNext()){
			// Stop scanning as soon as the batch is full — the old code kept
			// classifying every remaining tweet and threw the results away.
			if (bestTweetsAccordingToClassifier.size() >= numberOfTtbc){
				break;
			}
			
			DBObject tweet = inputTweets.next();
			InputTweetInfoExtractor infoExtr = new InputTweetInfoExtractor(tweet);
			
			if (fAlg.mustBeFiltered(tweet)) {
				continue;
			}
			
			String text = infoExtr.getText();
			String preprocessedText = fAlg.getPreprocessedText(text);
			
			JointClassification classification = classifier.classify(preprocessedText);
			double positiveProbability = classification.conditionalProbability("positive");
			
			// Only confidently positive tweets are worth a human look.
			if(positiveProbability > 0.8){
				bestTweetsAccordingToClassifier.add(tweet);
			}
		}
		
		List<DBObject> positiveSamples = new ArrayList<DBObject>();
		List<DBObject> negativeSamples = new ArrayList<DBObject>();
		
		// Show the candidates ten at a time and collect the human verdicts.
		while (!bestTweetsAccordingToClassifier.isEmpty()){
			int batchSize = Math.min(10, bestTweetsAccordingToClassifier.size());
			List<DBObject> tweetsToBeClassified = bestTweetsAccordingToClassifier.subList(0, batchSize);
			
			ClassifierDialog f = new ClassifierDialog(concept, tweetsToBeClassified);
			f.setVisible(true);
			
			positiveSamples.addAll(f.getPositiveSamples());
			negativeSamples.addAll(f.getNegativeSamples());
			
			// Clearing the sub-list view removes the batch from the backing list.
			tweetsToBeClassified.clear();
		}
		
		writePositiveSamples(concept, positiveSamples);
		writeNegativeSamples(concept, negativeSamples);
	}

	
	//OTHER FUNCTIONS
	/**
	 * Reads at most numberOfTweets ingested tweets whose tweet_time is at or
	 * after begin_timestamp from the input collection.
	 *
	 * NOTE(review): the timestamp is sent as a string ("" + millis), so the
	 * $gte comparison is lexicographic — verify the collection stores
	 * header.tweet_time as strings of fixed width.
	 */
	private static DBCursor readIngestedData(long begin_timestamp, int numberOfTweets){
		DBCollection tweets = m.getDB(INPUTDB).getCollection(INPUTCOLL);
		
		DBObject query = new BasicDBObject();
		query.put("header.tweet_time", new BasicDBObject("$gte", "" + begin_timestamp));
		
		return tweets.find(query).limit(numberOfTweets);
	}

	/**
	 * Stores the manually confirmed positive samples in the
	 * "&lt;concept&gt;_positive" collection, keeping only tweet id and text.
	 * Sleeps one second after each insert — presumably to throttle writes;
	 * TODO confirm the pause is still needed.
	 */
	private static void writePositiveSamples(String concept, List<DBObject> positiveSamples){
		DB db = m.getDB(CLASSIFIERDB);
		
		DBCollection c = db.getCollection(concept + "_positive");
		
		for (DBObject tweet : positiveSamples){
			InputTweetInfoExtractor infoExtr = new InputTweetInfoExtractor(tweet);
			
			DBObject storedTweet = new BasicDBObject();
			
			storedTweet.put("tweet_id", infoExtr.getId());
			storedTweet.put("text", infoExtr.getText());
			
			c.insert(storedTweet);
			
			try {
				Thread.sleep(1000);
			} catch (InterruptedException e) {
				// Restore the interrupt status before bailing out.
				Thread.currentThread().interrupt();
				e.printStackTrace();
				System.exit(-1);
			}
		}
	}
	
	/**
	 * Stores the manually confirmed negative samples in the
	 * "&lt;concept&gt;_negative" collection, keeping only tweet id and text.
	 * Sleeps one second after each insert — presumably to throttle writes;
	 * TODO confirm the pause is still needed.
	 */
	private static void writeNegativeSamples(String concept, List<DBObject> negativeSamples){
		DB db = m.getDB(CLASSIFIERDB);
		
		DBCollection c = db.getCollection(concept + "_negative");
		
		for (DBObject tweet : negativeSamples){
			InputTweetInfoExtractor infoExtr = new InputTweetInfoExtractor(tweet);
			
			DBObject storedTweet = new BasicDBObject();
			
			storedTweet.put("tweet_id", infoExtr.getId());
			storedTweet.put("text", infoExtr.getText());
			
			c.insert(storedTweet);
			
			try {
				Thread.sleep(1000);
			} catch (InterruptedException e) {
				// Restore the interrupt status before bailing out.
				Thread.currentThread().interrupt();
				e.printStackTrace();
				System.exit(-1);
			}
		}
	}

	/**
	 * Opens a connection to the MongoDB instance on localhost and stores it
	 * in the static field m. Exits the JVM if the host cannot be resolved.
	 */
	public static void connectToLocalMongoDB() {
		try {
			Mongo localMongo = new Mongo();
			m = localMongo;
		} catch (UnknownHostException e) {
			e.printStackTrace();
			System.exit(-1);
		}
	}
}
