package edu.uta.cse6339.facetedinterface.classifier.src.facetedExplorationMain;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeMap;
import java.util.regex.Pattern;

public class ProcessNewsArticles
{
	// Highest fileId handed out + 1; set by processArticles, read by getFeatureVectors.
	int numberOfFiles = 0;
	// Importance weights for token occurrences: keyword hit, head/middle/tail of article.
	int impScore = 30;
	int headScore = 20;
	int midScore = 5;
	int tailScore = 10;
	boolean isTraining = true;
	// Union of all tokens selected into any training document's feature vector;
	// forms the attribute set for the classifier.
	static Set<String> allTokensInFeature = new HashSet<String>();
	// keyword -> set of fileIds containing it (populated only for test data).
	static HashMap<String, HashSet<Integer>> hateCrimeDocTokens = new HashMap<String, HashSet<Integer>>();
	static HashMap<String, HashSet<Integer>> rowKeyDocTokens = new HashMap<String, HashSet<Integer>>();
	static HashMap<String, HashSet<Integer>> univKeyDocTokens = new HashMap<String, HashSet<Integer>>();

	// Token delimiters: whitespace, tabs, hyphens, dots, commas. Compiled once
	// instead of per processArticles call.
	private static final Pattern TOKEN_DELIMITER = Pattern.compile("[\\s]+|[\\t]+|[-]+|[\\.]+|[,]");

	// outputfile paths..
	String rowOutFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/rowOutFile.txt";
	String hateCrimeOutFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/hateCrimeOutFile.txt";
	String univOutFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/universityOutFile.txt";
	String labelOutFile = "/home/lakshmanas/workspace/cse6339-faceted-interface/data/output/labelOutFile.txt";

	/**
	 * Reads every article file in folderPath, builds the per-document token
	 * index, and returns a dataFeature holding the id mappings and the
	 * per-document feature vectors.
	 *
	 * @param folderPath directory containing one article per file; the first
	 *                   line of each file is the document identifier (link)
	 * @param isTraining when true, selected feature tokens are accumulated into
	 *                   allTokensInFeature; when false, facet keyword indexes
	 *                   (hate crime / university / row) are populated instead
	 */
	public dataFeature processArticles(String folderPath, boolean isTraining)
	{
		this.isTraining = isTraining;
		// Global document frequency: word -> number of documents it appears in.
		HashMap<String, Integer> globalWords = new HashMap<String, Integer>();
		dataFeature df = new dataFeature();
		// All documents: fileId -> (token -> positions the token occurs at).
		HashMap<Integer, docDetails> allDocs = new HashMap<Integer, docDetails>();
		processArticles(folderPath, globalWords, df.docIdMapper, df.invertedDocIdMapper, allDocs);
		// Articles are in memory now; derive the feature vector for each one.
		df.featureVector = getFeatureVectors(globalWords, allDocs);
		// Drop the large intermediate structures so they can be collected.
		allDocs = null;
		globalWords = null;
		return df;
	}

	/**
	 * Scores one token of one document: term frequency * document frequency *
	 * average positional importance of its occurrences.
	 *
	 * NOTE(review): the weight grows with document frequency (tf*df), which is
	 * the opposite of tf-idf — confirm this is intended.
	 *
	 * @param positions  occurrence positions; -1 marks a column-keyword hit
	 * @param totalWords total token count of the document
	 */
	private double getTokenScore(ArrayList<Integer> positions, HashMap<String, Integer> globalWords, int totalWords, String token)
	{
		int tf = positions.size();
		int df = globalWords.get(token);
		double imp = 0;
		// compute the importance...
		for(int pos : positions)
		{
			if(pos == -1)
			{
				// Keyword occurrence: fixed high importance.
				// BUG FIX: previously fell through and also added a positional
				// score computed from the sentinel value -1.
				imp += impScore;
				continue;
			}
			// Relative position of this occurrence within the article, in [0,1].
			// BUG FIX: the old code computed (totalWords - pos)/3 in integer
			// arithmetic and compared it to 1/3 and 2/3, which are both 0 as
			// int divisions — the head branch was effectively unreachable and
			// nearly everything scored as tail. Per the comments, the intent
			// is: first third = head, last third = tail, otherwise middle.
			double articlePos = ((double) pos) / totalWords;
			if(articlePos < 1.0/3)
			{
				// belongs to the beginning of the article..
				imp += headScore;
			}
			else if(articlePos > 2.0/3)
			{
				// belongs to the end of the article..
				imp += tailScore;
			}
			else
			{
				// belongs to the middle of the article..
				imp += midScore;
			}
		}
		// Average the importance over all places the token appears.
		// (positions is never empty: every token gets at least one entry.)
		imp = imp/positions.size();
		double score = tf*df*imp;
		return(score);
	}

	/**
	 * Builds, for every document, a feature vector of its top-weighted tokens
	 * (at most ClassifierHelper.numberOfDimensions of them).
	 *
	 * @return fileId -> (token -> weight)
	 */
	private HashMap<Integer, HashMap<String, Double>> getFeatureVectors(HashMap<String, Integer> globalWords,
			HashMap<Integer, docDetails> allDocs)
	{
		HashMap<Integer, HashMap<String, Double>> featureVector = new HashMap<Integer, HashMap<String, Double>>();
		// fileIds are dense in [1, numberOfFiles) -- see processArticles.
		for(int i=1; i<numberOfFiles; i++)
		{
			docDetails docd = allDocs.get(i);
			if(docd == null)
			{
				// Defensive: no parsed document under this id.
				continue;
			}
			// weight -> tokens with that weight, so we can walk weights in order.
			TreeMap<Double, ArrayList<String>> sortedTokens = new TreeMap<Double, ArrayList<String>>();
			HashMap<String, Double> feature = new HashMap<String, Double>();
			int totalWords = docd.totalWords;
			for(String token : docd.doc.keySet())
			{
				double weight = getTokenScore(docd.doc.get(token), globalWords, totalWords, token);
				ArrayList<String> tokens = sortedTokens.get(weight);
				if(tokens == null)
				{
					tokens = new ArrayList<String>();
					sortedTokens.put(weight, tokens);
				}
				tokens.add(token);
			}
			// Walk weights in descending order and keep the top tokens.
			int cnt = 0;
			collect:
			for(double wt : sortedTokens.descendingKeySet())
			{
				for(String word : sortedTokens.get(wt))
				{
					// BUG FIX: the old code incremented cnt and broke on
					// equality BEFORE storing the word, so only
					// numberOfDimensions-1 tokens were ever kept.
					feature.put(word, wt);
					if(isTraining)
					{
						// Accumulate the global attribute set for training.
						allTokensInFeature.add(word);
					}
					cnt++;
					if(cnt >= ClassifierHelper.numberOfDimensions)
					{
						break collect;
					}
				}
			}
			featureVector.put(i, feature);
		}
		return featureVector;
	}

	/**
	 * Reads every regular file in folderPath. For each file: the first line is
	 * the document identifier; every subsequent line is tokenized, lowercased,
	 * stop-word filtered, and each surviving token's position is recorded.
	 * Column keywords get the sentinel position -1. Files whose first line is
	 * missing are skipped WITHOUT consuming a fileId, so ids stay dense.
	 * On return, numberOfFiles = last assigned fileId + 1.
	 */
	private void processArticles(String folderPath, HashMap<String, Integer> globalWords,
			HashMap<Integer, String> DocIdMapper,
			HashMap<String, Integer> invertedDocIdMapper,
			HashMap<Integer, docDetails> AllDocs)
	{
		int numf = 0;
		int fileId = 1;
		File folder = new File(folderPath);
		File[] files = folder.listFiles();
		if(files == null)
		{
			// BUG FIX: listFiles() returns null for a missing/unreadable
			// directory; the old code would NPE here.
			System.out.println("Number of files = " + numf);
			numberOfFiles = fileId;
			return;
		}
		for(int i=0; i<files.length; i++)
		{
			if(!files[i].isFile())
			{
				continue;
			}
			numf++;
			String filePath = files[i]+"";
			try
			{
				// NOTE(review): FileReader uses the platform default charset —
				// confirm the corpus encoding matches, or switch to an
				// explicit-charset reader.
				BufferedReader br = new BufferedReader(new FileReader(filePath));
				try
				{
					String line = br.readLine();
					if(line == null)
					{
						System.out.println("Error in the first line of this file.. ignoring " + filePath);
						// fileId is intentionally NOT advanced: ids stay dense.
						continue;
					}
					docDetails docd = new docDetails();
					int now = 0;
					line = line.trim();
					DocIdMapper.put(fileId, line);
					invertedDocIdMapper.put(line, fileId);
					while((line = br.readLine()) != null)
					{
						for(String token : TOKEN_DELIMITER.split(line))
						{
							token = token.trim().toLowerCase();
							if(FacetedExplorationMain.stopWords.contains(token))
							{
								continue;
							}
							now++;
							// Column keywords are flagged with sentinel -1
							// instead of a real position.
							int pos = FacetedExplorationMain.colKeyWords.contains(token) ? -1 : now;
							ArrayList<Integer> tokenPos = docd.doc.get(token);
							if(tokenPos == null)
							{
								tokenPos = new ArrayList<Integer>();
								docd.doc.put(token, tokenPos);
							}
							// BUG FIX: the old code advanced 'now' a second
							// time for repeated tokens, skewing positions and
							// the document word count.
							tokenPos.add(pos);
							if(!isTraining)
							{
								// Facet keyword indexes are needed only for
								// test data; the classifier must not see them.
								recordTestKeywords(token, fileId);
							}
						}
					}
					docd.totalWords = now;
					AllDocs.put(fileId, docd);
					// Update the global document-frequency table: each word of
					// this document counts once.
					for(String word : docd.doc.keySet())
					{
						Integer cnt = globalWords.get(word);
						globalWords.put(word, cnt == null ? 1 : cnt + 1);
					}
				}
				finally
				{
					br.close();
				}
			}
			catch(IOException ioe)
			{
				ioe.printStackTrace();
			}
			fileId++;
		}
		System.out.println("Number of files = " + numf);
		numberOfFiles = fileId;
	}

	// Records test-time facet keyword hits (hate crime, university, row/state)
	// for a single token of document fileId.
	private void recordTestKeywords(String token, int fileId)
	{
		if(FacetedExplorationMain.hateCrimeWords.contains(token))
		{
			populateKeyWordList(hateCrimeDocTokens, fileId, token);
		}
		// populate the university related keywords...
		if(FacetedExplorationMain.universityKeyWords.contains(token))
		{
			populateKeyWordList(univKeyDocTokens, fileId, token);
		}
		if(FacetedExplorationMain.rowKeyWords.contains(token))
		{
			populateKeyWordList(rowKeyDocTokens, fileId, token);
		}
		else if(FacetedExplorationMain.stateAbbrKeywords.containsKey(token))
		{
			// Index state names under their abbreviation. (Previously this
			// branch hand-rolled the same logic as populateKeyWordList.)
			String abbr = FacetedExplorationMain.stateAbbrKeywords.get(token);
			populateKeyWordList(rowKeyDocTokens, fileId, abbr);
		}
	}

	// Adds fileId to the document set of token in hm, creating the set on first use.
	private void populateKeyWordList(HashMap<String, HashSet<Integer>> hm, int fileId, String token)
	{
		HashSet<Integer> docList = hm.get(token);
		if(docList == null)
		{
			docList = new HashSet<Integer>();
			hm.put(token, docList);
		}
		docList.add(fileId);
	}

	/**
	 * Writes labelOutFile: one line per label in the form
	 * "className;doc1,doc2,...".
	 */
	public void printLabelOutputFile(HashMap<Integer, String> docIdMapper, HashMap<Integer, ArrayList<Integer>> labelDocsInvertedIndex)
	{
		File outFile1 = new File(labelOutFile);
		Writer out1 = null;
		try
		{
			out1 = new BufferedWriter(new FileWriter(outFile1));
			for(Integer label : labelDocsInvertedIndex.keySet())
			{
				ArrayList<Integer> docIds = labelDocsInvertedIndex.get(label);
				System.out.println("Number of docs in type " + label + " = " + docIds.size());
				// Build the line with StringBuilder instead of repeated String
				// concatenation.
				StringBuilder line = new StringBuilder();
				line.append(ClassifierHelper.typeIdClassMapper.get(label)).append(";");
				for(int i=0; i<docIds.size(); i++)
				{
					if(i > 0)
					{
						line.append(",");
					}
					line.append(docIdMapper.get(docIds.get(i)));
				}
				out1.write(line.toString() + "\n");
			}
		}
		catch(Exception e)
		{
			e.printStackTrace();
		}
		finally
		{
			// BUG FIX: the writer was previously closed inside the try block
			// and leaked whenever an exception occurred mid-write.
			closeQuietly(out1);
		}
	}

	// Dumps the three facet keyword indexes to their output files.
	public void printKeyWordOutputToFile(HashMap<Integer, String> docIdMapper)
	{
		printHMtoFile(rowKeyDocTokens, rowOutFile, docIdMapper);
		printHMtoFile(hateCrimeDocTokens, hateCrimeOutFile, docIdMapper);
		printHMtoFile(univKeyDocTokens, univOutFile, docIdMapper);
	}

	/**
	 * Writes one line per keyword in the form "keyword;doc1,doc2,...", mapping
	 * fileIds back to document identifiers via docIdMapper.
	 */
	private void printHMtoFile(HashMap<String, HashSet<Integer>> hm, String filepath, HashMap<Integer, String> docIdMapper)
	{
		File outFile1 = new File(filepath);
		Writer out1 = null;
		try
		{
			out1 = new BufferedWriter(new FileWriter(outFile1));
			for(String token : hm.keySet())
			{
				StringBuilder line = new StringBuilder(token).append(";");
				boolean first = true;
				for(Integer docId : hm.get(token))
				{
					if(!first)
					{
						line.append(",");
					}
					line.append(docIdMapper.get(docId));
					first = false;
				}
				out1.write(line.toString() + "\n");
			}
		}
		catch(Exception e)
		{
			e.printStackTrace();
		}
		finally
		{
			// BUG FIX: close moved to finally so the writer is not leaked on error.
			closeQuietly(out1);
		}
	}

	// Best-effort close; a failure to close has nothing actionable here.
	private static void closeQuietly(Writer w)
	{
		if(w == null)
		{
			return;
		}
		try
		{
			w.close();
		}
		catch(IOException ignored)
		{
			// intentionally ignored: best-effort cleanup
		}
	}
}

// Per-document token index built by ProcessNewsArticles.processArticles.
class docDetails
{
	// token -> list of word positions at which the token occurs in the document.
	// A position of -1 is a sentinel marking an occurrence of a column keyword
	// (see ProcessNewsArticles.processArticles).
	HashMap<String, ArrayList<Integer>> doc = new HashMap<String, ArrayList<Integer>>();
	// Running word count of the document; overwritten by processArticles once
	// the file is read. Initialized to 1, presumably to keep position math safe
	// before it is set — TODO confirm.
	int totalWords = 1;
}

// Result bundle returned by ProcessNewsArticles.processArticles: feature
// vectors plus the docId <-> document-identifier mappings.
class dataFeature
{
	// Per-document feature vector: docId -> (token -> weight), built from each
	// article's top-weighted tokens.
	HashMap<Integer, HashMap<String, Double>> featureVector = new HashMap<Integer, HashMap<String, Double>>();
	// docId -> document identifier. The identifier is the link read from the
	// first line of the article file.
	HashMap<Integer, String> docIdMapper = new HashMap<Integer, String>();
	// Inverted index of the above: document identifier (link) -> docId.
	HashMap<String, Integer> invertedDocIdMapper = new HashMap<String, Integer>();
}
