package nlppatents.apps;

import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;

//import java.util.Iterator;
//import java.util.TreeMap;
//import java.util.TreeSet;


import nlppatents.util.DictWord;
import nlppatents.util.POS;
import nlppatents.util.POSWord;
import nlppatents.util.Patent;
import nlppatents.util.PatentTxtDirectoryScanner;
import nlppatents.util.Phrase;
import nlppatents.util.Sentence;
import nlppatents.util.WordCategory;

/**
 * In this module, we generate the seed-categories used to classify patents. Seed-categories
 * are 'initial' categories generated after the first run of a very generic version of the 
 * Semlex algorithm.
 * 
 * Algorithm Explanation:
 *   In order to classify patents based on noun content, we need a set of noun-groups to 
 *   act as the dimensions that form the 'patent-space' where we will classify each
 *   patent.  As group seed-words in the original assignment algorithm (see project page) were 
 *   supposed to be generated manually (by hand), I needed to invent a method that would allow
 *   me to automatically generate an arbitrary number of new seed-word groups.  
 *   
 *   My solution is to use an initial run of the Semlex algorithm that treats every
 *   noun as its own category.  As the run progresses through the corpus, all colocations
 *   are noted by associating two nouns that are colocated.  This is done by keeping
 *   a 'colocation list' for every noun that can be colocated with another noun, and adding
 *   the second noun to the first noun's colocation list, and vice versa.  For example, 
 *   if we were to encounter the following in the corpus:
 *   
 *     NP: a black dog
 *     CC: and
 *     NP: a red cat
 *   
 *   'cat' would be added to dog's colocation list, and 'dog' would be added to cat's
 *   colocation list.  After the first run is completed, all of the colocation lists are
 *   sorted by number of elements (so 'dog' might have 20 associated nouns, and thus would
 *   have a sorting value of 20), and the top N largest noun lists are the initial seed
 *   categories.
 *   
 *   NOTE:  I may want to use coupling values (I.E. sum(collocation_values)) for noun lists instead
 *   of number of colocated words to determine group sorting values.
 *   NOTE2:  This would be harder as I would have to keep track of colocation values for each word
 *   
 *   I have chosen log2(number_of_patents) for N (the number of seed categories) to keep the
 *   number of dimensions manageable yet large enough (I believe) to adequately classify the corpus.
 *   
 * Algorithm Deficiencies:
 * 
 *   As I am using Semlex to make categories for Semlex, there are two main problems that I can
 *   foresee:  1) The initial word lists may have heavy overlap, and 2) the initial word lists
 *   may have high noise-to-signal ratios.  Later revisions of this system will likely include
 *   a better initial category picker.
 *   
 * @author bresee
 *
 */
public class GenerateCategories 
{
	/**
	 * Pairs a DictWord with the list of nouns it has been colocated with, and
	 * orders wrappers so that "better" seed-category candidates sort first
	 * (descending by colocated-list size, then descending by colocation value,
	 * then alphabetically for a stable, deterministic tiebreak).
	 */
	public static class DictWordWrapper implements Comparable<DictWordWrapper>
	{
		DictWord word;
		// arraylist=less memory, treeset=faster for big sets;  I opt for speed
		//TreeSet<DictWord> colocated;
		ArrayList<DictWord> colocated;
		float score; // cached score for final sorting (-1 = not yet computed)
		
		public DictWordWrapper(DictWord word)
		{
			this.word = word;
			//this.colocated = new TreeSet<DictWord>();
			this.colocated = new ArrayList<DictWord>();
			this.score = -1;
		}
		
		/**
		 * After a few hours of experimentation, I believe I've found a formula that 
		 * is *pretty* good:
		 * For 10k patents, we want the following values:
		 * 	- ~325 colocated words
		 *  - ~750 coloc-value
		 *  - ~10,000 (n) frequency
		 *  
		 *  colocated words grows *very* slowly, so we can hold this constant for now
		 *  coloc-value grows slowly but more linearly than #words, so we will choose n/12
		 *  frequency:  we will choose n for best frequency
		 *  
		 *  So, value function should be inverse of mean-squared distance (smaller-dist=better):
		 *    1 / ((325-colocated.size())**2 + (n/12 - colocValue)**2 + (n-frequency)**2)
		 *    
		 */
		/*public void calcScore()
		{
			final float COLOCSIZE = 325.0f;
			float N = GenerateCategories.numPatents;
			float CVAL = N/12.0f;
			
			float mySize = colocated.size();
			float myColoc = word.coloc;
			float myFreq = word.freq;
			
			mySize -= COLOCSIZE;
			mySize *= mySize;
			
			myColoc -= CVAL;
			myColoc *= myColoc;
			
			myFreq -= N;
			myFreq *= myFreq;
			
			this.score = 1 / (mySize + myColoc + myFreq);
		}*/
		
		
		@Override
		public int compareTo(DictWordWrapper w)
		{	
			/*if(this.score < 0.0f)
				this.calcScore();
			if(w.score < 0.0f)
				w.calcScore();
			
			if(score < w.score)
				return -1;
			else if(score > w.score)
				return 1;
			*/
			
			//default: string comparison at bottom
			
			
			int mySize = this.colocated.size();
			int size = w.colocated.size();
			// number of colocated words first (bigger=better, i.e. sorts earlier)
			if(mySize > size)
				return -1;
			else if(mySize < size)
				return 1;
			
			// colocation value second (bigger=better).  Compare instead of
			// subtracting: subtraction can overflow int, and the original
			// `word.coloc - w.word.coloc` sorted ASCENDING, contradicting
			// "bigger=better" and the descending primary key above.
			if(word.coloc > w.word.coloc)
				return -1;
			else if(word.coloc < w.word.coloc)
				return 1;
			
			// size-wise && coloc-wise these guys are equal, so go based on something arbitrary 
			// but consistent: string comparison
			return word.word.compareTo(w.word.word);
		}
	}
	
	// shooting for log_2(num_patents) categories to keep things manageable (~14 cats for 10k seems adequate)
	// word -> wrapper holding that word's colocation list; populated lazily via get()
	public static HashMap<DictWord, DictWordWrapper> colocWords;
	
	// inclusive patent-number range scanned by this run
	public static final int MINRANGE = 7000000;
	public static final int MAXRANGE = 7009999; //7009999;
	public static final int CAPACITY = 150000; // initial HashMap capacity; 100k should be fine for nouns (I hope :S)
	
	// MAXRANGE-MINRANGE+1; set in main(), kept for common-noun pruning heuristics
	public static int numPatents;
	
	/**
	 * Entry point.  Scans the configured patent range, builds colocation lists
	 * for every noun encountered, sorts the nouns by "seed quality" (see
	 * DictWordWrapper.compareTo), picks seed categories from the top of the
	 * ordering, and writes them out via saveCategories().
	 */
	public static void main(String args[])
	{
		long stime = System.currentTimeMillis();
		
		// TODO:  Read minRange and maxRange from the command line
		
		numPatents = MAXRANGE-MINRANGE+1;  // used later for common-noun pruning
		colocWords = new HashMap<DictWord, DictWordWrapper>(CAPACITY);
		
		// init relevant classes
		POS.init();
		DictWord.init();
		POSWord.init();
		
		PatentTxtDirectoryScanner scanner = new PatentTxtDirectoryScanner(MINRANGE, MAXRANGE);
		scanner.incFrequency = true; // frequencies are important for pruning out common words like 'it'
		
		// first pass over the corpus: record every noun-noun colocation
		Patent p;
		while((p = scanner.nextPatent()) != null)
		{
			processPatent(p);
		}
		
		// sort our DictWord Wrappers (best seed candidates first)
		//Object[] words = (colocWords.values().toArray(a));
		//DictWordWrapper[] wrappedWords = (DictWordWrapper[])words;
		DictWordWrapper[] words = (DictWordWrapper[]) (colocWords.values().toArray(new DictWordWrapper[0]));
		
		Arrays.sort(words);
		
		// TODO: report and we're done!
		System.out.println("Number of words in POSWord dictionary: " + POSWord.words.size());
		System.out.println("Number of words in DictWord dictionary: " + DictWord.dictionary.size());
		System.out.println("Number of DictWordWrappers in final collection: " + words.length);
		/*System.out.println("Top 50 words and their colocated words:");
		for(int i = 0; i < 100; i++)
		{
			DictWordWrapper dw = words[i];
			System.out.println("  " + i + ") word: \"" + dw.word.word + "\"");
			System.out.print("    colocated-words=" + dw.colocated.size() + ",  coloc-value=" + dw.word.coloc);
			System.out.print(",  freq=" + dw.word.freq); // + ",  patentCount=" + dw.word.patentCount);
			System.out.print('\n');
			
			Iterator<DictWord> itr = dw.colocated.iterator();
			System.out.print("    ");
			while(itr.hasNext())
			{
				System.out.print(itr.next().word);
				System.out.print(" ");
			}
			System.out.print("\n");
			
		}*/
		
		/*
		 *  EVERYTHING BELOW IS SLIGHTLY ARBITRARY:  Until I find
		 *  a better method of generating initial categories, this
		 *  will have to do
		 */
		
		ArrayList<WordCategory> categories = new ArrayList<WordCategory>();
		WordCategory W = null;
		
		// pick up to 30 category heads: the best-ranked words whose colocation
		// lists are not so large (< 400) that they are likely generic nouns
		System.out.println("Chose words (colocWords < 400)");
		int count = 0;
		int count2;
		for(int i = 0; (i < words.length) && (count < 30); i++)
		{
			if(words[i].colocated.size() < 400)
			{
				W = new WordCategory();
				categories.add(W);
				
				System.out.print("" + (count+1) + ": " + words[i].word.word);
				W.members.add(words[i].word);
				
				count++;
				
				count2 = 0;
				// now grab up to 6 words colocated with this head whose own
				// colocation-list sizes fall in the (300, 400) band
				for(int j = 0; (j < words[i].colocated.size()) && (count2 < 6); j++)
				{
					DictWord d = words[i].colocated.get(j);
					// NOTE(review): every DictWord in a colocated list was
					// registered via get(), so this lookup should never be null
					DictWordWrapper dw = colocWords.get(d);
					if((dw.colocated.size() < 400) && (dw.colocated.size() > 300))
					{
						System.out.print(", " + dw.word.word);
						W.members.add(dw.word);
						count2++;
					}
				}
				System.out.print("\n");
			}
		}
		saveCategories(categories);
		System.out.println("FINAL TIME: " + (System.currentTimeMillis() - stime));
	}
	
	/**
	 * Writes the generated seed categories to ../../seeds.txt, one
	 * "category:" header per category followed by one member word per line.
	 * IOExceptions are reported to stderr; the method does not propagate them.
	 * 
	 * @param categories the seed categories to persist
	 */
	public static void saveCategories(ArrayList<WordCategory> categories)
	{
		BufferedWriter writer = null;
		try 
		{
			writer = new BufferedWriter(new FileWriter("../../seeds.txt"));
			for(int i = 0; i < categories.size(); i++)
			{
				writer.write("category:\n");
				WordCategory W = categories.get(i);
				for(int j = 0; j < W.members.size(); j++)
				{
					writer.write(W.members.get(j).word);
					writer.write('\n');
				}
			}
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// close in finally so a failed write() no longer leaks the file
			// handle (the original only closed on the success path)
			if(writer != null)
			{
				try {
					writer.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
	}
	
	/**
	 * Feeds every sentence of a patent (title, abstract, claims, description)
	 * through processSentence() to accumulate colocation data.
	 * 
	 * @param patent the patent whose text sections are scanned
	 */
	public static void processPatent(Patent patent)
	{
		// the title is a single sentence
		processSentence(patent.title);
		
		// abstract section
		for(int idx = 0, n = patent.pAbstract.size(); idx < n; idx++)
		{
			processSentence(patent.pAbstract.get(idx));
		}
		
		// claims section
		for(int idx = 0, n = patent.pClaims.size(); idx < n; idx++)
		{
			processSentence(patent.pClaims.get(idx));
		}
		
		// description section
		for(int idx = 0, n = patent.pDescription.size(); idx < n; idx++)
		{
			processSentence(patent.pDescription.get(idx));
		}
	}
	
	/**
	 * Scans one sentence for colocated noun phrases.  Whenever a noun/prep
	 * phrase is joined to a later noun/prep phrase by conjunctions/commas
	 * (see colocForward), the head nouns of the two phrases are added to each
	 * other's colocation lists and both colocation counters are bumped.
	 * 
	 * @param sentence the parsed sentence to scan
	 */
	public static void processSentence(Sentence sentence)
	{
		int phraseCount = sentence.sentence.size();
		
		for(int idx = 0; idx < phraseCount; idx++)
		{
			Phrase phrase = sentence.sentence.get(idx);
			
			// only noun/prepositional phrases can start a colocation
			if((phrase.type != POS.noun) && (phrase.type != POS.prep))
				continue;
			
			// look ahead for a colocated partner phrase
			DictWord partner = colocForward(sentence, idx);
			if(partner == null)
				continue;
			
			// head noun of the CURRENT phrase (last word in the phrase)
			DictWord head = phrase.words.get(phrase.words.size()-1).dictWord;
			if(partner == head)
				continue; // same word colocated with itself: nothing to record
			
			// ensure that both words are in each other's colocated lists
			DictWordWrapper headWrap = get(head);
			DictWordWrapper partnerWrap = get(partner);
			
			if(!headWrap.colocated.contains(partner))
			{
				headWrap.colocated.add(partner);
			}
			if(!partnerWrap.colocated.contains(head))
			{
				partnerWrap.colocated.add(head);
			}
			
			// increase collocation values
			head.coloc++;
			partner.coloc++;
		}
	}
	
	/**
	 * Determines whether the phrase at sentence.get(index) is colocated with a
	 * following noun/prepositional phrase, i.e. whether it is followed by one
	 * or more conjunction/comma phrases and then another noun/prep phrase.
	 * 
	 * @param sentence the parsed sentence being scanned
	 * @param index position of the candidate phrase within the sentence
	 * @return the head noun (last word) of the colocated phrase, or null if
	 *         no colocated phrase follows
	 */
	public static DictWord colocForward(Sentence sentence, int index)
	{
		int len = sentence.size();
		
		// no room for a joining term (and, or, comma) plus a trailing phrase
		// when we are the 2nd-from-last phrase or later
		if(index >= len - 2)
			return null;
		
		int pos = index + 1;
		Phrase candidate = sentence.get(pos);
		boolean sawConnector = false;
		
		/* yes, I realize a crazy weird sentence like
		 * 	 "NP , and , or , and , , and or NP" 
		 * would be accepted here.  TODO: tighten the connector pattern.
		 */
		while((candidate.type == POS.cc) || (candidate.type == POS.comma))
		{
			sawConnector = true;
			pos++;
			if(pos >= len)
				return null; // sentence ended on a connector
			candidate = sentence.get(pos);
		}
		
		// need at least one connector AND a noun/prep phrase after it
		if(!sawConnector)
			return null;
		if((candidate.type != POS.noun) && (candidate.type != POS.prep))
			return null;
		
		// head noun = last word of the colocated phrase
		return candidate.words.get(candidate.words.size() - 1).dictWord;
	}
	
	/**
	 * Returns the DictWordWrapper for the given word, creating and registering
	 * a fresh one in colocWords on first access (lazy memoization).
	 * 
	 * @param word the dictionary word to look up
	 * @return the (possibly newly created) wrapper for that word
	 */
	public static DictWordWrapper get(DictWord word)
	{
		DictWordWrapper existing = colocWords.get(word);
		if(existing != null)
			return existing;
		
		DictWordWrapper created = new DictWordWrapper(word);
		colocWords.put(word, created);
		return created;
	}
}
