package edu.unc.ils.memai.extract;



import java.io.File;


import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import java.util.TreeMap;

import maui.stemmers.PorterStemmer;
import maui.stemmers.Stemmer;
import maui.util.Candidate;

import org.apache.commons.io.FileUtils;

import edu.unc.ils.memai.tokenize.MauiTokenizer;
import edu.unc.ils.memai.tokenize.Tokenizer;
import edu.unc.ils.memai.vocab.MauiVocabulary;
import edu.unc.ils.memai.vocab.Vocabulary;


public class LongestMatchCandidateExtractor extends CandidateExtractor
{
    /**
     * Command-line driver: configures a MauiVocabulary with a Porter stemmer
     * and an English stopword list, tokenizes a sample sentence, and prints
     * each extracted candidate as "title|termFrequency".
     *
     * NOTE(review): the stopword file and vocabulary store path are
     * hard-coded for a specific developer machine; this is a demo/debug
     * entry point only.
     */
    public static void main(String[] args) throws Exception {

        Vocabulary vocab = new MauiVocabulary();

        // Commons-IO readLines() returns a raw List here, hence the cast warning.
        @SuppressWarnings("unchecked")
        List<String> stopwords = FileUtils.readLines(new File("src/main/resources/stopwords_en.txt"), "UTF-8");

        Stemmer stemmer = new PorterStemmer();
        vocab.setStemmer(stemmer);
        vocab.setStopwords(stopwords);
        vocab.setThreshold(10);
        vocab.setLowerCase(true);
        vocab.load("/Users/cwillis/dev/hive/hive-data/nalt/naltStoreEn");

        Tokenizer tok = new MauiTokenizer();

        CandidateExtractor extractor = new LongestMatchCandidateExtractor();
        extractor.setMaxPhraseLength(5);
        extractor.setMinPhraseLength(1);
        extractor.setMinOccurFrequency(1);
        extractor.setVocabulary(vocab);
        extractor.setStopwords(stopwords);
        extractor.setStemmer(stemmer);

        String text = "Cotton (Gossypium hirsutum L.) fibers are trichomes that initiate from the ovule epidermis.";
        List<String> segments = tok.tokenize(text, false);
        Map<String, Candidate> candidates = extractor.getCandidates(segments);
        for (Candidate candidate : candidates.values()) {
            System.out.println(candidate.getTitle() + "|" + candidate.getTermFrequency());
        }
    }

    /**
     * Extracts vocabulary candidates from tokenized text, keeping only the
     * longest matching phrase starting at each word position.
     *
     * For every word, each phrase of 1..maxPhraseLength words ending at that
     * word is looked up in the vocabulary; every matching sense is recorded
     * together with the phrase's starting word index. A second pass over the
     * (sorted) start positions then keeps only the longest phrase at each
     * position and suppresses any match that starts inside it.
     *
     * @param text list of text segments (e.g. sentences) whose words are
     *             separated by single spaces; phrases never span segment
     *             boundaries (the lookback counter resets per segment)
     * @return map from vocabulary sense id to the winning {@link Candidate}
     * @throws Exception propagated from the vocabulary lookup
     */
    @Override
    public Map<String, Candidate> getCandidates(List<String> text)
            throws Exception {

        // Candidates keyed by vocabulary sense id.
        Map<String, Candidate> candidateMap = new TreeMap<String, Candidate>();

        // Candidates keyed by the start word index of the matched phrase;
        // a TreeMap so the longest-match pass walks positions in order.
        Map<Integer, List<Candidate>> positionMap = new TreeMap<Integer, List<Candidate>>();

        // Sliding windows holding the last maxPhraseLength stemmed / original
        // words; newest word lives at index maxPhraseLength - 1.
        String[] phraseArray = new String[maxPhraseLength];
        String[] origPhraseArray = new String[maxPhraseLength];

        int wordPos = 0;
        for (String segment : text) {
            // Words seen so far in this segment; caps the phrase lookback so
            // stale window entries from a previous segment are never used.
            int numSeen = 0;

            StringTokenizer wordTok = new StringTokenizer(segment, " ");
            while (wordTok.hasMoreTokens()) {
                String origWord = wordTok.nextToken();
                String word = origWord;
                if (stemmer != null)
                    word = stemmer.stem(word);

                // Shift both windows left and append the current word.
                for (int i = 0; i < maxPhraseLength - 1; i++) {
                    phraseArray[i] = phraseArray[i + 1];
                    origPhraseArray[i] = origPhraseArray[i + 1];
                }
                phraseArray[maxPhraseLength - 1] = word;
                origPhraseArray[maxPhraseLength - 1] = origWord;

                numSeen++;
                if (numSeen > maxPhraseLength)
                    numSeen = maxPhraseLength;

                // Build and look up every phrase of length 1..numSeen ending
                // at the current word, growing the buffers from the front.
                StringBuilder phraseBuffer = new StringBuilder();
                StringBuilder origPhraseBuffer = new StringBuilder();
                for (int i = 1; i <= numSeen; i++) {
                    if (i > 1) {
                        phraseBuffer.insert(0, ' ');
                        origPhraseBuffer.insert(0, ' ');
                    }
                    phraseBuffer.insert(0, phraseArray[maxPhraseLength - i]);
                    origPhraseBuffer.insert(0, origPhraseArray[maxPhraseLength - i]);

                    String phrase = phraseBuffer.toString();
                    String origPhrase = origPhraseBuffer.toString();

                    // Index of the first word of this i-word phrase.
                    int startPos = wordPos - (i - 1);

                    // Lookup senses for the (stemmed) phrase.
                    List<String> senses = vocab.getSenses(phrase);
                    for (String id : senses) {
                        Candidate candidate = candidateMap.get(id);
                        if (candidate == null) {
                            // BUG FIX: the first occurrence used to be recorded
                            // at wordPos (the phrase's LAST word) while repeat
                            // occurrences used wordPos - i (one BEFORE the first
                            // word). Both now use startPos, the phrase's first
                            // word index — the same key used in positionMap.
                            candidate = new Candidate(phrase, origPhrase, startPos);
                            candidate.setTitle(vocab.getTerm(id));
                            candidate.setName(id);
                            candidateMap.put(id, candidate);
                        } else {
                            candidate.recordOccurrence(origPhrase, startPos);
                        }

                        List<Candidate> posMatches = positionMap.get(startPos);
                        if (posMatches == null) {
                            posMatches = new ArrayList<Candidate>();
                            positionMap.put(startPos, posMatches);
                        }
                        posMatches.add(candidate);
                    }
                }
                wordPos++;
            }
        }

        // Second pass: visit start positions in ascending order, keep only the
        // longest phrase at each, and skip positions inside a phrase already
        // taken. Only return the longest matching phrase per position.
        Map<String, Candidate> finalMatch = new HashMap<String, Candidate>();
        int nextPos = 0;
        for (Integer pos : positionMap.keySet()) {
            if (pos < nextPos)
                continue;

            Candidate longestMatch = null;
            int longestLength = 0;
            for (Candidate candidate : positionMap.get(pos)) {
                int length = candidate.getBestFullForm().split(" ").length;
                if (length > longestLength) {
                    longestMatch = candidate;
                    longestLength = length;
                }
            }
            // BUG FIX: nextPos was previously updated inside the loop above,
            // so it reflected the length of the LAST candidate examined, not
            // the longest. Advance past the longest match so every phrase
            // nested inside it is suppressed.
            nextPos = pos + longestLength;
            finalMatch.put(longestMatch.getName(), longestMatch);
        }
        return finalMatch;
    }
}