package org.col.dspace.tagger;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.ObjectInputStream.GetField;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Vector;

import org.nzdl.kea.filters.KEAFilter;
import org.nzdl.kea.main.KEAKeyphraseExtractor;
import org.nzdl.kea.stemmers.PorterStemmer;
import org.nzdl.kea.stemmers.Stemmer;
import org.nzdl.kea.stopwords.Stopwords;
import org.nzdl.kea.stopwords.StopwordsEnglish;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.Utils;



/* Note: a single OnlineKeyphraseExtractor instance apparently cannot be
 * shared across requests, because the KEAFilter object held by
 * KEAKeyphraseExtractor is stateful and not safe to reuse.
 */
public class OnlineKeyphraseExtractor extends KEAKeyphraseExtractor{
	
	
	/**
	 * Sets the name of the KEA model to load.
	 *
	 * NOTE(review): this override only delegates to the superclass and adds no
	 * behavior; it can be removed unless it is intended as a subclass hook.
	 *
	 * @param str model name/path passed through to the superclass
	 */
	@Override
	public void setModelName(String str){
		super.setModelName(str);	
		
	}
	
	/**
	 * Configures this extractor with the stock English/AGROVOC setup: the
	 * bundled "en/model" KEA model, the AGROVOC vocabulary in SKOS format,
	 * UTF-8 input, English stemmer/stopwords, and 8 phrases per document.
	 */
	public void setDefaultOptions(){
		// Model and controlled vocabulary.
		setModelName("en/model");
		setVocabulary("agrovoc");
		setVocabularyFormat("skos");
		// Input handling.
		setEncoding("UTF-8");
		setDocumentLanguage("en");
		// Language processing components.
		setStemmer(new PorterStemmer());
		setStopwords(new StopwordsEnglish());
		// Number of keyphrases to extract per document.
		setNumPhrases(8);
	}
	
	/**
	 * Configures every extractor option in one call.
	 *
	 * @param modelName  name/path of the KEA model to load
	 * @param voc        vocabulary name (e.g. "agrovoc"), or "none"
	 * @param vocFormat  vocabulary format (e.g. "skos")
	 * @param encoding   character encoding of input documents, or "default"
	 * @param docLang    document language code (e.g. "en")
	 * @param stemmer    stemmer used for phrase normalisation
	 * @param stopwords  stopword list for the document language
	 * @param numPhrases maximum number of keyphrases to extract
	 */
	public void setOptions(String modelName,String voc, String vocFormat, String encoding, String docLang, Stemmer stemmer, Stopwords stopwords, int numPhrases){
		// Forward each value to the corresponding superclass setter.
		setModelName(modelName);
		setVocabulary(voc);
		setVocabularyFormat(vocFormat);
		setEncoding(encoding);
		setDocumentLanguage(docLang);
		setStemmer(stemmer);
		setStopwords(stopwords);
		setNumPhrases(numPhrases);
	}
	
	
	/**
	 * Extracts keyphrases from {@code inputtext} and appends the unstemmed
	 * phrases, in rank order, to {@code tags}.
	 *
	 * The extractor's current options (vocabulary, language, stemmer,
	 * stopwords, phrase count) are pushed into the KEA filter first; per the
	 * class note, that filter cannot be shared between requests.
	 *
	 * @param inputtext raw document text to extract keyphrases from
	 * @param tags      output list the phrases are appended to; at most
	 *                  {@code getNumPhrases()} entries are added
	 * @throws Exception if the thesaurus cannot be loaded or the KEA filter
	 *                   fails while processing the document
	 */
	public void extractKeyPhrases(String inputtext, ArrayList<String> tags) throws Exception{
		
		KEAFilter keaFilter = getKEAFilter();
		// Propagate this extractor's configuration to the filter.
		keaFilter.setNumPhrases(getNumPhrases());
		keaFilter.setVocabulary(getVocabulary());
		keaFilter.setVocabularyFormat(getVocabularyFormat());
		keaFilter.setDocumentLanguage(getDocumentLanguage());
		keaFilter.setStemmer(getStemmer());
		keaFilter.setStopwords(getStopwords());
		
		if (getVocabulary().equals("none")) {
			// No controlled vocabulary: disable the thesaurus-node feature.
			keaFilter.m_NODEfeature = false;
		} else {
			keaFilter.loadThesaurus(getStemmer(), getStopwords());
		}
		
		// KEA expects instances with three attributes: document text,
		// (optional) author-assigned keyphrases, and a file name.
		FastVector atts = new FastVector(3);
		atts.addElement(new Attribute("doc", (FastVector) null));
		atts.addElement(new Attribute("keyphrases", (FastVector) null));
		atts.addElement(new Attribute("filename", (String) null));
		Instances data = new Instances("keyphrase_training_data", atts, 0);
		
		System.err.println("-- Extracting keyphrases... ");
		double[] newInst = new double[2];
		newInst[0] = (double) data.attribute(0).addStringValue(inputtext);
		newInst[1] = Instance.missingValue(); // no author-assigned keyphrases
		data.add(new Instance(1.0, newInst));
		keaFilter.input(data.instance(0));
		// (The original reassigned data = data.stringFreeStructure() here;
		// the result was never used for this single-document case, so the
		// dead store has been removed.)
		
		Instance[] topRankedInstances = new Instance[getNumPhrases()];
		Instance inst;
		
		// Collect the filter's output, indexed by rank (ranks are 1-based).
		while ((inst = keaFilter.output()) != null) {
			int index = (int) inst.value(keaFilter.getRankIndex()) - 1;
			// Guard BOTH bounds: a malformed rank value from the filter must
			// not throw ArrayIndexOutOfBoundsException (original only checked
			// the upper bound).
			if (index >= 0 && index < getNumPhrases()) {
				topRankedInstances[index] = inst;
			}
		}
		
		// Emit the surviving phrases in rank order, skipping empty slots.
		for (int i = 0; i < getNumPhrases(); i++) {
			if (topRankedInstances[i] != null) {
				tags.add(topRankedInstances[i].stringValue(keaFilter.getUnstemmedPhraseIndex()));
			}
		}
	}

	/**
	 * Batch keyphrase extraction over a table of document stems, kept for
	 * reference (the implementation below is commented out). It presumably
	 * writes extracted phrases to per-document .key files — verify against
	 * the KEA sources before reviving.
	 *
	 * @throws IOException if a document or keyphrase file cannot be read
	 */
/*	public void extractKeyphrases(Hashtable stems) throws Exception {
		
		Vector stats = new Vector();
		
		// Check whether there is actually any data
		// = if there any files in the directory
		if (stems.size() == 0) {
			throw new Exception("Couldn't find any data!");
		}
		
		m_KEAFilter.setNumPhrases(m_numPhrases);    
		m_KEAFilter.setVocabulary(m_vocabulary);
		m_KEAFilter.setVocabularyFormat(m_vocabularyFormat);
		m_KEAFilter.setDocumentLanguage(getDocumentLanguage());
		m_KEAFilter.setStemmer(m_Stemmer);
		m_KEAFilter.setStopwords(m_Stopwords);
		
		if (getVocabulary().equals("none")) {
			m_KEAFilter.m_NODEfeature = false;
		} else {
			m_KEAFilter.loadThesaurus(m_Stemmer,m_Stopwords);
		}
		
		FastVector atts = new FastVector(3);
		atts.addElement(new Attribute("doc", (FastVector) null));
		atts.addElement(new Attribute("keyphrases", (FastVector) null));
		atts.addElement(new Attribute("filename", (String) null));
		Instances data = new Instances("keyphrase_training_data", atts, 0);
		
		if (m_KEAFilter.m_Dictionary == null) {
			buildGlobalDictionaries(stems);
		}
		
		System.err.println("-- Extracting keyphrases... ");
		// Extract keyphrases
		Enumeration elem = stems.keys();
		// Enumeration over all files in the directory (now in the hash):
		while (elem.hasMoreElements()) {	
			String str = (String)elem.nextElement();
			
			double[] newInst = new double[2];
			try {
				File txt = new File(m_dirName + "/" + str + ".txt");	
				InputStreamReader is;
				if (!m_encoding.equals("default")) {
					is = new InputStreamReader(new FileInputStream(txt), m_encoding);
				} else {
					is = new InputStreamReader(new FileInputStream(txt));
				}
				StringBuffer txtStr = new StringBuffer();
				int c;
				while ((c = is.read()) != -1) {
					txtStr.append((char)c);
				}
				
				newInst[0] = (double)data.attribute(0).addStringValue(txtStr.toString());
				
			} catch (Exception e) {
				if (m_debug) {
					System.err.println("Can't read document " + str + ".txt");
				}
				newInst[0] = Instance.missingValue();
			}
			try {
				File key = new File(m_dirName + "/" + str + ".key");
				InputStreamReader is; 
				if (!m_encoding.equals("default")) {
					is = new InputStreamReader(new FileInputStream(key), m_encoding);
				} else {
					is = new InputStreamReader(new FileInputStream(key));
				}
				StringBuffer keyStr = new StringBuffer();
				int c;
				
				// keyStr = keyphrases in the str.key file
				// Kea assumes, that these keyphrases were assigned by the author
				// and evaluates extracted keyphrases againse these
				
				while ((c = is.read()) != -1) {
					keyStr.append((char)c);
				}      
				
				newInst[1] = (double)data.attribute(1).addStringValue(keyStr.toString());
			} catch (Exception e) {
				if (m_debug) {
					System.err.println("No existing keyphrases for stem " + str + ".");
				}
				newInst[1] = Instance.missingValue();
			}
			
			
			data.add(new Instance(1.0, newInst));
			
			
			m_KEAFilter.input(data.instance(0));
			
			
			data = data.stringFreeStructure();
			if (m_debug) {
				System.err.println("-- Document: " + str);
			}
			Instance[] topRankedInstances = new Instance[m_numPhrases];
			Instance inst;
			
			// Iterating over all extracted keyphrases (inst)
			while ((inst = m_KEAFilter.output()) != null) {
				
				int index = (int)inst.value(m_KEAFilter.getRankIndex()) - 1;
				
				if (index < m_numPhrases) {
					topRankedInstances[index] = inst;
					
				}
			}
			
			if (m_debug) {
				System.err.println("-- Keyphrases and feature values:");
			}
			FileOutputStream out = null;
			PrintWriter printer = null; 
			File key = new File(m_dirName + "/" + str + ".key");
			if (!key.exists()) {
				out = new FileOutputStream(m_dirName + "/" + str + ".key");
				if (!m_encoding.equals("default")) {
					printer = new PrintWriter(new OutputStreamWriter(out, m_encoding));					
					
				} else {
					printer = new PrintWriter(out);
				}
			}
			double numExtracted = 0, numCorrect = 0;
			
			for (int i = 0; i < m_numPhrases; i++) {
				if (topRankedInstances[i] != null) {
					if (!topRankedInstances[i].
							isMissing(topRankedInstances[i].numAttributes() - 1)) {
						numExtracted += 1.0;
					}
					if ((int)topRankedInstances[i].
							value(topRankedInstances[i].numAttributes() - 1) == 1) {
						numCorrect += 1.0;
					}
					if (printer != null) {
						printer.print(topRankedInstances[i].
								stringValue(m_KEAFilter.getUnstemmedPhraseIndex()));
						System.out.print(topRankedInstances[i].
								stringValue(m_KEAFilter.getUnstemmedPhraseIndex()));
						System.out.println("\t" + Utils.
								doubleToString(topRankedInstances[i].
										value(m_KEAFilter.
												getProbabilityIndex()), 4));


						if (m_AdditionalInfo) {
							printer.print("\t");
							printer.print(topRankedInstances[i].
									stringValue(m_KEAFilter.getStemmedPhraseIndex()));
							printer.print("\t");
							printer.print(Utils.
									doubleToString(topRankedInstances[i].
											value(m_KEAFilter.
													getProbabilityIndex()), 4));
						}
						printer.println();
					}
					if (m_debug) {
						System.err.println(topRankedInstances[i]);
					}
				}
			}
			if (numExtracted > 0) {
				if (m_debug) {
					System.err.println("-- " + numCorrect + " correct");
				}
				stats.addElement(new Double(numCorrect));
			}
			if (printer != null) {
				printer.flush();
				printer.close();
				out.close();
			}
		}
		double[] st = new double[stats.size()];
		for (int i = 0; i < stats.size(); i++) {
			st[i] = ((Double)stats.elementAt(i)).doubleValue();
		}
		double avg = Utils.mean(st);
		double stdDev = Math.sqrt(Utils.variance(st));
		
		System.err.println("Avg. number of matching keyphrases compared to existing ones : " +
				Utils.doubleToString(avg, 2) + " +/- " + 
				Utils.doubleToString(stdDev, 2));
		System.err.println("Based on " + stats.size() + " documents");
		// m_KEAFilter.batchFinished();
	}*/
	

	/**
	 * Smoke test: loads the default model, extracts keyphrases from a bundled
	 * sample document, and prints them to stdout.
	 */
	public static void main(String[] args){
		
		OnlineKeyphraseExtractor ke = new OnlineKeyphraseExtractor();
		ke.setDefaultOptions();
		try {
			ke.loadModel();
		} catch (Exception e1) {
			e1.printStackTrace();
			return; // extraction cannot proceed without a model
		}
		
		InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream("en/traindocs/bostid_b02moe.txt");
		if (in == null) {
			// Original dereferenced a possibly-null stream and NPE'd here.
			System.err.println("Sample document en/traindocs/bostid_b02moe.txt not found on classpath");
			return;
		}
		
		BufferedReader bufReader = null;
		StringBuffer strBuffer = new StringBuffer();
		try {
			// The default options declare UTF-8; read the sample with the same
			// encoding instead of the platform default.
			bufReader = new BufferedReader(new InputStreamReader(in, "UTF-8"));
			String str;
			while ((str = bufReader.readLine()) != null) {
				// readLine() strips the terminator; re-add one so the last
				// word of each line is not glued to the first word of the next.
				strBuffer.append(str).append('\n');
			}
		} catch (IOException e) {
			e.printStackTrace();
			return;
		} finally {
			// Close on every path (original leaked the reader on exceptions).
			try {
				if (bufReader != null) {
					bufReader.close();
				} else {
					in.close();
				}
			} catch (IOException ignored) {
				// best-effort close; nothing useful to do here
			}
		}
		
		try {
			ArrayList<String> tags = new ArrayList<String>();
			ke.extractKeyPhrases(strBuffer.toString(), tags);
			for (String tag : tags) {
				System.out.println(tag);
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
