package com.kyubi.learner;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import java.util.Vector;

import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.converters.ConverterUtils.DataSource;

import com.kyubi.common.config.ConfigKeys;
import com.kyubi.common.config.Configuration;
import com.kyubi.common.data.Instances;
import com.kyubi.common.data.XMLToInstanceConvertor;
import com.kyubi.common.grammar.Tree;
import com.kyubi.tagger.Term;
import com.kyubi.tagger.decorators.CachingDecorator;
import com.kyubi.tagger.decorators.Lowercase;
import com.kyubi.tagger.decorators.PorterStemmer;
import com.kyubi.tagger.decorators.TagCollector;
import com.kyubi.tagger.taggers.StanfordTreeTagger;
import com.kyubi.tagger.taggers.XMLWrapper;

/**
 * @deprecated
 * @author anand
 *
 */
public class TagTemplateExtractor {

	// Path of the XML training file passed to the constructor.
	private String trainFilePath = null;
	// Never assigned in this class; presumably reserved for evaluation — TODO confirm.
	private String testFilePath = null;
	// Shared configuration singleton (paths, templates, tag lists).
	private Configuration config = null;
	// Training data converted from XML by XMLToInstanceConvertor.
	private Instances trainInsts = null;
	// Tag names whose surrounding templates should be learned.
	private String[] tagsToLearn = null;
	// template index -> ARFF-formatted instance rows for that template.
	private HashMap<Integer, Vector<String>> classifierInstances = null;
	// template index -> WEKA instances loaded back from the dumped ARFF file.
	private HashMap<Integer, weka.core.Instances> wekaClassInstances = null;

	// Unique POS tags, lazily loaded from the configured tag file.
	private Set<String> uniqTags = null;
	// Same tags as a comma-separated string, used in the ARFF header.
	private String uniqTagsString = null;

	// Stemmed, lowercased positive class values read from the class file.
	private Set<String> trainClass = null;

	// When true, dumpInstanceToFile appends and skips the ARFF header.
	private boolean append = false;

	/**
	 * Builds an extractor for the given training data and class list.
	 *
	 * @param trainFilePath      XML file holding the training instances
	 * @param trainClassFilePath file with one positive class value per line
	 * @param tags               tag names whose templates should be learned
	 */
	public TagTemplateExtractor(String trainFilePath, String trainClassFilePath,
			String[] tags) {
		this.trainFilePath = trainFilePath;
		this.tagsToLearn = tags;
		this.config = Configuration.getSingleton();

		this.classifierInstances = new HashMap<Integer, Vector<String>>();
		this.wekaClassInstances = new HashMap<Integer, weka.core.Instances>();
		this.trainClass = new HashSet<String>();

		// Convert the XML training file into the internal instance format.
		XMLToInstanceConvertor convertor = new XMLToInstanceConvertor(
				trainFilePath);
		this.trainInsts = convertor.convert();

		readTrainClass(trainClassFilePath);
	}

	/**
	 * Reads the training class file (one value per line), trims, lowercases
	 * and stems each non-empty line, and stores the result in
	 * {@link #trainClass}.
	 *
	 * @param path path to the class list file
	 */
	private void readTrainClass(String path) {
		BufferedReader rdr;
		net.sf.snowball.ext.PorterStemmer stemmer = new net.sf.snowball.ext.PorterStemmer();

		try {
			rdr = new BufferedReader(new FileReader(path));
		} catch (FileNotFoundException fnf) {
			fnf.printStackTrace();
			// BUGFIX: the original fell through with a null reader and threw
			// an NPE on the first readLine(); bail out instead.
			return;
		}

		try {
			String line;
			while ((line = rdr.readLine()) != null) {
				if (!line.equals("")) {
					stemmer.setCurrent(line.trim().toLowerCase());
					stemmer.stem();
					trainClass.add(stemmer.getCurrent());
				}
			}
		} catch (IOException io) {
			io.printStackTrace();
		} finally {
			// BUGFIX: the original never closed the reader (file-handle leak).
			try {
				rdr.close();
			} catch (IOException ignored) {
				// best-effort close; nothing sensible to do here
			}
		}
	}

	/**
	 * Creates a nominal WEKA attribute whose possible values are the unique
	 * POS tags listed in the configured tag file.
	 *
	 * @param label name of the attribute
	 * @return nominal {@link Attribute} over all unique tags
	 */
	private Attribute createTagAttribute(String label) {
		if (uniqTags == null) {
			// BUGFIX: resolve the configured VALUES. The original concatenated
			// the ConfigKeys key constants themselves, which is not a file
			// path — every other call site (dumpInstanceToFile,
			// prepareTrainingSet) uses config.get(...).
			uniqTags = config.getUniqLinesFromFile(config
					.get(ConfigKeys.BASEURL)
					+ config.get(ConfigKeys.UNIQ_TAGS_PATH));
		}

		FastVector v = new FastVector(40);

		for (String tag : uniqTags) {
			v.addElement(tag);
		}

		return new Attribute(label, v);
	}

	/**
	 * Writes the given instance rows to an ARFF file. A header (relation,
	 * attribute and @DATA declarations) is emitted unless we are appending
	 * to an existing file.
	 *
	 * @param instances pre-formatted, comma-separated instance rows
	 * @param numAttrs  total number of attributes, including the class
	 * @param file      destination file
	 * @return true if everything was written and the writer closed cleanly
	 */
	private boolean dumpInstanceToFile(Vector<String> instances, int numAttrs,
			File file) {
		if (uniqTagsString == null) {
			uniqTagsString = config.getCommaSeparatedLinesFromFile(config
					.get(ConfigKeys.BASEURL)
					+ config.get(ConfigKeys.UNIQ_TAGS_PATH));
		}

		BufferedWriter wrtr;
		try {
			wrtr = new BufferedWriter(new FileWriter(file, this.append));
		} catch (IOException ioe) {
			ioe.printStackTrace();
			return false;
		}

		boolean ok = true;
		try {
			// Skip the header when appending to an existing file.
			if (!append) {
				wrtr.write("@RELATION TEMPLATE_LEARNER\n\n");

				for (int i = 0; i < numAttrs; i++) {
					if (i == numAttrs - 1) {
						// The last attribute is the (numeric) class label.
						wrtr.write("@ATTRIBUTE CLASS NUMERIC\n");
					} else {
						wrtr.write("@ATTRIBUTE " + "ATTR_" + String.valueOf(i)
								+ " {" + uniqTagsString + "}\n");
					}
				}

				wrtr.write("\n@DATA\n");
			}

			// Write the data rows.
			for (int i = 0; i < instances.size(); i++) {
				wrtr.write(instances.get(i) + "\n");
			}
		} catch (IOException io) {
			io.printStackTrace();
			ok = false;
		} finally {
			// BUGFIX: the original leaked the writer on every failure path.
			try {
				wrtr.close();
			} catch (IOException ioe) {
				ioe.printStackTrace();
				ok = false;
			}
		}

		return ok;
	}

	/**
	 * Builds the training set: collects tagged phrases from the training
	 * data, extracts a POS-tag template (per configured template shape)
	 * around each top term, labels it 1/0 depending on membership in
	 * {@link #trainClass}, dumps each template group to a temporary ARFF
	 * file and loads it back as WEKA instances into
	 * {@link #wekaClassInstances}.
	 *
	 * @param append when true, appends to existing ARFF files and skips
	 *               their headers
	 */
	public void prepareTrainingSet(boolean append) {
		this.append = append;

		// Load the stop-word list and stem every entry so comparisons happen
		// in the same (stemmed) space as the collected terms.
		Set<String> stopWords = config.getUniqLinesFromFile(config.get(ConfigKeys.BASEURL) + config.get(ConfigKeys.STOPWORDS_PATH));
		Set<String> stemmedStopWords = new HashSet<String>();
		net.sf.snowball.ext.PorterStemmer stemmer = new net.sf.snowball.ext.PorterStemmer();
		for(String stWord : stopWords) {
			stemmer.setCurrent(stWord);
			stemmer.stem();
			stemmedStopWords.add(stemmer.getCurrent());
		}
		
		// Tagger pipeline: XML wrapper -> lowercase -> Porter stemming ->
		// caching -> Stanford tree tagging, feeding the phrase collector.
		TagCollector tc = new TagCollector(new StanfordTreeTagger(
				new CachingDecorator(new PorterStemmer(new Lowercase(new XMLWrapper(trainInsts))),
						true)), tagsToLearn, stemmedStopWords);
		tc.collectPhrases();

		ArrayList<Term> topTerms = tc.getTopTagWords();
		Vector<String> taggedValues = tc.getTaggedValues();

		for (Term t : topTerms) {
			// Re-parse the tagged sentence this term came from to recover the
			// original tokens and their POS tags.
			Tree tree = new Tree();
			tree.parse(taggedValues.get((int) t.lineNum));
			Vector<String> origSentence = tree.getOriginalSentence();
			Vector<String> posTags = tree.getPOSTags();

			String[] termTokens = t.value.split("[ ]+");

			// Find where the (multi-token) term occurs in the sentence.
			// sIndex ends up as the index of the term's LAST token; it stays
			// 0 if no full match is found.
			// NOTE(review): on a mismatch the current token is not re-tested
			// against termTokens[0], so overlapping partial matches can be
			// missed — confirm whether that matters for this data.
			int tIndex = 0;
			int sIndex = 0;
			for (int i = 0; i < origSentence.size(); i++) {
				if (origSentence.get(i).equals(termTokens[tIndex])) {
					if (tIndex == termTokens.length - 1) {
						sIndex = i;
						break;
					}

					tIndex++;
				} else {
					tIndex = 0;
				}
			}

			// Each configured template is a (leftBuffer, rightBuffer) pair.
			Vector<String[]> templates = config.getTuples(ConfigKeys.TEMPLATES);
			if (templates != null) {
				for (int i = 0; i < templates.size(); i++) {
					String[] tuple = templates.get(i);

					if (tuple.length == 2) {
						int leftBuff = Integer.parseInt(tuple[0]);
						int rightBuff = Integer.parseInt(tuple[1]);

						// NOTE(review): the term's first token is at
						// sIndex - termTokens.length + 1; passing
						// "- 1" here looks like an off-by-two — confirm
						// the intended left-context start before relying
						// on these templates.
						StringBuffer template = extractTemplate(posTags, t,
								(sIndex - termTokens.length - 1), sIndex,
								leftBuff, rightBuff);

						if (template != null) {
//							System.out.println(t.value);
							// Class label: 1 when the term is a known
							// positive training class, else 0.
							if (trainClass.contains(t.value)) {
								template.append("1");
							} else {
								template.append("0");
							}

							// save instance for corresponding classifier
							if (classifierInstances.containsKey(i)) {
								Vector<String> insts = classifierInstances
										.get(i);
								insts.add(template.toString().trim());
							} else {
								Vector<String> insts = new Vector<String>();
								insts.add(template.toString().trim());
								classifierInstances.put(i, insts);
							}
						}
					}
				}
			}
		}

		// TODO HACK - find a cleaner programmable way to do this
		// write instances to tmp file as ARFF and load weka instances
		// File dir = new File(config.get(ConfigKeys.TMPURL));
		Set<Integer> keys = classifierInstances.keySet();
		if (keys != null) {
			for (int templNum : keys) {
				try {
					File tmpFile = new File(config.get(ConfigKeys.TMPURL)
							+ "file_" + String.valueOf(templNum) + ".arff");

					if (classifierInstances.containsKey(templNum)) {
						Vector<String> instances = classifierInstances
								.get(templNum);
						// Attribute count is inferred from the first row's
						// comma/space-separated fields.
						dumpInstanceToFile(instances, instances.get(0).split(
								"[ ,]+").length, tmpFile);
						
						
						// TODO do WEKA crap and then delete the file
						DataSource source = new DataSource(tmpFile.getAbsolutePath());
						weka.core.Instances insts = source.getDataSet();
						wekaClassInstances.put(templNum, insts);
					}
				} catch (Exception e) {
					e.printStackTrace();
					return;
				}
			}
		}
	}

	/**
	 * Builds a POS-tag template around a matched term: {@code leftBuf} tags
	 * of left context, the term's own tag, then {@code rightBuf} tags of
	 * right context. Positions before the start of the sentence are emitted
	 * as "START" and positions past its end as "END". Every value is
	 * double-quoted and comma-terminated so the caller can append the class
	 * label directly.
	 *
	 * @param posTags  POS tags of the whole sentence
	 * @param t        the matched term (only its tag is used here)
	 * @param sIndex   position where the left context ends (exclusive)
	 * @param eIndex   position where the right context starts (exclusive)
	 * @param leftBuf  number of context tags to take on the left
	 * @param rightBuf number of context tags to take on the right
	 * @return the template row; never null
	 */
	private StringBuffer extractTemplate(Vector<String> posTags, Term t,
			int sIndex, int eIndex, int leftBuf, int rightBuf) {
		StringBuffer row = new StringBuffer();

		// Left context, padded with START for out-of-sentence positions.
		for (int pos = sIndex - leftBuf; pos < sIndex; pos++) {
			String tag = (pos <= -1) ? "START" : posTags.get(pos);
			row.append("\"").append(tag).append("\",");
		}

		// The tag of the term itself.
		row.append("\"").append(t.tag).append("\",");

		// Right context, padded with END for out-of-sentence positions.
		for (int pos = eIndex + 1; pos <= eIndex + rightBuf; pos++) {
			String tag = (pos >= posTags.size()) ? "END" : posTags.get(pos);
			row.append("\"").append(tag).append("\",");
		}

		return row;
	}
	
	/**
	 * Returns the WEKA instances per template index, as populated by
	 * {@link #prepareTrainingSet(boolean)}. Empty until that method runs.
	 *
	 * @return template index -> loaded WEKA instances (live internal map)
	 */
	public HashMap<Integer, weka.core.Instances> getTemplateInstances() {
		return wekaClassInstances;
	}

	/**
	 * Command-line entry point: loads the configuration, builds the
	 * extractor and prepares a fresh (non-appending) training set.
	 *
	 * Expected args: args[1] = training XML file, args[2] = config file,
	 * args[3] = training class file. args[0] appears unused — TODO confirm
	 * against the invoking scripts.
	 *
	 * @param args command line arguments, see above
	 */
	public static void main(String[] args) {
		try {
			Configuration c = Configuration.getSingleton(args[2]);

			// BUGFIX: reuse the tag list instead of querying the config a
			// second time (the original computed `tags` and never used it).
			String[] tags = c.getArray(ConfigKeys.TAGS_TO_LEARN);

			TagTemplateExtractor tl = new TagTemplateExtractor(args[1],
					args[3], tags);
			tl.prepareTrainingSet(false);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
