package etxt2db.mallet;
import java.text.ParseException;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

import edu.cmu.minorthird.text.Span;
import edu.cmu.minorthird.text.TextLabels;
import edu.cmu.minorthird.text.learn.SpanFeatureExtractor;
import edu.umass.cs.mallet.base.pipe.Pipe;
import edu.umass.cs.mallet.base.pipe.SerialPipes;
import edu.umass.cs.mallet.base.pipe.TokenSequence2FeatureVectorSequence;
import edu.umass.cs.mallet.base.pipe.tsf.TokenText;
import edu.umass.cs.mallet.base.types.Instance;
import edu.umass.cs.mallet.base.types.InstanceList;


/**
 * Converts a MinorThird {@link TextLabels} corpus into a MALLET
 * {@link InstanceList} suitable for sequence training.
 *
 * <p>Each document span is rendered as one multi-line string, one token per
 * line, where each line is "TAG token" (TAG is one of
 * {@code <att>Unique|Begin|End|Continue} for tokens covered by an attribute
 * span, or {@code NEG} otherwise). The string is then fed through a
 * {@link SerialPipes} feature pipeline to produce a feature-vector sequence.
 */
public class MalletLoader {
	// Regex fragments kept for reference — re-enable the corresponding
	// RegexMatches pipes below to add orthographic features.
	//private static String CAPS = "[A-Z]";
    //private static String LOW = "[a-z]";
    //private static String CAPSNUM = "[A-Z0-9]";
    //private static String ALPHA = "[A-Za-z]";
    //private static String ALPHANUM = "[A-Za-z0-9]";
    //private static String PUNT = "[,\\.;:?!()]";
    //private static String QUOTE = "[\"`']";


	/**
	 * Builds a MALLET {@link InstanceList} from the labeled corpus.
	 *
	 * @param labels     labeled text corpus; every document span is converted
	 * @param attributes span types to emit as target tags; spans of different
	 *                   attributes must not overlap on the same token
	 * @param tokenFE    per-token feature extractor, applied inside the pipe
	 *                   chain by {@link MalletFeatureExtractor}
	 * @return one {@link Instance} per document, piped through the feature chain
	 * @throws ParseException if a token is covered by more than one attribute
	 *                        span (embedded/overlapping tags are unsupported)
	 */
	public InstanceList load(TextLabels labels, List<String> attributes, SpanFeatureExtractor tokenFE) throws ParseException{
		Pipe featurep = new SerialPipes (new Pipe[] {
				new MedlineSentence2TokenSequence(),
				// Optional orthographic feature pipes (disabled):
			    //new RegexMatches ("INITCAP", Pattern.compile (CAPS+".*")),
			    //new RegexMatches ("CAPITALIZED", Pattern.compile (CAPS+LOW+"*")),
			    //new RegexMatches ("ALLCAPS", Pattern.compile (CAPS+"+")),
			    //new RegexMatches ("MIXEDCAPS", Pattern.compile ("[A-Z][a-z]+[A-Z][A-Za-z]*")),
			    //new RegexMatches ("CONTAINSDIGITS", Pattern.compile (".*[0-9].*")),
			    //new RegexMatches ("ALLDIGITS", Pattern.compile ("[0-9]+")),
			    //new RegexMatches ("NUMERICAL", Pattern.compile ("[-0-9]+[\\.,]+[0-9\\.,]+")),
			    //new RegexMatches ("ALPHNUMERIC", Pattern.compile ("[A-Za-z0-9]+")),
			    //new RegexMatches ("CONTAINSDASH", Pattern.compile (ALPHANUM+"+-"+ALPHANUM+"*")),
			    //new RegexMatches ("ACRO", Pattern.compile ("[A-Z][A-Z\\.]*\\.[A-Z\\.]*")),
			    //new RegexMatches ("LONELYINITIAL", Pattern.compile (CAPS+"\\.")),
			    //new RegexMatches ("SINGLECHAR", Pattern.compile (ALPHA)),
			    //new RegexMatches ("CAPLETTER", Pattern.compile ("[A-Z]")),
			    //new RegexMatches ("PUNC", Pattern.compile (PUNT)),
				new MalletFeatureExtractor(labels, attributes, tokenFE),
			    new TokenText ("WORD="),
			    //new LexiconMembership(new File("/home/ryantm/progs/state_list")),
			    //new TokenTextCharSuffix("4SUFFIX=",4),
			    //new TokenTextCharSuffix("3SUFFIX=",3),
			    //new TokenTextCharSuffix("2SUFFIX=",2),
			    //new TokenTextCharNGrams ("CHARNGRAM=", new int[] {2,3}),
			    //new FeaturesInWindow("WORD=",-1,1),
			    //new FeaturesInWindow("WINDOW=",-1,1,Pattern.compile("WORD=.*"),true),
			    //new PrintTokenSequenceFeatures(),
			    new TokenSequence2FeatureVectorSequence (true, true)
				});
		Pipe p = featurep;
		// Pre-register the default "no tag" label so its index is stable.
		p.getTargetAlphabet().lookupIndex("NEG");
		InstanceList result = new InstanceList(p);

		Iterator<Span> iter = labels.getTextBase().documentSpanIterator();
		while(iter.hasNext()){
			Span current = iter.next();
			// Build the document text token-by-token. StringBuilder append in
			// forward token order replaces the original backward-iterating
			// string-prepend, which re-copied the whole buffer per token (O(n^2)).
			StringBuilder lineFormat = new StringBuilder();
			for(int i = 0; i < current.size(); i++){
				Span tok = current.subSpan(i, 1);
				// Skip whitespace-only tokens entirely.
				if(tok.asString().matches("\\s*")){
					continue;
				}
				String tokFormat = tok.asString();

				boolean overlap = false;
				String currentTag = "";

				for(String att : attributes){
					Set<Span> classified = labels.getTypeSet(att, current.getDocumentId());
					for(Span spn : classified){
						if(spn.contains(tok)){
							// A token may belong to at most one attribute span.
							if(overlap){
								throw new ParseException( "This loader does not support embedded tags: " +
												"opening tag '"+att+"' tag with tag '" + currentTag + "' opened in "+current.getDocumentId(),0);
							}
							// Positional tag: does this token start, end, span,
							// or sit inside the attribute span?
							if(spn.getLoChar() == tok.getLoChar() &&
							   spn.getHiChar() == tok.getHiChar()){
							    tokFormat = att + "Unique" + " " + tokFormat;
							}else if(spn.getLoChar() == tok.getLoChar()){
								tokFormat = att + "Begin" + " " + tokFormat;
							} else if(spn.getHiChar() == tok.getHiChar()){
								tokFormat = att + "End" + " " + tokFormat;
							} else{
								tokFormat = att + "Continue" + " " + tokFormat;
							}
							currentTag = att;
							overlap = true;
						}
					}
				}

				// Untagged tokens get the default negative label.
				if(!overlap){
					tokFormat = "NEG " + tokFormat;
				}

				lineFormat.append(tokFormat).append('\n');
			}
			Instance ins = new Instance(lineFormat.toString(),null,current.getDocumentId(),current.getDocumentId(),p);
			result.add(ins);
		}
		return result;
	}
}
