package langnstats.project.lib;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StreamTokenizer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import langnstats.project.languagemodel.hmm.EmissionCharacter;

/**
 * Penn-Treebank-style part-of-speech tags used as emission symbols for the HMM.
 *
 * <p>Constants whose names are longer than four characters (the punctuation
 * placeholders such as {@code PERIOD}, {@code COMMA}, {@code LEFTPAR}) are
 * serialized in angle brackets, e.g. {@code <PERIOD>}; see
 * {@link #getOriginalTag()}. The {@code PRP$} constant uses the legal-in-Java
 * {@code $} character to match the Treebank possessive-pronoun tag.
 */
public enum WordType implements EmissionCharacter{
	COLON, COMMA, LEFTPAR, PERIOD, RIGHTPAR, CC, CD, DT, EX, IN, JJ, JJR, JJS, MD, NN, NNP, NNPS, NNS, POS, PRP, PRP$, RB, RBR, RP, TO, VB, VBD, VBG, VBN, VBP, VBZ, WDT, WP, WRB;
	
	/** @return the zero-based declaration index of this tag (its ordinal). */
	public int getIndex(){ return this.ordinal(); }
	
	/**
	 * Looks up the tag whose serialized form (see {@link #getOriginalTag()})
	 * equals {@code s}.
	 *
	 * @param s the serialized tag text, e.g. {@code "NN"} or {@code "<PERIOD>"};
	 *          may be {@code null}
	 * @return the matching tag, or {@code null} when {@code s} is {@code null}
	 *         or — with assertions disabled — unknown. With {@code -ea} an
	 *         unknown tag trips the assertion instead.
	 */
	public static WordType get(String s) {
		if(s==null){ return null; }
		for(WordType wordType : WordType.values()){
			if(s.equals(wordType.getOriginalTag())){
				return wordType;
			}
		}
		// Deliberately assert-then-null rather than throw: callers may rely on
		// the null return when assertions are disabled.
		assert false : "No wordtype as " + s;
		return null;
	}
	
	/** @return the enum constant's name, e.g. {@code "NN"} or {@code "PERIOD"}. */
	public String getName(){ return this.toString(); }
	
	/**
	 * Serialized form of this tag as it appears in tagged corpus files.
	 * Names longer than four characters (the punctuation placeholders) are
	 * wrapped in angle brackets so they cannot collide with real POS tags.
	 *
	 * @return e.g. {@code "NN"}, {@code "PRP$"}, or {@code "<PERIOD>"}
	 */
	public String getOriginalTag(){
		String tag = this.getName();
		if(tag.length()>4){ return "<"+tag+">"; }
		else{ return tag; }
	}

	/** @return the number of distinct tags; alias for {@link #size()}. */
	public static int vocabularySize() {
		return size();
	}

	/** @return the number of declared {@code WordType} constants. */
	public static int size() {
		return WordType.values().length;
	}

	/**
	 * Reads a whitespace-separated sequence of serialized tags from
	 * {@code file} and maps each token through {@link #get(String)}.
	 *
	 * @param file the tag file to read
	 * @return the tags in file order
	 * @throws IOException if the file cannot be read
	 * @throws IllegalArgumentException if the tokenizer yields a number or a
	 *         token with no string value
	 */
	public static WordType[] parse(File file) throws IOException{
		// try-with-resources guarantees the reader is closed on every exit
		// path, including the IllegalArgumentException below.
		try(Reader reader = new BufferedReader(new FileReader(file))){
			StreamTokenizer tokenizer = buildTokenizer(reader);

			int type;
			List<WordType> list = new ArrayList<WordType>();
			while( (type=tokenizer.nextToken()) != StreamTokenizer.TT_EOF ){
				if(type==StreamTokenizer.TT_NUMBER || tokenizer.sval==null){
					throw new IllegalArgumentException("Invalid token! type: "+type+", sval: "+tokenizer.sval);
				}

				list.add( WordType.get(tokenizer.sval) );
			}
			return list.toArray(new WordType[0]);
		}
	}
	
	/**
	 * Configures a {@link StreamTokenizer} so that every character used by the
	 * serialized tag forms — parentheses, angle brackets, and {@code $} — is
	 * treated as part of a word rather than as punctuation or a number prefix.
	 */
	private static StreamTokenizer buildTokenizer(Reader reader){
		StreamTokenizer tokenizer = new StreamTokenizer(reader);
		tokenizer.eolIsSignificant(false);
		
		tokenizer.wordChars('(',')');	// if not, it becomes null
		tokenizer.ordinaryChar('<');	// if not, it thinks '<' as a number
		tokenizer.ordinaryChar('>');	// if not, it thinks '<' as a number
		tokenizer.wordChars('<','<');
		tokenizer.wordChars('>','>');
		tokenizer.wordChars('$','$');
		return tokenizer;		
	}
	
	/**
	 * Flattens {@code map} into an array indexed by tag ordinal.
	 *
	 * @param map a score per tag; must contain an entry for EVERY tag —
	 *            a missing key causes a {@link NullPointerException} when the
	 *            {@code Double} is unboxed
	 * @return a {@code double[size()]} with {@code map}'s values in ordinal order
	 */
	public static double[] makePredictionArray(Map<WordType,Double> map){
		double[] predictions = new double[WordType.size()];
			
		int index =0;
		for(WordType wordType : WordType.values()){
			predictions[index] = map.get(wordType);
			index++;
		}
		return predictions;
	}

	/** @return the emission-alphabet size, i.e. {@link #size()}. */
	public int getAlphabetSize() {
		return WordType.size();
	}
}
