package wikiextract.nlp.trainingset.x;

import static wikiextract.nlp.trainingset.x.Definitions.*;
import wikiextract.data.util.StringUtil;
import wikiextract.nlp.trainingset.x.Definitions.Dataset;

/**
 * Static configuration for the training-set extraction/evaluation pipeline.
 * All knobs live in mutable static fields; {@link #setChunk} derives the
 * file names and suffixes for the selected dataset/datatype/chunk, and
 * {@link #parse} overrides selected settings from command-line arguments.
 */
public class Settings {
	// ----- cross-validation / lexicon configuration -----
	static int CROSS_VALIDATION_SETS = 5;
	static int NUM_CROSS_LEXICONS = 5;
	static int WEB_LEXICONS = 4;

	static boolean LEXICON_CROSSTRAINING = true;

	// caps applied when sampling training data (articles per attribute,
	// sentences per article)
	public static int MAX_ARTICLES_PER_ATTRIBUTE = 125;
	public static int MAX_SENTENCES_PER_ARTICLE = 10;

	static long RANDOM_SEED = 29;
	public static double cv_test_train_ratio = 0.2;

	// First-pass features to compute.
	// Previously toggled alternatives: TRANSITIONS, DEPENDENCIES, NEWDEPS2.
	static Feature1[] features1ToCompute = { Feature1.WORDS,
		Feature1.CONTEXTUALIZATION, Feature1.CAPITALIZATION, Feature1.DIGITS };

	// Second-pass features; these are alternatives.
	// Previously toggled: BASELINE, ALLVALUES, GAUSSIAN.
	static Feature2[] features2ToCompute = { Feature2.WEB };

	// List types for second-pass features.
	// Previously toggled alternatives: FIRST_TOKEN, ORIGINAL, LAST_SEGMENT.
	static ListType[] typesToCompute = {
		ListType.FIRST_SEGMENT,
	};

	// training set size: -1 means that 4/5 of available data is taken as training set
	static int[] evaluationPoints = new int[] { -1 };
	// repetitions per evaluation point
	static int[] repetitions = new int[] { 25 };

	// file-name suffixes for the two passes; (re)assigned by setChunk()
	private static String firstSfx = "";
	private static String secondSfx = "";

	static Dataset dataset = Dataset.INFOBOX;

	static Datatype datatype = Datatype.TEXT;

	static String idsFile = null;
	static String trainingsetFile = null;

	// chunk selection:
	//   -1: small sample file
	//   -2: one against other (see oaoTrain/oaoTest)
	//   -3: test sample file
	//   -4: "faust" data set
	//  >=0: numbered chunk
	static int chunk = -2;

	// article ids used for one-against-other training/testing (chunk == -2)
	static int[] oaoTrain = {202619,202713};
	static int[] oaoTest = {207211};

	static {
		// derive file names/suffixes for the compile-time defaults above
		setChunk(dataset, datatype, chunk);
	}

	/**
	 * Selects the dataset/datatype/chunk combination and derives
	 * {@code trainingsetFile}, {@code idsFile}, {@code firstSfx} and
	 * {@code secondSfx} from it. See the {@code chunk} field for the
	 * meaning of negative chunk values.
	 */
	static void setChunk(Dataset ds, Datatype dt, int chunk) {
		Settings.dataset = ds;
		Settings.datatype = dt;
		if (Settings.dataset == Dataset.INFOBOX) {
			trainingsetFile = "trainingset.wikipedia.bin.sorted";
			if (Settings.datatype == Datatype.INTEGER)
				// NOTE(review): no leading '/' here, unlike the TEXT path
				// below and the chunk==-2 paths — confirm this is intentional
				idsFile = "wp/atts.int.id-";
			else if (Settings.datatype == Datatype.TEXT)
				idsFile = "/wp/atts.text.id-";
		} else if (Settings.dataset == Dataset.FREEBASE) {
			trainingsetFile = "trainingset.freebase.bin.all.sorted.reduced";
			if (Settings.datatype == Datatype.INTEGER)
				idsFile = "nfb/atts.int.id-";
			else if (Settings.datatype == Datatype.TEXT)
				idsFile = "nfb/atts.text.id-";
		}
		if (chunk == -1) {
			// small sample file
			idsFile += "sub";
		} else if (chunk == -2) {
			// one against other: fixed "good" id files, dataset-independent
			if (Settings.datatype == Datatype.TEXT)
				idsFile = "/wp/atts.text.id-good";
			else if (Settings.datatype == Datatype.INTEGER)
				idsFile = "/wp/atts.int.id-good";
			firstSfx = "all";
			secondSfx = "oaoN";
		} else if (chunk == -3) {
			// test sample file
			idsFile += "test";
			firstSfx = "test";
		} else if (chunk == -4) {
			idsFile = "faust/faust";
			firstSfx = "faust";
		} else if (chunk >= 0) {
			// numbered chunk
			idsFile += "good-" + chunk;
			firstSfx = "cn" + chunk;
			secondSfx = "cn" + chunk;
		}
	}


	/** Returns true iff {@code f} is among the first-pass features to compute. */
	static boolean useFeature1(Feature1 f) {
		for (Feature1 candidate : features1ToCompute)
			if (candidate == f) return true;
		return false;
	}

	/** Returns true iff {@code lt} is among the list types to compute. */
	static boolean useListType(ListType lt) {
		for (ListType candidate : typesToCompute)
			if (candidate == lt) return true;
		return false;
	}

	/**
	 * Builds the file-name suffix for first-pass output:
	 * dataset, datatype, the dash-joined feature names, and firstSfx.
	 */
	public static String firstPassSuffix() {
		StringBuilder sb = new StringBuilder();
		for (Feature1 f : features1ToCompute) {
			if (sb.length() > 0) sb.append("-");
			sb.append(f.toString());
		}
		sb.append("_" + firstSfx);
		return dataset + "_" + datatype + "_" + sb.toString();
	}

	/**
	 * Builds the file-name suffix for second-pass output:
	 * the dash-joined second-pass feature names followed by secondSfx.
	 */
	static String secondPassSuffix() {
		StringBuilder sb = new StringBuilder();
		for (Feature2 f : features2ToCompute) {
			if (sb.length() > 0) sb.append("-");
			sb.append(f.toString());
		}
		sb.append("_" + secondSfx);
		return sb.toString();
	}

	/**
	 * Parses command-line overrides: -features1, -types, -points, -dataset.
	 * On any malformed option a usage line is printed; settings are only
	 * assigned after a whole option parses successfully, so a failing
	 * option leaves the previous values intact (the original code stored
	 * null array elements / a null dataset on bad input, causing NPEs
	 * later in useFeature1/useListType).
	 */
	static void parse(String[] args) {
		try {
			for (int i = 0; i < args.length; i++) {
				if ("-features1".equals(args[i])) {
					// NOTE(review): split limit of 5 may truncate longer
					// lists — confirm StringUtil.split semantics
					String[] f = StringUtil.split(args[++i], ',', 5);
					Feature1[] parsed = new Feature1[f.length];
					for (int j = 0; j < f.length; j++)
						parsed[j] = parseFeature1(f[j]);
					features1ToCompute = parsed;
				} else if ("-types".equals(args[i])) {
					String[] t = StringUtil.split(args[++i], ',', 5);
					ListType[] parsed = new ListType[t.length];
					for (int j = 0; j < t.length; j++)
						parsed[j] = parseListType(t[j]);
					typesToCompute = parsed;
				} else if ("-points".equals(args[i])) {
					String[] p = StringUtil.split(args[++i], ',', 5);
					int[] parsed = new int[p.length];
					for (int j = 0; j < p.length; j++)
						parsed[j] = Integer.parseInt(p[j]);
					evaluationPoints = parsed;
				} else if ("-dataset".equals(args[i])) {
					i++;
					if ("infobox".equals(args[i]))
						dataset = Dataset.INFOBOX;
					else if ("freebase".equals(args[i]))
						dataset = Dataset.FREEBASE;
					else throw new Exception("unknown dataset: " + args[i]);
				}
			}
		} catch (Exception e) {
			System.out.println("Usage: ... -features words,transitions");
		}
	}

	/** Maps a command-line feature name to its Feature1 constant; throws on unknown names. */
	private static Feature1 parseFeature1(String name) throws Exception {
		if ("words".equals(name)) return Feature1.WORDS;
		if ("transitions".equals(name)) return Feature1.TRANSITIONS;
		if ("contextualization".equals(name)) return Feature1.CONTEXTUALIZATION;
		if ("capitalization".equals(name)) return Feature1.CAPITALIZATION;
		if ("digits".equals(name)) return Feature1.DIGITS;
		if ("dependencies".equals(name)) return Feature1.DEPENDENCIES;
		throw new Exception("unknown feature: " + name);
	}

	/** Maps a command-line list-type name to its ListType constant; throws on unknown names. */
	private static ListType parseListType(String name) throws Exception {
		if ("firstSegment".equals(name)) return ListType.FIRST_SEGMENT;
		if ("lastSegment".equals(name)) return ListType.LAST_SEGMENT;
		throw new Exception("unknown list type: " + name);
	}
}
