package geppetto.main;

import geppetto.cat.alignments.Alignment;
import geppetto.cat.alignments.AlignmentsSet;
import geppetto.cat.alignments.DynamicAlignmentSet;
import geppetto.cat.corpus.BilingualCorpus;
import geppetto.lexical.LexicalT;
import geppetto.main.adaptation.AdaptationInfo;
import geppetto.phraseHMM.WordTrie;
import geppetto.phraseHMM.alignmentInformationWriters.AlignmentInformationWriter;
import geppetto.phraseHMM.alignmentInformationWriters.BestAlignmentInformationWriter;
import geppetto.phraseHMM.alignmentInformationWriters.NoAlignmentInformationWriter;
import geppetto.phraseHMM.lexicalWeighting.AbstractLexicalWeightingCalculator;
import geppetto.phraseHMM.lexicalWeighting.MosesLexicalWeightingCalculator;
import geppetto.phraseHMM.lexicalWeighting.WeightedLexicalWeightingCalculator;
import geppetto.phraseHMM.phraseExtraction.GeneralPhraseExtraction;
import geppetto.phraseHMM.phraseExtraction.extractedphrase.ExtractedPhrasePair;
import geppetto.phraseHMM.phraseExtraction.extractedphrase.ExtractedPhrasePairWriter;
import geppetto.phraseHMM.phraseExtraction.extractedphrase.writer.DetailedTableWriter;
import geppetto.phraseHMM.phraseExtraction.extractedphrase.writer.ExtractionTableWriter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.AbstractPhraseAccepter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.AdaptedSingleTypeAccepter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.AdaptedSpecificTypeAccepter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.AlwaysAccept;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.CompositePhraseAccepter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.EndOfPhraseTerminalPunctuation;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.KohenPhraseAccept;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.NoPunctuation;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.NoTerminalPunctuation;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.PhraseLengthAcceter;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.PosteriorAccept;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.PurposeAccepter;
import geppetto.phraseHMM.phraseExtraction.sentencePair.PhrasePairCandidateExtractor;
import geppetto.phraseHMM.phraseExtraction.sentencePair.SizeBasedPhrasePairCandidateExtractor;
import geppetto.phraseScorer.AbstractPhraseScorer;
import geppetto.phraseScorer.AdaptationScorer;
import geppetto.phraseScorer.One;
import geppetto.phraseScorer.PurposeBasedScorer;
import geppetto.phraseScorer.WeightedPhraseScorerPhraseScorer;
import geppetto.reordering.AbstractReorderingCalculator;
import geppetto.reordering.AdaptationReorderingMSDModelCalculator;
import geppetto.reordering.AlignmentScoreWeightedMSDReorderingCalculator;
import geppetto.reordering.GraphPhraseReorderingCalculator;
import geppetto.reordering.MSDReorderingCalculator;
import geppetto.reordering.PhraseMSDReorderingCalculator;
import geppetto.reordering.PurposeBasedMSDReorderingModel;
import geppetto.reordering.AlignmentWeightedMSDReorderingCalculator;
import geppetto.reordering.ScoreWeightedMSDReorderingCalculator;
import geppetto.reordering.WeightedGraphPhraseReorderingCalculator;
import geppetto.reordering.graph.scorers.DiscontinuousPenalty;
import geppetto.reordering.graph.scorers.LengthPenalty;
import geppetto.reordering.graph.scorers.PhraseScoreScorer;
import geppetto.reordering.graph.scorers.SourceSegmentationScorer;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;

import unreleased.geppetto.phraseHMM.phraseExtraction.phraseAccepter.NoBorderDanglingWords;
import unreleased.geppetto.phraseHMM.phraseExtraction.phraseAccepter.NoDanglingWords;
import unreleased.geppetto.phraseHMM.phraseExtraction.sentencePair.AlignmentPointsPhrasePairCandidateExtractor;

/**
 * Static helper and configuration holder shared by the phrase-extraction entry
 * points: it loads parameters, the bilingual corpus and the alignment set, and
 * builds the scorers, accepters, reordering calculators and lexical-weighting
 * calculators selected by the configuration.
 *
 * All state is held in static fields; the class is not thread-safe.
 */
public class ExtractionUtils {
	// Merged parameter store: defaults, then property file, then command line.
	protected static GeppettoParameters _parameters = new GeppettoParameters();
	// Built-in defaults applied before any file/command-line parameters.
	protected static HashMap<String, String> _defaultParameters = new HashMap<String, String>();
	// Parameters that must be present after loading (see loadParameters).
	protected static String[] REQUIRED = new String[] {"corpusDescription", "output"};
	// Caches for built accepters/lexical calculators (currently unused here).
	protected static HashMap<String, AbstractPhraseAccepter> _accepters = new HashMap<String, AbstractPhraseAccepter>();
	protected static HashMap<String, AbstractLexicalWeightingCalculator> _lexicalCalcs = new HashMap<String, AbstractLexicalWeightingCalculator>();
	// Lazily built vocabulary tries — see getWordTrieSource()/getWordTrieTarget().
	protected static WordTrie sourceVocab;
	protected static WordTrie targetVocab;
	static {
		// Default parameter values; any of these can be overridden by the
		// property file or by command-line arguments.
		_defaultParameters.put("size", "1000");
		_defaultParameters.put("maxSentenceSize", "999");
		_defaultParameters.put("threshold", "0.3");
		_defaultParameters.put("encoding", "UTF-8");
		// _properties.put("output", "");

		_defaultParameters.put("alignmentFileType", "posterior");
		_defaultParameters.put("alignmentLoader", "static");
		_defaultParameters.put("alignments_dir", "");

		_defaultParameters.put("scorerType", "one");
		_defaultParameters.put("accepterType", "kohen");
		_defaultParameters.put("posterior_acceptor_threshold", "0.1");
		_defaultParameters.put("reorderingCalcType", "msd_alignment_score_weighted");
		_defaultParameters.put("lexicalWeightCalc", "moses");
		_defaultParameters.put("outputType", "regular");
		_defaultParameters.put("candidateExtractor", "size_based-7-7");
		_defaultParameters.put("alignmentInformation", "1-best");

		_defaultParameters.put("adaptation_file", "");
	}

	// ---- Values populated from the parameter store by initArguments() ----
	protected static String corpusDescription;
	protected static int size; 
	protected static int maxSentenceSize; 
	protected static String encoding;
	protected static String output;
	protected static String outputType;
	protected static String detailedOutput;
	protected static String sentencesString;

	protected static BilingualCorpus corpus;

	protected static String alignmentsDir;
	protected static String alignmentLoader;
	protected static String alignmentsFileType;

	protected static AlignmentsSet alignments;
	protected static LexicalT lexicalTable;

	protected static String scorerType;
	protected static String accepterType;

	protected static String reordCalcType;
//	protected static AbstractReorderingCalculator reorderingCalc; 
	protected static String lexicalCalcType;
//	protected static AbstractLexicalWeightingCalculator lexicalWeightCalc;

	protected static String phrasePairCandidateExtractorType; 
	protected static String alignmentInformation;

	protected static String adaptationFile;
	protected static AdaptationInfo adaptationInfo;
	
	// Phrase-length limits used when sizing the word tries; set by
	// getPhrasePairCandidateExtractor().
	protected static int wordTrieSourceSize = 0;
	protected static int wordTrieTargetSize = 0;
	/**
	 * Loads the extraction parameters in increasing priority: built-in defaults,
	 * the property file named by {@code args[0]}, then the remaining
	 * command-line arguments. Afterwards verifies all required parameters exist.
	 *
	 * @param args args[0] is the property-file path; the rest are overrides
	 * @throws Exception if any required parameter is still missing after loading
	 */
	public static void loadParameters(String[] args) throws Exception {
		_parameters.loadParametersFromMap(_defaultParameters);
		_parameters.loadFromPropertyFile(args[0]);
		_parameters.loadParametersFromCommandLineArgs(args);
		_parameters.setRequiredParameters(REQUIRED);
		String[] missingProps = _parameters.getMissingParamters();

		if (missingProps.length > 0) {
			// StringBuilder instead of String concatenation inside the loop.
			StringBuilder msg = new StringBuilder();
			for (String prop : missingProps) {
				msg.append(prop).append(' ');
			}
			throw new Exception("Missing Properties: " + msg);
		}
	}


	/**
	 * Copies the merged parameter values into the class's static fields.
	 * Must run after loadParameters().
	 */
	protected static void initArguments() {
		// Single snapshot of the merged properties (defaults + file + CLI).
		Properties props = _parameters.getProperties();

		corpusDescription = props.getProperty("corpusDescription");
		size = Integer.parseInt(props.getProperty("size"));
		maxSentenceSize = Integer.parseInt(props.getProperty("maxSentenceSize"));
		encoding = props.getProperty("encoding");
		output = props.getProperty("output");
		outputType = props.getProperty("outputType");
		detailedOutput = props.getProperty("detailedOutput");
		sentencesString = props.getProperty("sentences");

		// NOTE(review): the defaults map seeds "alignments_dir" with ""; if that
		// default reaches these properties, the "alignmentsFile" fallback below
		// is never taken — confirm against GeppettoParameters.loadParametersFromMap.
		if (props.containsKey("alignments_dir")) {
			alignmentsDir = props.getProperty("alignments_dir");
		} else {
			alignmentsDir = props.getProperty("alignmentsFile");
		}
		alignmentLoader = props.getProperty("alignmentLoader");
		alignmentsFileType = props.getProperty("alignmentFileType");

		scorerType = props.getProperty("scorerType");
		accepterType = props.getProperty("accepterType");

		reordCalcType = props.getProperty("reorderingCalcType");
		lexicalCalcType = props.getProperty("lexicalWeightCalc");
		phrasePairCandidateExtractorType = props.getProperty("candidateExtractor");
		alignmentInformation = props.getProperty("alignmentInformation");
		adaptationFile = props.getProperty("adaptation_file");
	}

	/** Dumps the resolved configuration to stdout, grouped by section. */
	protected static void printArguments(){
		String[] report = {
				"----General Parameters----",
				"Corpus: " + corpusDescription,
				"Size: " + size,
				"Max Sentence size: " + maxSentenceSize,
				"output: " + output,
				"encoding: " + encoding,
				"----Alignment Parameters----",
				"Alignment Dir: " + alignmentsDir,
				"Alignment Loader: " + alignmentLoader,
				"Alignment File Type: " + alignmentsFileType,
				"----Extraction Parameters----",
				"Phrase Scorer: " + scorerType,
				"Phrase Accepter: " + accepterType,
				"Reordering Calculator: " + reordCalcType,
				"Lexical Weight Calculator: " + lexicalCalcType,
				"Candidate Extractor: " + phrasePairCandidateExtractorType,
		};
		for (String line : report) {
			System.out.println(line);
		}
	}

	/**
	 * Loads and initializes the bilingual training corpus described by
	 * {@code corpusDescription}.
	 *
	 * @throws AssertionError if the corpus description file cannot be read
	 * @throws IOException    if corpus loading itself fails
	 */
	protected static void loadCorpus() throws IOException{
		// Sanity-check that the description file is a readable properties file
		// before handing it to BilingualCorpus.
		Properties properties = new Properties();
		// try-with-resources: the original leaked the FileInputStream.
		try (FileInputStream in = new FileInputStream(corpusDescription)) {
			properties.load(in);
		} catch (IOException e) {
			// Preserve the underlying cause instead of discarding it.
			throw new AssertionError("Wrong properties file " + corpusDescription, e);
		}
		corpus = new BilingualCorpus();
		corpus.loadFromFileDescription(corpusDescription, size, maxSentenceSize);
		corpus.initialize();
	}

	/**
	 * Builds the alignment set from the configured directory, choosing the
	 * loader by alignment file type ("posterior" or "moses") and loading mode
	 * ("static" vs. dynamic). Any other file type leaves {@code alignments}
	 * untouched.
	 */
	public static void buildAlignmentSet() throws FileNotFoundException, IOException {
		String modelsDir = getAlignmentModelsDir();
		boolean staticLoader = alignmentLoader.equals("static");
		if (alignmentsFileType.equals("posterior")) {
			if (staticLoader) {
				alignments = AlignmentsSet.getAlignmentFromPosterior(modelsDir, BilingualCorpus.TRAIN_CORPUS, corpus);
			} else {
				alignments = DynamicAlignmentSet.getAlignmentFromPosterior(modelsDir, BilingualCorpus.TRAIN_CORPUS, corpus);
			}
		} else if (alignmentsFileType.equals("moses")) {
			if (staticLoader) {
				alignments = AlignmentsSet.getAlignmentFromMoses(modelsDir, BilingualCorpus.TRAIN_CORPUS, corpus);
			} else {
				alignments = DynamicAlignmentSet.getAlignmentFromMoses(modelsDir, BilingualCorpus.TRAIN_CORPUS, corpus);
			}
		}
	}

	/**
	 * Returns the alignment-models directory; an empty configured value means
	 * "use the default location under the output directory".
	 */
	public static String getAlignmentModelsDir() {
		return alignmentsDir.isEmpty() ? output + "/alignmentSym" : alignmentsDir;
	}

	/**
	 * Builds the phrase-pair candidate extractor from its spec string and, as a
	 * side effect, records the phrase sizes used later to size the word tries.
	 *
	 * Specs: "size_based-&lt;srcLen&gt;-&lt;tgtLen&gt;" or
	 * "minimal_translation_unit-&lt;size&gt;". Returns null for anything else.
	 */
	public static PhrasePairCandidateExtractor getPhrasePairCandidateExtractor(String phrasePairCandidateExtractorType) {
		if (phrasePairCandidateExtractorType.startsWith("size_based")) {
			// Split once instead of twice.
			String[] parts = phrasePairCandidateExtractorType.split("-");
			int sourceSize = Integer.parseInt(parts[1]);
			int targetSize = Integer.parseInt(parts[2]);
			wordTrieSourceSize = sourceSize;
			wordTrieTargetSize = targetSize;
			return new SizeBasedPhrasePairCandidateExtractor(sourceSize, targetSize);
		}
		if (phrasePairCandidateExtractorType.startsWith("minimal_translation_unit")) {
			// Renamed from "size": it shadowed the static 'size' field.
			int unitSize = Integer.parseInt(phrasePairCandidateExtractorType.split("-")[1]);
			wordTrieSourceSize = 10; // must ... find ... way ... to ... make ... word trie dynamic
			wordTrieTargetSize = 10; // must ... find ... way ... to ... make ... word trie dynamic
			return new AlignmentPointsPhrasePairCandidateExtractor(unitSize, alignments);
		}
		return null; // unknown extractor type
	}

	/**
	 * Builds the phrase scorer identified by {@code scorerType}; returns null
	 * for an unknown type.
	 *
	 * Known types: "one", "&lt;purpose&gt;_adapted" (affirmative/interrogative/
	 * declarative/exclamative), "weighted", and "adapted t1=w1 t2=w2 ...".
	 */
	public static AbstractPhraseScorer getScorer(String scorerType){
		if (scorerType.equals("one")) {
			return new One();
		}

		// The four purpose-based scorers only differ in their purpose tag.
		HashMap<String, String> purposeTags = new HashMap<String, String>();
		purposeTags.put("affirmative_adapted", "a");
		purposeTags.put("interrogative_adapted", "i");
		purposeTags.put("declarative_adapted", "d");
		purposeTags.put("exclamative_adapted", "e");
		String purpose = purposeTags.get(scorerType);
		if (purpose != null) {
			return new PurposeBasedScorer(purpose, corpus);
		}

		if (scorerType.equals("weighted")) {
			return new WeightedPhraseScorerPhraseScorer(alignments);
		}

		if (scorerType.startsWith("adapted")) {
			// Format: "adapted <type1>=<w1> <type2>=<w2> ...".
			String[] tokens = scorerType.split("\\s+");
			HashMap<String, Double> typeWeights = new HashMap<String, Double>();
			for (int i = 1; i < tokens.length; i++) {
				String[] pair = tokens[i].split("=");
				typeWeights.put(pair[0], Double.parseDouble(pair[1]));
			}
			return new AdaptationScorer(alignments, getAdaptationInfo(), typeWeights);
		}
		return null;
	}

	/**
	 * Builds the phrase-pair accepter from a '+'-separated list of accepter
	 * names (e.g. "kohen+no_punc"). Every recognized name contributes one
	 * accepter to the returned composite; unrecognized names are ignored.
	 */
	public static AbstractPhraseAccepter getAccepter(String accepterType){
		CompositePhraseAccepter composite = new CompositePhraseAccepter();

		for (String name : accepterType.split("\\+")) {
			if (name.equals("kohen") || name.equals("koehn")) {
				composite.add(new KohenPhraseAccept(alignments));
			}
			if (name.equals("posterior")) {
				double threshold = Double.parseDouble(_parameters.getProperties().getProperty("posterior_acceptor_threshold"));
				composite.add(new PosteriorAccept(threshold, new WeightedPhraseScorerPhraseScorer(alignments)));
			}
			if (name.equals("always")) {
				composite.add(new AlwaysAccept());
			}
			if (name.equals("no_punc")) {
				composite.add(new NoPunctuation(corpus));
			}
			if (name.equals("no_terminal_punc")) {
				composite.add(new NoTerminalPunctuation(corpus));
			}
			if (name.equals("no_terminal_punc_unless_last")) {
				composite.add(new EndOfPhraseTerminalPunctuation(corpus));
			}
			if (name.startsWith("length_diff")) {
				// Format: "length_diff-<maxLengthDifference>".
				composite.add(new PhraseLengthAcceter(Integer.parseInt(name.split("-")[1])));
			}
			if (name.equals("no_dangling_words")) {
				composite.add(new NoDanglingWords(alignments));
			}
			if (name.equals("no_border_dangling_words")) {
				composite.add(new NoBorderDanglingWords(alignments));
			}
			if (name.startsWith("purpose_based")) {
				// Format: "purpose_based-<purposeTag>".
				composite.add(new PurposeAccepter(name.split("-")[1], corpus));
			}
			if (name.equals("adaptation_single_type")) {
				composite.add(new AdaptedSingleTypeAccepter(getAdaptationInfo()));
			}
			if (name.startsWith("adaptation_specific_type")) {
				// Format: "adaptation_specific_type <type1> <type2> ..." (whitespace-separated).
				String[] fields = name.split("\\s+");
				ArrayList<String> acceptedTypes = new ArrayList<String>();
				for (int i = 1; i < fields.length; i++) {
					acceptedTypes.add(fields[i]);
				}
				composite.add(new AdaptedSpecificTypeAccepter(getAdaptationInfo(), acceptedTypes));
			}
		}
		return composite;
	}

	/**
	 * Builds the reordering-model calculator identified by
	 * {@code reordCalcType}; returns null for an unknown type.
	 */
	public static AbstractReorderingCalculator getReorderingCalc(String reordCalcType){
		if(reordCalcType.equals("msd")){
			return new MSDReorderingCalculator(alignments);
		}
		if(reordCalcType.equals("msd_alignment_weighted")){
			return new AlignmentWeightedMSDReorderingCalculator(alignments);
		}
		if(reordCalcType.equals("msd_score_weighted")){
			return new ScoreWeightedMSDReorderingCalculator(alignments);
		}
		if(reordCalcType.equals("msd_alignment_score_weighted")){
			return new AlignmentScoreWeightedMSDReorderingCalculator(alignments);
		}
		if(reordCalcType.equals("pmsd")){
			return new PhraseMSDReorderingCalculator(alignments);
		}
		if(reordCalcType.equals("pmsd-graph")){
			return new GraphPhraseReorderingCalculator(alignments);
		}
		if(reordCalcType.startsWith("pmsd_graph_weighted")){
			// Format: "pmsd_graph_weighted:<scorer1>-<scorer2>-...".
			return getWeightedGraphReorderingCalculator(reordCalcType.split(":")[1]);
		}
		if(reordCalcType.equals("affirmative_purpose_msd")){
			return new PurposeBasedMSDReorderingModel(alignments, corpus, "a");
		}
		if(reordCalcType.equals("declarative_purpose_msd")){
			return new PurposeBasedMSDReorderingModel(alignments, corpus, "d");
		}
		if(reordCalcType.equals("interrogative_purpose_msd")){
			return new PurposeBasedMSDReorderingModel(alignments, corpus, "i");
		}
		if(reordCalcType.equals("exclamative_purpose_msd")){
			return new PurposeBasedMSDReorderingModel(alignments, corpus, "e");
		}
		if(reordCalcType.startsWith("adapted")){
			// Format: "adapted <type1>=<w1> <type2>=<w2> ...".
			// BUG FIX: this used to parse the static scorerType field (a
			// copy-paste from getScorer) instead of the reordCalcType argument.
			return new AdaptationReorderingMSDModelCalculator(alignments, corpus, getAdaptationInfo(),
					parseReorderingTypeWeights(reordCalcType));
		}
		return null;
	}

	/** Parses "&lt;name&gt; k1=v1 k2=v2 ..." (whitespace-separated) into a type-to-weight map. */
	private static HashMap<String, Double> parseReorderingTypeWeights(String spec) {
		String[] tokens = spec.split("\\s+");
		HashMap<String, Double> typeWeights = new HashMap<String, Double>();
		for (int i = 1; i < tokens.length; i++) {
			String[] pair = tokens[i].split("=");
			typeWeights.put(pair[0], Double.parseDouble(pair[1]));
		}
		return typeWeights;
	}

	/**
	 * Builds the lexical weighting calculator from a spec of the form
	 * "&lt;tableScorer&gt;-&lt;algorithm&gt;" or just "&lt;algorithm&gt;"
	 * (table scorer defaults to "one"). Algorithms: "moses", "weighted".
	 * Returns null for an unknown algorithm. Also initializes the shared
	 * static {@code lexicalTable} as a side effect.
	 */
	public static AbstractLexicalWeightingCalculator getLexicalWeightingCalc(String lexicalCalcType){
		String[] arguments = lexicalCalcType.split("-");
		String lexicalTableType;
		String lexicalAlgorithm;
		if (arguments.length == 2) {
			lexicalTableType = arguments[0];
			lexicalAlgorithm = arguments[1];
		} else if (arguments.length == 1) {
			lexicalTableType = "one";
			lexicalAlgorithm = lexicalCalcType;
		} else {
			// Kept as a hard exit to preserve the original behavior.
			System.err.println("wrong argument number for lexical weighting. Found " + arguments.length + " arguments, while expecting 1 or 2");
			System.exit(-1);
			return null; // unreachable
		}
		if (lexicalAlgorithm.equals("moses")) {
			initLexicalTable(lexicalTableType);
			return new MosesLexicalWeightingCalculator(alignments, lexicalTable);
		}
		if (lexicalAlgorithm.equals("weighted")) {
			initLexicalTable(lexicalTableType);
			return new WeightedLexicalWeightingCalculator(alignments, lexicalTable);
		}
		return null;
	}

	/** Builds and initializes the shared lexical table (was duplicated per algorithm). */
	private static void initLexicalTable(String lexicalTableType) {
		System.out.println("initializing Lexical Model");
		lexicalTable = new LexicalT(corpus, getScorer(lexicalTableType));
		lexicalTable.init(BilingualCorpus.TRAIN_CORPUS, alignments);
		System.out.println("finished initializing Lexical Model");
	}

	/**
	 * Returns {@code filePath} if it is set, otherwise {@code defaultPath}.
	 */
	public static String getFilePath(String defaultPath, String filePath){
		return filePath != null ? filePath : defaultPath;
	}

	/**
	 * Creates the phrase-pair output writer.
	 *
	 * "regular" writes the standard extraction table to {@code output}.
	 * "detailed" additionally parses the "sentences" property (comma-separated
	 * indices and inclusive "min-max" ranges) into {@code sentences} and
	 * writes a per-sentence table to {@code detailedOutput}.
	 *
	 * @param type      "regular" or "detailed"; anything else yields null
	 * @param sentences list the selected sentence indices are appended to
	 *                  (side effect, only for "detailed")
	 */
	public static ExtractedPhrasePairWriter getOutputWriter(String type, List<Integer> sentences) throws FileNotFoundException, UnsupportedEncodingException{
		if (type.equals("regular")) {
			return new ExtractionTableWriter(getBilingualCorpus(), output, encoding);
		}

		if (type.equals("detailed")) {
			// Build a "gold" alignment set: copies of the current alignments
			// with points and posteriors cleared.
			AlignmentsSet gold = new AlignmentsSet();
			for (Alignment al : alignments.getAlignments()) {
				Alignment cleared = new Alignment(al);
				cleared.resetPoints();
				cleared.resetPosteriors();
				gold.addAlignment(cleared);
			}

			// Parse the sentence selection, e.g. "1,4-7,9".
			// NOTE(review): sentencesString is null when the "sentences"
			// property is absent — this still NPEs, as it did originally.
			for (String token : sentencesString.split(",")) {
				if (token.contains("-")) {
					String[] bounds = token.split("-");
					int min = Integer.parseInt(bounds[0]);
					int max = Integer.parseInt(bounds[1]);
					for (int i = min; i <= max; i++) {
						// Integer.valueOf instead of the deprecated new Integer(...).
						sentences.add(Integer.valueOf(i));
					}
				} else {
					sentences.add(Integer.valueOf(token));
				}
			}

			return new DetailedTableWriter(sentences, detailedOutput, corpus, alignments, gold);
		}

		return null;
	}

	/**
	 * Returns the alignment-information writer: "1-best" emits the single best
	 * alignment per phrase pair; any other value writes nothing.
	 */
	public static AlignmentInformationWriter getAlignmentWriters(String alignmentInformation){
		return alignmentInformation.equals("1-best")
				? new BestAlignmentInformationWriter(corpus, alignments)
				: new NoAlignmentInformationWriter();
	}

	/** Returns the corpus loaded by {@link #loadCorpus()} (null before that). */
	public static BilingualCorpus getBilingualCorpus() {
		return corpus;
	}

	/**
	 * Loads the adaptation info from the configured file; leaves
	 * {@code adaptationInfo} null when no adaptation file is configured.
	 */
	public static void buildAdaptationInfo() throws IOException{
		if (adaptationFile.isEmpty()) {
			return; // no adaptation configured
		}
		adaptationInfo = new AdaptationInfo();
		adaptationInfo.loadFromFile(adaptationFile);
	}

	/**
	 * Returns the adaptation info built by {@link #buildAdaptationInfo()};
	 * null when no adaptation file was configured.
	 */
	public static AdaptationInfo getAdaptationInfo(){
		return adaptationInfo;
	}
	
	/**
	 * Builds a weighted graph-based phrase reordering calculator from a
	 * '-'-separated scorer list, e.g. "distance_penalty-weighted_alignments".
	 * Unknown scorer names are silently ignored.
	 */
	public static WeightedGraphPhraseReorderingCalculator getWeightedGraphReorderingCalculator(String scorers){
		WeightedGraphPhraseReorderingCalculator calc = new WeightedGraphPhraseReorderingCalculator(alignments);
		for (String name : scorers.split("-")) {
			if (name.equals("distance_penalty")) {
				calc.addGraphScorer(new LengthPenalty());
			} else if (name.equals("weighted_alignments")) {
				calc.addGraphScorer(new PhraseScoreScorer(alignments));
			} else if (name.equals("source_segmentation")) {
				calc.addGraphScorer(new SourceSegmentationScorer(getWordTrieSource()));
			} else if (name.equals("discontinuous_penalty")) {
				calc.addGraphScorer(new DiscontinuousPenalty());
			}
		}
		return calc;
	}
	
	/**
	 * Lazily builds and returns the source-side vocabulary trie
	 * (not thread-safe).
	 */
	public static WordTrie getWordTrieSource(){
		if (sourceVocab != null) {
			return sourceVocab;
		}
		sourceVocab = new WordTrie(wordTrieSourceSize);
		// NOTE(review): population via addPhrasesAllSentences(corpus._trainSourceSentences)
		// was disabled in the original, so the trie is compacted while empty.
		sourceVocab.compactTrie();
		return sourceVocab;
	}
	/**
	 * Lazily builds and returns the target-side vocabulary trie
	 * (not thread-safe).
	 */
	public static WordTrie getWordTrieTarget(){
		if (targetVocab != null) {
			return targetVocab;
		}
		targetVocab = new WordTrie(wordTrieTargetSize);
		// NOTE(review): population via addPhrasesAllSentences(corpus._trainForeignSentences)
		// was disabled in the original, so the trie is compacted while empty.
		targetVocab.compactTrie();
		return targetVocab;
	}
	
	/**
	 * Runs a throw-away extraction pass over the training corpus whose only
	 * effect is to feed every extracted source/target phrase into the
	 * source/target word tries. Scoring, candidate extraction and acceptance
	 * are copied from the supplied control points; the output writer is
	 * replaced by one that inserts phrases into the tries instead of a file.
	 */
	public static void addPhrasesToWordTries(ControlPoints controlPoints){
		ControlPoints trieFeedingPoints = new ControlPoints(alignments);
		trieFeedingPoints.setPhraseScorer(controlPoints.getPhraseScorer());
		trieFeedingPoints.setPhraseCandidateExtractor(controlPoints.getPhraseCandidateExtractor());
		trieFeedingPoints.setPhraseAccepter(controlPoints.getPhraseAccepter());
		trieFeedingPoints.setOutputWriter(new ExtractedPhrasePairWriter(){

			@Override
			public void close() {
				// nothing to release
			}

			@Override
			public void initialize() {
				// nothing to set up
			}

			@Override
			public void write(ExtractedPhrasePair pair) {
				getWordTrieSource().insertPhraseRecursive(pair.getSourcePhrase());
				getWordTrieTarget().insertPhraseRecursive(pair.getTargetPhrase());
			}

			@Override
			public void writeHeaders() {
				// no headers
			}

			@Override
			public void writeInfo(String key, String value) {
				// no metadata
			}
		});
		trieFeedingPoints.setAlignmentInfoWriter(new NoAlignmentInformationWriter());
		GeneralPhraseExtraction.generateMosesExtractionFile(corpus, BilingualCorpus.TRAIN_CORPUS, trieFeedingPoints);
	}
	
	/** Returns the path of the serialized source-side word trie. */
	public static String getWordTrieSourceFile(){
		return output + ".wordtrie.source";
	}
	
	/** Returns the path of the serialized target-side word trie. */
	public static String getWordTrieTargetFile(){
		return output + ".wordtrie.target";
	}
}
