/**
 *	@author: Manimin Morga
 *	date created: February 8, 2006
 *	version: 2.0
 *	modified: July 24, 2006
 */

package text.ia;

import java.util.Hashtable;
import java.util.Vector;

import javax.swing.JOptionPane;

import text.obj.AlignedSentence;
import text.obj.TExtSentence;

public class InputAnalyzer implements InputAnalyzerInterface {

	/**
	 * Removes the markup characters '[', ']' and "->" so they never reach
	 * sentence detection or tokenization.
	 *
	 * @param strText raw input text
	 * @return the text with all markup occurrences removed
	 */
	private static String stripMarkup(String strText) {
		return strText.replace("[", "").replace("]", "").replace("->", "");
	}

	/**
	 * Lowercases the leading character of a sentence so that sentence-initial
	 * capitalization does not create spurious distinct word forms.
	 * Empty sentences are returned unchanged (the previous code threw
	 * StringIndexOutOfBoundsException on them).
	 *
	 * @param strSentence a single detected sentence; may be empty
	 * @return the sentence with its first character lowercased if it was uppercase
	 */
	private static String decapitalize(String strSentence) {
		if (strSentence.length() > 0 && Character.isUpperCase(strSentence.charAt(0))) {
			return Character.toLowerCase(strSentence.charAt(0)) + strSentence.substring(1);
		}
		return strSentence;
	}

	/**
	 * Decapitalizes and tokenizes each raw sentence into a TExtSentence.
	 *
	 * @param tokenizer  shared tokenizer instance
	 * @param vSentences raw sentence strings
	 * @param cLanguage  language code passed through to the tokenizer
	 * @return a Vector of TExtSentence objects, one per input sentence
	 */
	private static Vector tokenizeSentences(TExtTokenizer tokenizer, Vector vSentences, char cLanguage) {
		Vector vSentenceObjects = new Vector(vSentences.size());
		for (int i = 0; i < vSentences.size(); i++) {
			String strSentence = decapitalize((String) vSentences.get(i));
			vSentenceObjects.add(new TExtSentence(tokenizer.tokenize(strSentence, cLanguage), tokenizer.getUpdatedSentence()));
		}
		return vSentenceObjects;
	}

	/**
	 * Shared implementation behind analyzeTrainInput and
	 * analyzeTrainInputCountTokens, whose original bodies were byte-identical
	 * duplicates. Segments both texts into sentences, tokenizes them, and
	 * aligns source/target sentences pairwise by index.
	 *
	 * @param strSource       source-language training text
	 * @param strTarget       target-language training text
	 * @param cTargetLanguage target-language code ('e' or other)
	 * @param strSDType       sentence-detection strategy passed to SentenceDetector
	 * @return a Vector of AlignedSentence objects; empty when the sentence
	 *         counts of the two texts differ (an error dialog is shown and a
	 *         message is written to stderr in that case)
	 */
	private Vector doAnalyzeTrainInput(String strSource, String strTarget,
		char cTargetLanguage, String strSDType) {

		strSource = stripMarkup(strSource);
		strTarget = stripMarkup(strTarget);

		// Sentence segmentation.
		SentenceDetector sd = new SentenceDetector();

		// Map the target-language flag onto source/target language codes.
		// NOTE(review): when cTargetLanguage == 'e' the SOURCE is also marked
		// 'e' — this mirrors the original code, but looks like the branches may
		// have been intended the other way around; confirm against
		// SentenceDetector/TExtTokenizer expectations.
		char cSrc;
		char cTarget;
		if (cTargetLanguage == 'e') {
			cSrc = 'e';
			cTarget = 'f';
		} else {
			cSrc = 'f';
			cTarget = 'e';
		}

		Vector vSourceSentences = sd.toSentences(strSource, strSDType, cSrc);
		Vector vTargetSentences = sd.toSentences(strTarget, strSDType, cTarget);

		Vector vAlignedSentenceObjects = new Vector();

		// Pairwise alignment requires the two halves of the corpus to have the
		// same number of sentences.
		if (vSourceSentences.size() != vTargetSentences.size()) {
			JOptionPane.showMessageDialog(null, "The number of sentences in"
				+ " the source language is not\nequal to the number of"
				+ " sentences in the target language.", "Error!", JOptionPane.ERROR_MESSAGE);
			System.err.println("The number of sentences in the source language"
				+ " is not equal to the number of sentences in the target language.");
			return vAlignedSentenceObjects;
		}

		// Tokenize both sides with a single shared tokenizer.
		TExtTokenizer tokenizer = new TExtTokenizer();
		Vector vSourceSentenceObjects = tokenizeSentences(tokenizer, vSourceSentences, cSrc);
		Vector vTargetSentenceObjects = tokenizeSentences(tokenizer, vTargetSentences, cTarget);

		// Unit alignment: pair source/target sentences index by index.
		TExtAligner aligner = new TExtAligner(cTargetLanguage);
		for (int i = 0; i < vSourceSentenceObjects.size(); i++) {
			Hashtable hAligned = aligner.align(
				((TExtSentence) vSourceSentenceObjects.get(i)).getWords(),
				((TExtSentence) vTargetSentenceObjects.get(i)).getWords(), true);

			// getUdpatedTokens() [sic — misspelling is TExtAligner's API]
			// yields the post-alignment token lists: index 0 = source,
			// index 1 = target.
			TExtSentence senNewSource = new TExtSentence(
				(Vector) aligner.getUdpatedTokens().get(0), (String) vSourceSentences.get(i));
			TExtSentence senNewTarget = new TExtSentence(
				(Vector) aligner.getUdpatedTokens().get(1), (String) vTargetSentences.get(i));

			// Clone the mapping so later align() calls cannot mutate what we store.
			vAlignedSentenceObjects.add(new AlignedSentence(senNewSource, senNewTarget, (Hashtable) hAligned.clone()));
		}

		vAlignedSentenceObjects.trimToSize();
		return vAlignedSentenceObjects;
	}

	/**
	 * Analyzes a parallel training text pair: segments, tokenizes, and aligns
	 * source and target sentences.
	 *
	 * @return a Vector of AlignedSentence objects (empty on sentence-count mismatch)
	 */
	public Vector analyzeTrainInput(String strSource, String strTarget,
		char cTargetLanguage, String strSDType) {
		return doAnalyzeTrainInput(strSource, strTarget, cTargetLanguage, strSDType);
	}

	/**
	 * Counts tokens first; the sentence with the bigger number of tokens
	 * becomes the hashtable key.
	 *
	 * NOTE(review): the original implementation of this method was
	 * byte-identical to analyzeTrainInput despite the comment above — the
	 * token-counting behavior it describes was never implemented. The
	 * delegation below preserves the original (identical) behavior.
	 *
	 * @return a Vector of AlignedSentence objects (empty on sentence-count mismatch)
	 */
	public Vector analyzeTrainInputCountTokens(String strSource, String strTarget,
		char cTargetLanguage, String strSDType) {
		return doAnalyzeTrainInput(strSource, strTarget, cTargetLanguage, strSDType);
	}

	/**
	 * Analyzes a single text for translation: strips markup, segments it into
	 * sentences, and tokenizes each sentence.
	 *
	 * @param strInput        text to translate
	 * @param cTargetLanguage language code passed to detection and tokenization
	 * @param strSDType       sentence-detection strategy
	 * @return a Vector of TExtSentence objects, one per detected sentence
	 */
	public Vector analyzeTranslateInput(String strInput, char cTargetLanguage, String strSDType) {

		strInput = stripMarkup(strInput);

		// Sentence segmentation.
		SentenceDetector sd = new SentenceDetector();
		Vector vSentences = sd.toSentences(strInput, strSDType, cTargetLanguage);

		// Tokenize. Note this path uses tokenizeString(), not tokenize() as in
		// the training path — preserved from the original code.
		TExtTokenizer tokenizer = new TExtTokenizer();
		Vector vSentenceObjects = new Vector(vSentences.size());

		for (int i = 0; i < vSentences.size(); i++) {
			String strSentence = decapitalize((String) vSentences.get(i));
			vSentenceObjects.add(new TExtSentence(tokenizer.tokenizeString(strSentence, cTargetLanguage), tokenizer.getUpdatedSentence()));
		}

		vSentenceObjects.trimToSize();

		return vSentenceObjects;
	}
}