package etxt2db.gui;
import java.awt.TextArea;
import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.util.List;

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;

import etxt2db.annotators.OverlapedTagClassificationModel;
import etxt2db.api.ClassificationEvaluator;
import etxt2db.api.ClassificationExecutor;
import etxt2db.api.ClassificationModel;
import etxt2db.api.ClassificationModelCreator;
import etxt2db.features.EditableTokenFE;
import etxt2db.query.Etxt2DBGrammarLexer;
import etxt2db.query.Etxt2DBGrammarParser;
import etxt2db.query.QueryInformationObject;
import etxt2db.serialization.ClassificationModelSerializer;

/**
 * Bridges the Swing/AWT GUI and the Etxt2DB engine: takes the raw query text
 * typed by the user, parses it with the ANTLR-generated grammar, and executes
 * each resulting {@link QueryInformationObject} (training, classification,
 * evaluation, or configuration actions), reporting progress and errors into
 * the supplied {@link TextArea}.
 */
public class Etxt2DBGUIConnector {
	
	/** Output widget where all progress, results and error messages are appended. */
	TextArea GUITextArea;
	
	/** Markers delimiting annotations in the corpora; forwarded to trainers/executors. */
	String beginTag = "<";
	String endTag = ">";
	/** Whether each corpus file contains a single document; forwarded to trainers/executors. */
	boolean docPerFile = true;

	public Etxt2DBGUIConnector(TextArea GUITextArea){
		this.GUITextArea = GUITextArea;
	}
	
	public void setBeginTag(String str){
		beginTag = str;
	}
	
	public void setEndTag(String str){
		endTag = str;
	}
	
	public void setDocPerFile(boolean bool){
		docPerFile = bool;
	}

	/**
	 * Parses {@code text} as an Etxt2DB query and executes every action it
	 * contains, in order. Parse errors abort before any action runs; a failed
	 * action aborts the remaining ones. All feedback goes to the text area.
	 *
	 * @param text the raw query string entered in the GUI
	 */
	public void interpretQuery(String text) {
		GUITextArea.setText("Please, wait while the query is parsed\n");
		Etxt2DBGrammarParser parser = new Etxt2DBGrammarParser(
				new CommonTokenStream(new Etxt2DBGrammarLexer(new ANTLRStringStream(text))));
		try {
			parser.root();
		} catch (RecognitionException ignored) {
			// Intentionally ignored: the generated parser records every syntax
			// problem in its error list, which is reported to the user below.
		}
		List<String> errors = parser.getErrors();
		if (!errors.isEmpty()) {
			GUITextArea.append("Parsing finished with the following errors:\n");
			StringBuilder report = new StringBuilder();
			for (String error : errors) {
				report.append(error).append("\n");
			}
			GUITextArea.append(report.toString());
			return;
		}
		GUITextArea.append("Parsing was a success\n");
		for (QueryInformationObject info : parser.queries) {
			int action = info.getAction();
			if (action == Etxt2DBGrammarParser.TRAINING) {
				if (!executeTraining(info)) {
					return;
				}
			} else if (action == Etxt2DBGrammarParser.CLASSIFYING) {
				if (!executeClassification(info)) {
					return;
				}
			} else if (action == Etxt2DBGrammarParser.EVALUATING) {
				if (!executeEvaluation(info)) {
					return;
				}
			} else if (action == Etxt2DBGrammarParser.SETBEGINTAG) {
				setBeginTag(info.getBeginTag());
				GUITextArea.append("Finished setting begin tag to " + info.getBeginTag() + "\n");
			} else if (action == Etxt2DBGrammarParser.SETENDTAG) {
				setEndTag(info.getEndTag());
				GUITextArea.append("Finished setting end tag to " + info.getEndTag() + "\n");
			} else if (action == Etxt2DBGrammarParser.SETDOCPERFILE) {
				setDocPerFile(info.isDocPerFile());
				GUITextArea.append("Finished setting document style to " + info.isDocPerFile() + "\n");
			} else {
				GUITextArea.append("Parsing error!");
				return;
			}
		}
	}

	/** Location on disk of the serialized annotator with the given name. */
	private static String annotatorPathFor(String annotatorName) {
		return "./annotators/" + annotatorName + ".ann";
	}

	/** Deserializes a previously trained annotator model by name. */
	private ClassificationModel loadAnnotator(String annotatorName) {
		ClassificationModelSerializer serial = new ClassificationModelSerializer();
		return serial.deserializeClassificationModel(annotatorPathFor(annotatorName));
	}

	/** Builds a model creator configured with the connector's current tag/document settings. */
	private ClassificationModelCreator newConfiguredCreator() {
		ClassificationModelCreator creator = new ClassificationModelCreator();
		creator.setBeginTag(beginTag);
		creator.setEndTag(endTag);
		creator.setDocPerFile(docPerFile);
		return creator;
	}

	/**
	 * Trains a model with the technique requested by {@code info} and
	 * serializes the result under the annotator name.
	 *
	 * @return {@code true} on success, {@code false} if an error was reported
	 *         and the remaining queries should be skipped
	 */
	private boolean executeTraining(QueryInformationObject info) {
		int technique = info.getTechniqueUsed();
		if (technique == Etxt2DBGrammarParser.EXPREG) {
			GUITextArea.append("Starting  the creation of a regular expression model\n");
			ClassificationModelCreator creator = new ClassificationModelCreator();
			info.setTrainingResult(creator.createRegexClassificationModel(
					info.getRegularExpression(), info.getRegularExpressionType()));
		} else if (technique == Etxt2DBGrammarParser.DICTIO) {
			GUITextArea.append("Starting  the creation of a dictionary model\n");
			try {
				ClassificationModelCreator creator = new ClassificationModelCreator();
				info.setTrainingResult(creator.createDictionaryClassificationModel(
						new File(info.getDictionaryPath()), info.isDictionaryCase()));
			} catch (IOException e) {
				GUITextArea.append("Error opening the file " + info.getDictionaryPath());
				return false;
			} catch (ParseException e) {
				GUITextArea.append("Error parsing the file " + info.getDictionaryPath());
				return false;
			}
		} else if (technique == Etxt2DBGrammarParser.MACLEA) {
			if (!trainMachineLearning(info)) {
				return false;
			}
		} else if (technique == Etxt2DBGrammarParser.CUNION) {
			GUITextArea.append("Starting  the creation of a union model\n");
			OverlapedTagClassificationModel model = new OverlapedTagClassificationModel();
			// A union model simply aggregates previously trained annotators.
			for (String annotator : info.getAnnotators()) {
				model.addAnnotator(loadAnnotator(annotator));
			}
			info.setTrainingResult(model);
		}
		// Persist whatever model was produced so later queries can load it by name.
		ClassificationModelSerializer serial = new ClassificationModelSerializer();
		serial.serializeClassificationModel(info.getTrainingResult(),
				annotatorPathFor(info.getAnnotatorName()));
		GUITextArea.append("Training finished with success");
		return true;
	}

	/**
	 * Trains a machine-learning model, with or without a custom feature
	 * extractor depending on whether a features list was supplied.
	 *
	 * @return {@code true} on success, {@code false} if an error was reported
	 */
	private boolean trainMachineLearning(QueryInformationObject info) {
		GUITextArea.append("Starting  the training of a machine learning model\n");
		GUITextArea.append("It may take a few minutes... Please wait...\n");
		ClassificationModelCreator trainer = newConfiguredCreator();
		if (info.getFeaturesList() != null) {
			EditableTokenFE featureExtractor;
			try {
				featureExtractor = new EditableTokenFE(info.getFeaturesList(), info.getFeaturesWindow());
			} catch (IOException e) {
				GUITextArea.append("Error generating the Feature Extraction");
				return false;
			}
			try {
				info.setTrainingResult(trainer.trainMachineLearningModel(
						new File(info.getMachineLearningTrainingPath()),
						info.getMachineLearningTechnique(),
						info.getTypesList(),
						featureExtractor));
			} catch (ParseException e) {
				GUITextArea.append("Error parsing the file " + info.getMachineLearningTrainingPath());
				return false;
			} catch (IOException e) {
				GUITextArea.append("Error opening the file " + info.getMachineLearningTrainingPath());
				return false;
			}
		} else {
			try {
				info.setTrainingResult(trainer.trainMachineLearningModel(
						new File(info.getMachineLearningTrainingPath()),
						info.getMachineLearningTechnique(),
						info.getTypesList()));
			} catch (IOException e) {
				GUITextArea.append("Error opening the file " + info.getMachineLearningTrainingPath());
				return false;
			} catch (ParseException e) {
				GUITextArea.append("Error parsing the file " + info.getMachineLearningTrainingPath());
				return false;
			}
		}
		return true;
	}

	/**
	 * Classifies the testing corpus with a previously trained annotator and
	 * appends the annotated text to the output area.
	 *
	 * @return {@code true} on success, {@code false} if an error was reported
	 */
	private boolean executeClassification(QueryInformationObject info) {
		GUITextArea.append("Starting the classification of the corpus\n");
		ClassificationExecutor exec = new ClassificationExecutor();
		exec.setBeginTag(beginTag);
		exec.setEndTag(endTag);
		exec.setDocPerFile(docPerFile);

		ClassificationModel annot = loadAnnotator(info.getAnnotatorName());
		File testingFile = new File(info.getTestingFile());

		try {
			GUITextArea.append(exec.getClassifiedString(testingFile, annot, info.getTypesList()));
		} catch (IOException e) {
			GUITextArea.append("Error opening the file " + info.getTestingFile());
			return false;
		} catch (ParseException e) {
			GUITextArea.append("Error parsing the file " + info.getTestingFile());
			return false;
		}
		return true;
	}

	/**
	 * Evaluates a previously trained annotator against the testing corpus and
	 * reports recall, precision and F-measure.
	 *
	 * @return {@code true} on success, {@code false} if an error was reported
	 */
	private boolean executeEvaluation(QueryInformationObject info) {
		// Fixed copy-paste: this branch previously announced "classification".
		GUITextArea.append("Starting the evaluation of the corpus\n");
		ClassificationEvaluator eval = new ClassificationEvaluator();
		eval.setBeginTag(beginTag);
		eval.setEndTag(endTag);
		eval.setDocPerFile(docPerFile);

		List<String> list = info.getTypesList();
		ClassificationModel annot = loadAnnotator(info.getAnnotatorName());
		File testingFile = new File(info.getTestingFile());

		try {
			float recall = eval.getRecall(testingFile, annot, list);
			float precision = eval.getPrecision(testingFile, annot, list);
			float fmeasure = eval.getFmeasure(testingFile, annot, list);
			GUITextArea.append("Recall: " + recall + "\nPrecision: " + precision
					+ "\nF-measure: " + fmeasure);
		} catch (IOException e) {
			GUITextArea.append("Error opening the file " + info.getTestingFile());
			return false;
		} catch (ParseException e) {
			GUITextArea.append("Error parsing the file " + info.getTestingFile());
			return false;
		}
		return true;
	}

}
