package etxt2db.parsing;

import java.io.File;
import java.io.IOException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.List;

import org.antlr.runtime.ANTLRStringStream;
import org.antlr.runtime.CharStream;
import org.antlr.runtime.CommonTokenStream;
import org.antlr.runtime.RecognitionException;

import etxt2db.api.ClassificationEvaluator;
import etxt2db.api.ClassificationExecutor;
import etxt2db.api.ClassificationModel;
import etxt2db.api.ClassificationModelCreator;
import etxt2db.api.Document;
import etxt2db.creator.WorkflowLoader;
import etxt2db.features.EditableTokenFE;
import etxt2db.mappings.AnnotationToKeyMapper;
import etxt2db.section.FullDocumentSectionSplitter;
import etxt2db.section.LineSectionSplitter;
import etxt2db.section.SectionSplitter;
import etxt2db.section.UCompareSectionSplitter;
import etxt2db.serialization.ClassificationModelSerializer;
import etxt2db.tokenizer.RegexTokenizer;
import etxt2db.tokenizer.Tokenizer;
import etxt2db.tokenizer.UCompareTokenizer;
import etxt2db.ucompare.query.Etxt2DBGrammarLexer;
import etxt2db.ucompare.query.Etxt2DBGrammarParser;
import etxt2db.ucompare.query.QueryInformationObject;

/**
 * E-txt2db specification interpreter.
 *
 * @author Gonçalo Simões
 * @author Rui Lageira
 */
public class Parser {
	
	/** Directory where serialized classification models are stored. */
	private String pathForModels = "./";
	/** String marking the beginning of an annotation tag. */
	private String beginTag = "<";
	/** String marking the end of an annotation tag. */
	private String endTag = ">";
	
	// Defaults: treat the whole document as a single section and tokenize with
	// a generic regex (runs of digits, word characters, or single non-word chars).
	private SectionSplitter splitter = new FullDocumentSectionSplitter();
	private Tokenizer tok = new RegexTokenizer("\\s*(\\d+|\\w+|\\W)\\s*");
	
	/** Maps annotation type names to the keys/paths used by the workflow loader. */
	private AnnotationToKeyMapper mapper = new AnnotationToKeyMapper();

	/**
	 * Constructs an E-txt2db specification interpreter that saves
	 * classification models in the current working directory.
	 */
	public Parser(){
	}
	
	/**
	 * Constructs an E-txt2db specification interpreter and sets the directory where 
	 * the classification models will be saved.
	 * <p>
	 * @param path	the path where classification models will be saved
	 */
	public Parser(String path){
		this.pathForModels=path;
	}
	
	/**
	 * Sets the string of the beginning of a tag.
	 * 
	 * @param str the string to set
	 */
	public void setBeginTag(String str){
		beginTag = str;
	}
	
	/**
	 * Sets the string of the ending of a tag.
	 * 
	 * @param str the string to set
	 */
	public void setEndTag(String str){
		endTag = str;
	}

	/**
	 * Parses a specification query using ANTLR generated Lexer and Parser 
	 * which populates a list of {@link QueryInformationObject} containing 
	 * all the information from the query which, on his turn will be interpreted 
	 * according to the logic of E-txt2db.
	 * <p>
	 * @param text	the specification query to interpret
	 * @return	a list of data objects containing useful information from the parsing 
	 * of the query, or {@code null} if parsing or execution failed (errors are
	 * reported on {@code System.err})
	 */
	public List<ParsingOutputData> interpretQuery(String text) {

		// The list of model paths and attributes of all models generated.
		List<ParsingOutputData> outputDataObjects = new ArrayList<ParsingOutputData>();
		
		System.out.print("Please, wait while the query is parsed\n");
		CharStream input = new ANTLRStringStream(text);
		Etxt2DBGrammarLexer lex = new Etxt2DBGrammarLexer(input);

		CommonTokenStream tokens = new CommonTokenStream(lex);
		Etxt2DBGrammarParser parser = new Etxt2DBGrammarParser(tokens);
		try {
			parser.root();
		} catch (RecognitionException e1) {
			// ANTLR3 parsers normally collect recognition problems in their own
			// error list (checked below); report this one instead of silently
			// swallowing it.
			System.err.print("Query recognition error: " + e1.getMessage() + "\n");
		}
		List<String> errors = parser.getErrors();
		if(errors.size()>0){
			System.err.print("Query parsing finished with the following errors:\n");
			// StringBuilder avoids O(n^2) string concatenation in the loop.
			StringBuilder result = new StringBuilder();
			for(String error: errors){
				result.append(error).append("\n");
			}
			System.err.print(result.toString());
			return null;
		}else{
			System.out.print("Parsing was a success\n");
			for(QueryInformationObject info : parser.queries){
				if(info.getAction() == Etxt2DBGrammarParser.TRAINING){
				
					// UCompare capability names (context splitters, tokenizer,
					// extra features) required by the trained model.
					List<String> capabilities = new ArrayList<String>();
					
					if(info.getWorkflowFile() != null) {
						
						// Choose how the input documents are split into sections.
						switch (info.getContext()) {
						
						case Etxt2DBGrammarParser.DOCUM:
							splitter = new FullDocumentSectionSplitter();
							break;
						case Etxt2DBGrammarParser.PARAG:
							splitter = new UCompareSectionSplitter("Paragraph");
							capabilities.add("Paragraph");
							break;
						case Etxt2DBGrammarParser.SENT:
							splitter = new UCompareSectionSplitter("Sentence");
							capabilities.add("Sentence");
							break;
						case Etxt2DBGrammarParser.PERLINE:
							splitter = new LineSectionSplitter();
							break;
							
						}
						
						if(info.getTokenizerName() != null) {
							tok = new UCompareTokenizer(info.getTokenizerName());
							capabilities.add(info.getTokenizerName());
						}
						
					}
					
					if(info.getTechniqueUsed() == Etxt2DBGrammarParser.EXPREG){
						System.out.print("Starting  the creation of a regular expression model\n");
						ClassificationModelCreator creator = new ClassificationModelCreator(splitter,tok);
						info.setTrainingResult(creator.createRegexClassificationModel(info.getRegularExpression(), info.getRegularExpressionType()));
					}else if(info.getTechniqueUsed() == Etxt2DBGrammarParser.DICTIO){
						System.out.print("Starting  the creation of a dictionary model\n");
						try {
							ClassificationModelCreator creator = new ClassificationModelCreator(splitter,tok);
							info.setTrainingResult(creator.createDictionaryClassificationModel(new File(info.getDictionaryPath()), info.isDictionaryCase()));
						} catch (IOException e) {
							System.err.print("Error opening the file " + info.getDictionaryPath());
							return null;
						} catch (ParseException e) {
							System.err.print("Error parsing the file " + info.getDictionaryPath());
							return null;
						}
					}else if(info.getTechniqueUsed() == Etxt2DBGrammarParser.MACLEA){
						System.out.print("Starting  the training of a machine learning model\n");
						System.out.print("It may take a few minutes... Please wait...\n");
						
						ClassificationModelCreator trainer = new ClassificationModelCreator(splitter, tok);
						trainer.setBeginTag(beginTag);
						trainer.setEndTag(endTag);
						
						if(info.getWorkflowFile() != null) {

							System.out.println("Running UCompare workflow " + info.getWorkflowFile());
							
							WorkflowLoader workflowLoader = new WorkflowLoader(splitter, tok, mapper);
							
							List<Document> workflowDocuments = null;
							
							// loads the documents, executes the workflow, fills documents with annotations.
							try {
								workflowDocuments = workflowLoader.load(new File(info.getMachineLearningTrainingPath()), info.getWorkflowFile());
							} catch (Exception e1) {
								// Report and abort like every other error path in this
								// method instead of killing the JVM with System.exit.
								System.err.print("Error running the UCompare workflow "
										+ info.getWorkflowFile() + ": " + e1.getMessage() + "\n");
								return null;
							}
							
							try {
							
								if(info.getUCompareFeaturesList() != null) {
									for(String ucFeature : info.getUCompareFeaturesList()) {
										capabilities.add(ucFeature);
									}
								}
								
								if(info.getFeaturesList() != null) {
									EditableTokenFE featureExtractor = new EditableTokenFE(info.getFeaturesList(),info.getFeaturesWindow());
									info.setTrainingResult(trainer.trainMachineLearningModel(
											workflowDocuments,
											info.getMachineLearningTechnique(),
											info.getTypesList(),
											featureExtractor));
								} else {
									info.setTrainingResult(trainer.trainMachineLearningModel(
											workflowDocuments,
									        info.getMachineLearningTechnique(),
									        info.getTypesList()));
								}
								
							} catch (ParseException e) {
								System.err.print("Error parsing the file " + info.getMachineLearningTrainingPath());
								return null;
							} catch (IOException e) {
								System.err.print("Error opening the file " + info.getMachineLearningTrainingPath());
								return null;
							}
							
						} else {
							
							try {
							
								if(info.getFeaturesList() != null) {
									EditableTokenFE featureExtractor = new EditableTokenFE(info.getFeaturesList(),info.getFeaturesWindow());
									info.setTrainingResult(trainer.trainMachineLearningModel(
											new File(info.getMachineLearningTrainingPath()),
									        info.getMachineLearningTechnique(),
									        info.getTypesList(),
									        featureExtractor));
								} else {
									info.setTrainingResult(trainer.trainMachineLearningModel(new File(info.getMachineLearningTrainingPath()),
									        info.getMachineLearningTechnique(),
									        info.getTypesList()));
								}
								
							} catch (ParseException e) {
								System.err.print("Error parsing the file " + info.getMachineLearningTrainingPath());
								return null;
							} catch (IOException e) {
								System.err.print("Error opening the file " + info.getMachineLearningTrainingPath());
								return null;
							}
						}
					}
					
					String annotatorName = info.getAnnotatorName() + ".ann";
					String annotatorPath = pathForModels+annotatorName;
					
					// mkdirs (not mkdir) so nested model directories are created;
					// a false return simply means the directory already exists or
					// creation failed, which serialization will surface.
					new File(pathForModels).mkdirs();
					
					List<String> inputs = new ArrayList<String>();
					
					for(String inputKey : capabilities) {
						inputs.add(mapper.getPath(inputKey));
					}
					
					outputDataObjects.add(new ParsingOutputData(info.getAnnotatorName(), info.getTypesList(), inputs));
					
					ClassificationModelSerializer serial = new ClassificationModelSerializer();
					serial.serializeClassificationModel(info.getTrainingResult(), annotatorPath);
					System.out.print("Training finished with success\n");
				}else if(info.getAction() == Etxt2DBGrammarParser.CLASSIFYING){
					System.out.print("Starting the classification of the corpus\n");
					ClassificationExecutor exec = new ClassificationExecutor();
					exec.setBeginTag(beginTag);
					exec.setEndTag(endTag);
					List<String> list = info.getTypesList();
	
					String annotatorName = info.getAnnotatorName() + ".ann";
					String annotatorPath = pathForModels+annotatorName;
					ClassificationModelSerializer serial = new ClassificationModelSerializer();
					ClassificationModel annot = serial.deserializeClassificationModel(annotatorPath);
	
					File testingFile = new File(info.getTestingFile());
	
					try {
						System.out.print(exec.getClassifiedString(testingFile, 
								annot, 
								list));
					} catch (IOException e) {
						System.err.print("Error opening the file " + info.getTestingFile());
						return null;
					} catch (ParseException e) {
						System.err.print("Error parsing the file " + info.getTestingFile());
						return null;
					}
				}else if(info.getAction() == Etxt2DBGrammarParser.EVALUATING){
					// Fixed copy-paste message: this branch evaluates, not classifies.
					System.out.print("Starting the evaluation of the corpus\n");
					ClassificationEvaluator eval = new ClassificationEvaluator();
					eval.setBeginTag(beginTag);
					eval.setEndTag(endTag);
					
					List<String> list = info.getTypesList();
	
					String annotatorName = info.getAnnotatorName() + ".ann";
					String annotatorPath = pathForModels+annotatorName;
					ClassificationModelSerializer serial = new ClassificationModelSerializer();
					ClassificationModel annot = serial.deserializeClassificationModel(annotatorPath);
	
					File testingFile = new File(info.getTestingFile());
	
					try {
						float recall= eval.getRecall(testingFile, 
								annot, 
								list);
						float precision= eval.getPrecision(testingFile, 
								annot, 
								list);
						float fmeasure= eval.getFmeasure(testingFile, 
								annot, 
								list);
						System.out.print("Recall: " + recall + "\nPrecision: " + precision + "\nF-measure: " + fmeasure);
					} catch (IOException e) {
						System.err.print("Error opening the file " + info.getTestingFile());
						return null;
					} catch (ParseException e) {
						System.err.print("Error parsing the file " + info.getTestingFile());
						return null;
					}
				}else if(info.getAction() == Etxt2DBGrammarParser.SETBEGINTAG){
					setBeginTag(info.getBeginTag());
					System.out.print("Finished setting begin tag to " + info.getBeginTag() + "\n");
				}else if(info.getAction() == Etxt2DBGrammarParser.SETENDTAG){
					setEndTag(info.getEndTag());
					System.out.print("Finished setting end tag to " + info.getEndTag() + "\n");
				}else{
					System.err.print("Parsing error!");
					return null;
				}
			}
		}
		return outputDataObjects;
	}

	/**
	 * Returns the annotation types mapper.
	 * 
	 * @return the mapper currently in use
	 */
	public AnnotationToKeyMapper getMapper() {
		return mapper;
	}

	/**
	 * Sets the annotation types mapper.
	 * 
	 * @param mapper the mapper to set
	 */
	public void setMapper(AnnotationToKeyMapper mapper) {
		this.mapper = mapper;
	}

}
