grammar Etxt2DBGrammar;

@lexer::header 
{
package etxt2db.ucompare.query;
}

@header
{
package etxt2db.ucompare.query;

import etxt2db.api.ClassificationModel;
import etxt2db.api.ClassificationModelCreator;
import etxt2db.features.*;
import etxt2db.serialization.*;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
}

@members {
    // Action codes stored via QueryInformationObject.setAction(...) by the root rule.
    public static int TRAINING = 0;
    public static int CLASSIFYING = 1;
    public static int EVALUATING = 2;
    public static int SETBEGINTAG = 3;
    public static int SETENDTAG = 4;
    public static int SETDOCPERFILE = 5;
    public static int NOTHING = 6;
    // Technique codes stored via QueryInformationObject.setTechniqueUsed(...).
    public static final int EXPREG = 0;
    public static final int DICTIO = 1;
    public static final int MACLEA = 2;
    public static final int CUNION = 3;
    public static final int NOTECH = 4;
    
    /* RUI: context granularity codes used by the context_name rule. */
    public static final int DOCUM = 0;
    public static final int PARAG = 1;
    public static final int SENT = 2;
    public static final int PERLINE = 3;
    
    // Syntax-error messages collected during the parse; exposed via getErrors().
    private List<String> errors = new ArrayList<String>();
    // One QueryInformationObject per successfully parsed statement, in source order.
    public List<QueryInformationObject> queries = new LinkedList<QueryInformationObject>();
    // The statement currently being populated by rule actions.
    private QueryInformationObject current = new QueryInformationObject();
    
    /**
     * Collects recognition errors instead of printing them to stderr,
     * so callers can inspect them afterwards through getErrors().
     */
    public void displayRecognitionError(String[] tokenNames,
                                        RecognitionException e) {
        String hdr = getErrorHeader(e);
        String msg = getErrorMessage(e, tokenNames);
        errors.add(hdr + " " + msg);
    }
    
    /**
     * Builds a human-readable location prefix for an error message.
     * Guards against a null offending token (previously a NullPointerException),
     * and detects EOF by token type rather than identity: comparing against the
     * Token.EOF_TOKEN singleton with equals() fails when the token stream
     * synthesizes a fresh EOF token.
     */
    public String getErrorHeader(RecognitionException e) {
        if (e.token == null || e.token.getType() == Token.EOF) {
            return "Error in the end of the file";
        }
        int numLine = e.token.getLine();
        return "Error in line "+numLine+":"+e.token.getCharPositionInLine() + " : ";
    }

    /** Delegates to the default ANTLR token rendering. */
    public String getTokenErrorDisplay(Token t) {
      return super.getTokenErrorDisplay(t);
    }
    
    /** Returns the error messages collected while parsing. */
    public List<String> getErrors() {
        return errors;
    }
}


// root: one or more statements; after each statement the populated
// QueryInformationObject is tagged with its action code, appended to
// `queries`, and a fresh `current` is started for the next statement.
root  : (train {current.setAction(TRAINING);queries.add(current); current = new QueryInformationObject();} 
  | classify {current.setAction(CLASSIFYING);queries.add(current); current = new QueryInformationObject();} 
  | evaluate {current.setAction(EVALUATING);queries.add(current); current = new QueryInformationObject();}
  | setBeginTag {current.setAction(SETBEGINTAG);queries.add(current); current = new QueryInformationObject();} 
  | setEndTag {current.setAction(SETENDTAG);queries.add(current); current = new QueryInformationObject();}
  | setDocPerFile {current.setAction(SETDOCPERFILE);queries.add(current); current = new QueryInformationObject();})+ ;

// RUI
// train: "CREATE CLASSIFICATION MODEL ..." statement. The workflow, context
// and tokenization clauses are optional; the technique clause is mandatory.
train   : trainHeader workflow? context? tokenization? technique;


// trainHeader: names the classification model being created.
trainHeader   
  : CREATE CLASSIFICATION MODEL AS GENERIC_NAME {current.setAnnotatorName($GENERIC_NAME.text);};

// workflow: optional "WITH <file>" clause; the surrounding quotes of the
// STRING token are stripped before storing the path.
workflow
  : WITH STRING {current.setWorkflowFile($STRING.text.substring(1,$STRING.text.length()-1));};

// RUI
// context: optional "CONTEXT <granularity>" clause.
context
  : CONTEXT context_name;
// context_name: maps the granularity keyword to the numeric codes declared
// in @members (DOCUM/PARAG/SENT/PERLINE).
context_name
  : DOCUMENT {current.setContext(DOCUM);}
  | PARAGRAPH {current.setContext(PARAG);}
  | SENTENCE {current.setContext(SENT);}
  | LINE {current.setContext(PERLINE);};

// tokenization: optional "TOKENIZED WITH UCOMPARE <name>" clause naming the
// UCompare tokenizer to use.
tokenization
  : TOKENIZED WITH UCOMPARE GENERIC_NAME {current.setTokenizerName($GENERIC_NAME.text);};

// technique: selects how the model is built — a regular expression, a
// dictionary, an HMM, another ML technique (MEMM/CRF/SVM, optionally with
// captured features), or a union of existing classification models.
// Quoted STRING values have their surrounding quotes stripped.
technique
  : (USING REGEX STRING TO FIND GENERIC_NAME) { current.setTechniqueUsed(EXPREG); current.setRegularExpression($STRING.text.substring(1,$STRING.text.length()-1)); current.setRegularExpressionType($GENERIC_NAME.text); }
  | (USING caseSelection? DICTIONARY STRING)  { current.setTechniqueUsed(DICTIO); current.setDictionaryPath($STRING.text.substring(1,$STRING.text.length()-1));}
  | (USING HMM TRAINED WITH STRING TO FIND listClasses)
        { current.setTechniqueUsed(MACLEA);
          current.setMachineLearningTrainingPath($STRING.text.substring(1,$STRING.text.length()-1));
          current.setMachineLearningTechnique(ClassificationModelCreator.MLTechnique.HMM);}
  
  // Unlike the HMM alternative above, these techniques may capture features.
  | (USING mlTech TRAINED WITH STRING TO FIND listClasses features?)
        { current.setTechniqueUsed(MACLEA);
          current.setMachineLearningTrainingPath($STRING.text.substring(1,$STRING.text.length()-1));}
  | (USING CLASSIFICATION MODEL UNION listClassifiers) { current.setTechniqueUsed(CUNION);};

// listClassifiers: comma-separated classifier names. Right-recursive, and the
// action runs after the recursive call completes, so names are added to
// `current` in reverse source order — NOTE(review): confirm callers do not
// depend on the ordering.
listClassifiers
  : GENERIC_NAME        {current.addAnnotator($GENERIC_NAME.text);}
  | GENERIC_NAME COMMA listClassifiers  {current.addAnnotator($GENERIC_NAME.text);};

// caseSelection: optional CASE SENSITIVE / CASE INSENSITIVE flag for
// dictionary lookups.
caseSelection 
  : CASE ((SENSITIVE) {current.setDictionaryCase(true);} |(INSENSITIVE) {current.setDictionaryCase(false);});

// listClasses: either an explicit comma-separated list of class names, or
// '*' meaning "all classes" (stored as a null types list).
listClasses
  : listNames
  | ('*') {current.setTypesList(null);};
  
// listNames: right-recursive name list; as in listClassifiers, names are
// added in reverse source order.
listNames 
  : GENERIC_NAME      {current.addType($GENERIC_NAME.text);}
  | GENERIC_NAME COMMA listNames    {current.addType($GENERIC_NAME.text);};

// mlTech: machine-learning technique keyword (HMM is handled by a separate
// alternative in the `technique` rule).
mlTech  : MEMM        {current.setMachineLearningTechnique(ClassificationModelCreator.MLTechnique.MEMM);}
  | CRF       {current.setMachineLearningTechnique(ClassificationModelCreator.MLTechnique.CRF);}
  | SVM       {current.setMachineLearningTechnique(ClassificationModelCreator.MLTechnique.SVM);};

// features: "CAPTURING FEATURES <list>" with an optional window radius;
// the window size is stored only when the optional NUMBER token matched.
features: CAPTURING FEATURES listFeatures (IN WINDOW WITH RADIUS NUMBER)?  {if($NUMBER!=null){
                      current.setFeaturesWindow(Integer.parseInt($NUMBER.text));
                      }};

// listFeatures: comma-separated list of feature specifications.
listFeatures
  : feature             
  | feature COMMA listFeatures;
  
// feature: one character-level feature specification. Every alternative
// lazily initialises current's feature list before adding to it.
feature : CLASSIFICATION MODEL GENERIC_NAME
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        // Previously trained models are deserialized from ./annotators/<name>.ann.
        String annotatorName = $GENERIC_NAME.text + ".ann";
        String annotatorPath = "./annotators/"+annotatorName;
        ClassificationModelSerializer serial = new ClassificationModelSerializer();
        ClassificationModel annot = serial.deserializeClassificationModel(annotatorPath);
        current.addFeature(new ClassificationModelFeatureClassifier(annot.getClassifier(),$GENERIC_NAME.text));
      }
  | SUBSTRING FROM n1=NUMBER TO n2=NUMBER     
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new SubstringFeatureClassifier(Integer.parseInt($n1.text),
                                                        Integer.parseInt($n2.text)));
      }
  | DICTIONARY STRING     
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        try {
          current.addFeature(new DictionaryFeatureClassifier($STRING.text.substring(1,$STRING.text.length()-1)));
        }catch(IOException e){
          // Do not swallow the failure silently: record it so getErrors()
          // reports that this dictionary feature was dropped.
          errors.add("Could not load dictionary " + $STRING.text + " : " + e.getMessage());
        }
      }
  | MATCH STRING        
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new RegexFeatureClassifier($STRING.text.substring(1,$STRING.text.length()-1)));
      }
  | CASE SENSITIVE VALUE          
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new ValueCaseSensitiveFeatureClassifier());
      }
  | CASE INSENSITIVE VALUE          
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new ValueCaseInsensitiveFeatureClassifier());
      }
  | SIZE              
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new SizeFeatureClassifier());
      }
  | CHARACTER PATTERN         
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new PatternFeatureClassifier());
      }
  | CHARACTER TYPE            
      {
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new CharacterTypeFeatureClassifier());
      }
  | UCOMPARE GENERIC_NAME
      {
        // UCompare features are tracked both by name and as a classifier.
        if(current.getUCompareFeaturesList() == null){
          current.setUCompareFeaturesList(new ArrayList<String>());
        }
        current.addUCompareFeature(new String($GENERIC_NAME.text));
        if(current.getFeaturesList() == null){
          current.setFeaturesList(new ArrayList<CharacterFeatureClassifier>());
        }
        current.addFeature(new UCompareAnnotationFeatureClassifier(new String($GENERIC_NAME.text)));
      };
  
// classify: "CLASSIFY <classes> FROM <file> USING <model>" statement;
// the quoted file path has its surrounding quotes stripped.
classify: CLASSIFY listClasses
    FROM STRING
    USING GENERIC_NAME  {current.setAnnotatorName($GENERIC_NAME.text); current.setTestingFile($STRING.text.substring(1,$STRING.text.length()-1));};

// evaluate: "EVALUATE CLASSIFICATION OF <classes> FROM <file> USING <model>"
// statement; the quoted file path has its surrounding quotes stripped.
evaluate: EVALUATE CLASSIFICATION OF listClasses
    FROM STRING
    USING GENERIC_NAME {current.setAnnotatorName($GENERIC_NAME.text); current.setTestingFile($STRING.text.substring(1,$STRING.text.length()-1));};
    
// setBeginTag: "SET BEGIN TAG <string>" (quotes stripped).
setBeginTag
  : SET BEGIN TAG STRING {current.setBeginTag($STRING.text.substring(1,$STRING.text.length()-1));};
  
// setEndTag: "SET END TAG <string>" (quotes stripped).
setEndTag
  : SET END TAG STRING {current.setEndTag($STRING.text.substring(1,$STRING.text.length()-1));};
  
// setDocPerFile: "SET DOCUMENT STYLE ..." statement.
setDocPerFile
  : SET DOCUMENT STYLE documentStyle;
  
// documentStyle: one document per line vs. one document per file.
documentStyle
  : ONE DOCUMENT PER LINE {current.setDocPerFile(false);}
  | ONE DOCUMENT PER FILE {current.setDocPerFile(true);};
  
  
//Lexicon
// Keyword tokens. Each keyword is spelled out with the single-letter
// case-insensitive fragments (A..Z below), so all keywords match in any case.

// RUI
CONTEXT
  : C O N T E X T;
PARAGRAPH
  : P A R A G R A P H;
SENTENCE
  : S E N T E N C E;
TOKENIZED
  : T O K E N I Z E D;
UCOMPARE
  : U C O M P A R E;
CREATE  : C R E A T E;
CLASSIFY
  : C L A S S I F Y;
CLASSES : C L A S S E S;
EVALUATE: E V A L U A T E;
FROM
  : F R O M;
UNION
  : U N I O N;
CORPUS  : C O R P U S;
SET : S E T;
OF  : O F;
BEGIN : B E G I N;
END : E N D;
DOCUMENT
  : D O C U M E N T;
ONE : O N E;
PER : P E R;
LINE  : L I N E;
FILE  : F I L E;
STYLE : S T Y L E;
TAG : T A G;
CLASSIFICATION 
  : C L A S S I F I C A T I O N;
MODEL   : M O D E L;
USING   : U S I N G;
REGEX : R E G E X;
DICTIONARY  
  : D I C T I O N A R Y;
MATCH : M A T C H;
HMM : H M M;
MEMM  : M E M M;
CRF : C R F;
SVM : S V M;
TO  : T O;
AS  : A S;
FIND  : F I N D;
TRAINED : T R A I N E D;
WITH  : W I T H;
CASE  : C A S E;
VALUE : V A L U E;
CAPTURING
  : C A P T U R I N G;
FEATURES: F E A T U R E S;
SENSITIVE
  : S E N S I T I V E;
INSENSITIVE
  : I N S E N S I T I V E;
SUBSTRING
  : S U B S T R I N G;
SIZE  : S I Z E;
CHARACTER
  : C H A R A C T E R;
PATTERN : P A T T E R N;
TYPE  : T Y P E;
IN  : I N;
WINDOW  : W I N D O W;
RADIUS  : R A D I U S;
COMMA : ',';

PLUS  : '+';
MINUS : '-';

// STRING: double- or single-quoted literal. The quotes are kept in the token
// text and stripped by the parser actions; no escape sequences are supported,
// so a quote character cannot appear inside a string of the same quote style.
STRING  : ( '"' (~'"')* '"'
        |         '\'' (~'\'')* '\'');
         
// GENERIC_NAME: identifier-like token that may also start with punctuation.
// NOTE(review): because '-' can start a GENERIC_NAME and NAMECHAR includes
// digits, a signed literal like "-5" matches GENERIC_NAME (same length,
// earlier rule wins) rather than NUMBER — confirm this is intended.
GENERIC_NAME 
  : ( LETTER | '_' | ':'|'.'|'('|')'|'-'|'*'|'$') (NAMECHAR)* ;

NUMBER  :   (PLUS|MINUS)? DIGIT+ ;



//Characters

// NAMECHAR: any character allowed after the first character of GENERIC_NAME.
fragment NAMECHAR
    : LETTER | DIGIT | '_' | ':'|'.'|'('|')'|'-'|'*'|'$'
    ;

// LETTER: any ASCII letter, matched case-insensitively via the fragments below.
fragment LETTER
    : A|B|C|D|E|F|G|H|I|J|K|L|M|N|O|P|Q|R|S|T|U|V|W|X|Y|Z;

// Case-insensitive single-letter fragments used to build the keyword tokens.
fragment A  : 'a'|'A';
fragment B  : 'b'|'B';
fragment C  : 'c'|'C';
fragment D  : 'd'|'D';
fragment E  : 'e'|'E';
fragment F  : 'f'|'F';
fragment G  : 'g'|'G';
fragment H  : 'h'|'H';
fragment I  : 'i'|'I';
fragment J  : 'j'|'J';
fragment K  : 'k'|'K';
fragment L  : 'l'|'L';
fragment M  : 'm'|'M';
fragment N  : 'n'|'N';
fragment O  : 'o'|'O';
fragment P  : 'p'|'P';
fragment Q  : 'q'|'Q';
fragment R  : 'r'|'R';
fragment S  : 's'|'S';
fragment T  : 't'|'T';
fragment U  : 'u'|'U';
fragment V  : 'v'|'V';
fragment W  : 'w'|'W';
fragment X  : 'x'|'X';
fragment Y  : 'y'|'Y';
fragment Z  : 'z'|'Z';

fragment DIGIT
    :    '0'..'9'
    ;
    
// whitespace
// whitespace
// WS: spaces, tabs and all newline conventions (\r\n, \n, \r) are routed to
// the hidden channel so parser rules never see them.
WS  : ( ' '
    | '\t'
    | ('\r' '\n')
    | ('\n')     
    | ('\r')     
    )
    {$channel = HIDDEN;}  //ignore this token
  ;