import weka.classifiers.rules.DecisionTable;
import xmlpreprocessed.*;

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * RTE (Recognizing Textual Entailment) phase 3.
 *
 * Parses the preprocessed RTE2 development corpus, extracts four overlap
 * features per text/hypothesis pair (word, lemma, lemma+POS and bigram
 * match), writes them to an ARFF file, and evaluates a Weka DecisionTable
 * classifier over that file.
 *
 * Author: oye — 18.11.11
 */
public class RTEPhase3 {

    // Corpus is parsed once when the instance is created; pairs holds the
    // text/hypothesis pairs with their gold entailment labels.
    XMLParser parser = new XMLParser();
    EntailmentCorpus entailmentCorpus = parser.getEntailementCorpus("rte2_dev_data/RTE2_dev.preprocessed.xml");
    List<Pair> pairs = entailmentCorpus.getProperties();

    /**
     * Extracts the four overlap features for one pair, each normalised by the
     * number of content words in the hypothesis:
     * [0] word match, [1] lemma match, [2] lemma+POS match, [3] bigram match.
     *
     * @param pair a text/hypothesis pair from the corpus
     * @return a 4-element feature vector; all zeros for an empty hypothesis
     *         (previously these divisions produced NaN)
     */
    public double[] featureExtraction(Pair pair){
        // One shared helper instead of the two near-duplicate loops the
        // original had (which also trimmed punctuation inconsistently:
        // only the hypothesis side called trim() before comparing).
        List<Node> wordsInT = collectContentWords(pair.getText().getProperties());
        List<Node> wordsInH = collectContentWords(pair.getHypothesis().getProperties());

        double commonWord=0, commonLemma=0, commonLemmaPOS=0;
        for (Node hNode : wordsInH) {
            // Each hypothesis word is counted at most once per feature.
            boolean foundWord=false, foundLemma=false, foundLemmaPOS=false;
            for (Node tNode : wordsInT) {
                if(!foundLemma && hNode.getLemma().equals(tNode.getLemma())) {
                    commonLemma++;
                    foundLemma=true;
                }

                if(!foundWord && hNode.getWord().trim().toLowerCase().equals(tNode.getWord().trim().toLowerCase())){
                    commonWord++;
                    foundWord=true;
                }

                // BUG FIX: the lemma+POS feature never compared POS tags — the
                // old "continue" only skipped this check when the words matched
                // but the tags differed, so with different surface words a bare
                // lemma match was counted as a lemma+POS match. Require both.
                if(!foundLemmaPOS && hNode.getLemma().equals(tNode.getLemma())
                        && hNode.getPosTag().equals(tNode.getPosTag())){
                    commonLemmaPOS++;
                    foundLemmaPOS=true;
                }
            }
        }

        double[] bigramCalc = calcCommonBiGrams(wordsInT, wordsInH);
        double hSize = wordsInH.size();

        double[] features = new double[4];
        // Guard the denominators: an empty hypothesis (or one with < 2 words
        // for the bigram feature) previously yielded NaN features.
        features[0] = hSize == 0 ? 0.0 : commonWord / hSize;
        features[1] = hSize == 0 ? 0.0 : commonLemma / hSize;
        features[2] = hSize == 0 ? 0.0 : commonLemmaPOS / hSize;
        features[3] = bigramCalc[1] == 0 ? 0.0 : bigramCalc[0] / bigramCalc[1];
        return features;
    }

    /**
     * Flattens a list of sentences into their word nodes, dropping null
     * tokens, tokens without a word or lemma, and "." / "," punctuation.
     */
    private List<Node> collectContentWords(List<Sentence> sentences){
        List<Node> words = new ArrayList<Node>();
        for (Sentence sentence : sentences) {
            for (Node node : sentence.getProperties()) {
                if(node == null || node.getWord() == null || node.getLemma() == null)
                    continue;
                String word = node.getWord().trim();
                if(!word.equals(".") && !word.equals(","))
                    words.add(node);
            }
        }
        return words;
    }

    /**
     * Counts how many hypothesis bigrams (adjacent word pairs) also occur
     * anywhere in the text.
     *
     * @return {matched hypothesis bigrams, total hypothesis bigrams}
     */
    private double[] calcCommonBiGrams(List<Node> tNodes, List<Node> hNodes){
        double sum=0, numBigrams=0;
        for (int i=0; (i+2)<=hNodes.size(); i++) {
            List<Node> hBiGram = hNodes.subList(i, i+2);
            for (int j=0; (j+2)<=tNodes.size(); j++) {
                if(checkNGramEquality(hBiGram, tNodes.subList(j, j+2))){
                    sum++;
                    break; // count each hypothesis bigram at most once
                }
            }
            numBigrams++;
        }
        return new double[]{sum, numBigrams};
    }

    /**
     * Compares two n-grams word-by-word, case-insensitively and ignoring
     * surrounding whitespace.
     *
     * BUG FIX: the original paired elements via gram2.get(gram1.indexOf(g1)),
     * which compares the wrong positions whenever an element of gram1 is equal
     * to an earlier element; iterate by index instead, and require equal sizes.
     */
    private boolean checkNGramEquality(List<Node> gram1, List<Node> gram2){
        if(gram1.size() != gram2.size())
            return false;
        for (int i = 0; i < gram1.size(); i++) {
            String w1 = gram1.get(i).getWord().trim().toLowerCase();
            String w2 = gram2.get(i).getWord().trim().toLowerCase();
            if(!w1.equals(w2))
                return false;
        }
        return true;
    }

    /**
     * Writes an ARFF file with one row per pair: id, the four overlap
     * features, and the gold entailment label.
     *
     * @param fileName destination ARFF file (overwritten if it exists)
     * @throws IOException if the file cannot be written
     */
    public void buildFeatureARFF(String fileName) throws IOException {
        BufferedWriter writer = new BufferedWriter(new FileWriter(new File(fileName)));
        // try/finally so the writer is closed even if a write fails
        // (the original leaked the file handle on any IOException).
        try {
            writer.write("% 1. Title: RTEPhase3\n");
            writer.write("%\n");
            writer.write("% 2. Sources:\n");
            writer.write("%      (a) Creator: J. K. Øye\n");
            writer.write("%      (b) Date: November, 2011\n");
            writer.write("%\n");
            writer.write("@RELATION RTEPhase3\n");
            writer.write("\n");
            writer.write("@ATTRIBUTE id  NUMERIC\n");
            writer.write("@ATTRIBUTE wordMatch  NUMERIC\n");
            writer.write("@ATTRIBUTE lemmaMatch  NUMERIC\n");
            writer.write("@ATTRIBUTE lemmaPOSMatch  NUMERIC\n");
            writer.write("@ATTRIBUTE bigramMatch  NUMERIC\n");
            writer.write("@ATTRIBUTE entailment  string\n");
            writer.write("\n");
            writer.write("@DATA\n");

            for(Pair pair : pairs){
                writer.write(pair.getId()+",");
                double[] features = featureExtraction(pair);
                for (double feature : features) {
                    writer.write(feature+",");
                }
                writer.write(pair.getEntailment()+"\n");
            }
        } finally {
            writer.close();
        }
    }

    /**
     * Builds the ARFF feature file for the dev corpus, then trains/evaluates a
     * Weka DecisionTable classifier on it (hold-out validation, a lemma-based
     * evaluation report, and cross-validation).
     */
    public static void main(String[] args) throws IOException{
        RTEPhase3 rtePhase3 = new RTEPhase3();
        rtePhase3.buildFeatureARFF("features.arff");

        Classifier classifier = new Classifier();
        try {
            classifier.setClassifier(new DecisionTable());
            classifier.validation("features.arff");
            System.out.println("Machine learning: "+eval_rte.evaluateLemmaMatching(rtePhase3.pairs, "bayesClassifier.txt")+"%");
            classifier.crossValidation("features.arff");
        } catch (Exception e) {
            // Weka's API throws bare Exception; report and exit normally.
            e.printStackTrace();
        }
    }
}
