package com.cs224u;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.Serializable;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import weka.classifiers.Classifier;
import weka.classifiers.functions.SMO;
import weka.core.Attribute;
import weka.core.FastVector;
import weka.core.Instance;
import weka.core.Instances;
import weka.filters.Filter;
import weka.filters.unsupervised.attribute.StringToWordVector;

/**
 * Word-sense-disambiguation classifier, based on the Weka sample code found here:
 * http://www.cs.waikato.ac.nz/~ml/weka/example_code/2ed/MessageClassifier.java
 *
 * @author alecmgo@stanford.edu
 */
public class Wsd implements Serializable {

  private static final long serialVersionUID = 6623646459046924197L;

  /** Upper bound on the number of senses for any keyword ("turn.v" has the most). */
  private static final int MAX_NUM_SENSES = 43;

  /** Total number of attributes, including the class attribute.
      Increase this whenever you add a new feature attribute. */
  private static final int NUM_ATTRIBUTES = 5;

  // Compile-time feature toggles used during experiments.
  private static final boolean USE_SURROUNDING_WORDS = true;
  private static final boolean USE_BIGRAMS = false; // bigrams lowered accuracy in experiments
  private static final boolean USE_SURROUNDING_POS = true;

  /** Accumulated training data. */
  private Instances m_Data;

  /** True once m_Classifier has been rebuilt from the current m_Data. */
  private boolean m_UpToDate;

  /** Turns the string attributes into a bag-of-words vector representation. */
  private final StringToWordVector m_Filter = new StringToWordVector();

  private final Classifier m_Classifier;

  /**
   * Builds the classifier and the attribute schema.
   *
   * @throws Exception if any Weka component fails to initialize
   */
  public Wsd() throws Exception {
    //Step 1: Create a classifier.
    //Alternatives tried during development: J48, IB1, IBk(3), Logistic (OOM
    //errors), BayesNet, NaiveBayes, and the meta classifiers Stacking,
    //Bagging, AdaBoostM1(SMO, 1000 iterations) and Vote. SMO (SVM) won.
    m_Classifier = new SMO();

    //Step 2: Initialize fields for the features.
    //A (FastVector) null values list marks a string attribute.
    FastVector attributes = new FastVector(NUM_ATTRIBUTES);

    //1. Unigrams
    attributes.addElement(new Attribute(Constants.ATTRIBUTE_UNIGRAMS, (FastVector) null));

    //2. Position words
    attributes.addElement(new Attribute(Constants.ATTRIBUTE_POSITION_WORDS, (FastVector) null));

    //3. Bigrams
    attributes.addElement(new Attribute(Constants.ATTRIBUTE_BIGRAMS, (FastVector) null));

    //4. Part of speech
    attributes.addElement(new Attribute(Constants.ATTRIBUTE_PART_OF_SPEECH, (FastVector) null));

    //5. Class labels, i.e. the sense ids "0".."42" (should be last in list!)
    FastVector classValues = new FastVector(MAX_NUM_SENSES);
    for (int i = 0; i < MAX_NUM_SENSES; i++) { // int, not Integer: no per-iteration boxing
      classValues.addElement(Integer.toString(i));
    }
    attributes.addElement(new Attribute(Constants.ATTRIBUTE_CLASS, classValues));

    m_Data = new Instances("WSD", attributes, 100);
    m_Data.setClassIndex(m_Data.numAttributes() - 1); //i.e. the last column holds the label

    m_Classifier.setDebug(true);
    m_Filter.setLowerCaseTokens(true);
  }

  /**
   * Routes one line either to training (non-empty class value) or to
   * classification (empty class value).
   */
  private static void process(Wsd wsd, String message, String classValue)
      throws Exception {
    if (!classValue.isEmpty()) {
      wsd.updateData(message, classValue);
    } else {
      wsd.classifyMessage(message);
    }
  }

  /**
   * Classifies one line of text and prints the predicted sense id to stdout.
   * Lazily (re)builds the model the first time it is called after new
   * training data has been added.
   *
   * @throws Exception if filtering or classification fails
   */
  private void classifyMessage(String message) throws Exception {
    if (m_Data.numInstances() == 0) {
      // Programming-order error: classification requested before any training data.
      throw new IllegalStateException("No classifier available.");
    }

    if (!m_UpToDate) {
      // Initialize the filter's vocabulary from the training data, then train.
      m_Filter.setInputFormat(m_Data);
      Instances filteredData = Filter.useFilter(m_Data, m_Filter);
      m_Classifier.buildClassifier(filteredData);
      m_UpToDate = true;
    }

    // Build the test instance against an empty copy of the training header so
    // its string values do not pollute the training data.
    Instances testset = m_Data.stringFreeStructure();
    Instance instance = makeInstance(message, testset);
    m_Filter.input(instance);
    Instance filteredInstance = m_Filter.output();

    double predicted = m_Classifier.classifyInstance(filteredInstance);
    System.out.println(m_Data.classAttribute().value((int) predicted));
  }

  /**
   * Adds one labeled training example and marks the model as stale.
   *
   * @param classValue the sense id label, e.g. "3"
   * @throws Exception if the instance cannot be created
   */
  private void updateData(String message, String classValue) throws Exception {
    Instance instance = makeInstance(message, m_Data);
    instance.setClassValue(classValue);

    m_Data.add(instance);
    m_UpToDate = false;
  }

  /**
   * Given text, creates an instance with the feature attributes filled in.
   *
   * @param text one input line
   * @param data the dataset supplying the attribute header
   * @return a new instance bound to {@code data} (class value unset)
   * @throws Exception if feature extraction fails
   */
  private Instance makeInstance(String text, Instances data) throws Exception {
    Instance instance = new Instance(NUM_ATTRIBUTES);

    //Unigrams: the whole line as a single string attribute
    Attribute unigramsAttribute = data.attribute(Constants.ATTRIBUTE_UNIGRAMS);
    instance.setValue(unigramsAttribute, unigramsAttribute.addStringValue(text));

    //Surrounding words
    if (USE_SURROUNDING_WORDS) {
      List<String> surroundingWords = FeatureExtractor.getSurroundingTokens(text);
      Attribute surroundingWordsAttribute = data.attribute(Constants.ATTRIBUTE_POSITION_WORDS);
      instance.setValue(surroundingWordsAttribute,
          surroundingWordsAttribute.addStringValue(Util.join(surroundingWords)));
    }

    //Bigrams (disabled by default: lower accuracy)
    if (USE_BIGRAMS) {
      List<String> bigrams = FeatureExtractor.getBigrams(text);
      Attribute bigramsAttribute = data.attribute(Constants.ATTRIBUTE_BIGRAMS);
      instance.setValue(bigramsAttribute, bigramsAttribute.addStringValue(Util.join(bigrams)));
    }

    //Part of speech of the surrounding tokens
    if (USE_SURROUNDING_POS) {
      List<String> pos = FeatureExtractor.getSurroundingPOS(text);
      Attribute partOfSpeechAttribute = data.attribute(Constants.ATTRIBUTE_PART_OF_SPEECH);
      instance.setValue(partOfSpeechAttribute, partOfSpeechAttribute.addStringValue(Util.join(pos)));
    }

    instance.setDataset(data);
    return instance;
  }

  /**
   * Trains on every labeled line of the train file, then prints one predicted
   * sense id per line of the test file.
   *
   * @param options {trainFile, testFile}
   * @throws Exception on I/O or Weka errors
   */
  public static void main(String[] options) throws Exception {
    if (options.length < 2) {
      System.err.println("Usage: Wsd <trainFile> <testFile>");
      System.exit(1);
    }

    String trainFile = options[0];
    System.err.println("Train file: " + trainFile);

    String testFile = options[1];
    System.err.println("Test file: " + testFile);

    String line;
    Wsd wsd = new Wsd();

    //Pattern matches keyword_senseId tokens, e.g. "turn_3"
    Pattern regex = Pattern.compile("\\w+_\\d+");

    //Training data. NOTE(review): FileReader uses the platform default
    //charset — confirm the data files match it.
    BufferedReader trainReader = new BufferedReader(new FileReader(trainFile));
    try {
      while ((line = trainReader.readLine()) != null) {
        Matcher regexMatcher = regex.matcher(line);
        if (regexMatcher.find()) {
          // group(0) is "keyword_senseId"; only the sense id is used here.
          String sense = regexMatcher.group(0).split("_")[1];
          process(wsd, line, sense);
        }
      }
    } finally {
      trainReader.close(); // close even if processing throws
    }

    //Test data: an empty class value means "classify this line"
    BufferedReader testReader = new BufferedReader(new FileReader(testFile));
    try {
      while ((line = testReader.readLine()) != null) {
        process(wsd, line, "");
      }
    } finally {
      testReader.close();
    }
  }
}
