package com.cs224u;

import java.util.ArrayList;
import java.util.List;

import edu.stanford.nlp.tagger.maxent.MaxentTagger;

public class FeatureExtractor {
  /** Width of the token window examined around the target word. */
  public static final int WINDOW_SIZE = 5;

  /** Path to the trained part-of-speech tagger model. */
  private static final String POS_TAG_PATH = "resources/left3words-wsj-0-18.tagger";

  static {
    try {
      // Instantiating the tagger initializes MaxentTagger's shared static
      // state; the instance itself is intentionally discarded.
      // See http://goo.gl/coMZ
      new MaxentTagger(POS_TAG_PATH);
    } catch (Exception e) {
      // Best-effort: report the failure but let the class load. Later calls
      // to MaxentTagger.tagString will fail if the model could not be read.
      e.printStackTrace();
    }
  }

  /**
   * Returns POS-tag features for the tokens in a WINDOW_SIZE window centered
   * on the target word. The target is the token carrying an underscore
   * marker (e.g. "interest_6") in the input; the marker is stripped before
   * tagging.
   *
   * @param text space-separated sentence whose target token contains "_"
   * @return features of the form {@code "tag_<POS>_position<offset>"}
   * @throws Exception if tagging fails
   */
  static List<String> getSurroundingPOS(String text) throws Exception {
    String[] tokens = text.split(" ");

    // 1. Strip the "_sense" marker from the marked token(s) and remember the
    //    bare target word so it can be located again after tagging.
    String target = "";
    StringBuilder cleanText = new StringBuilder();
    for (int i = 0; i < tokens.length; i++) {
      if (tokens[i].contains("_")) {
        tokens[i] = tokens[i].substring(0, tokens[i].indexOf("_"));
        target = tokens[i];
      }
      cleanText.append(tokens[i]).append(" ");
    }

    // Tag the cleaned sentence; each output token looks like "word_TAG".
    String[] tokensWithTags = MaxentTagger.tagString(cleanText.toString()).split(" ");

    // 2. Find the position of the (last occurrence of the) target in the
    //    tagged array.
    int position = -1;
    for (int i = 0; i < tokensWithTags.length; i++) {
      if (tokensWithTags[i].contains(target + "_")) {
        position = i;
      }
    }

    // 3. Extract the window of POS tags, skipping offsets that fall outside
    //    the sentence.
    List<String> features = new ArrayList<String>();
    for (int offset = -WINDOW_SIZE / 2; offset <= WINDOW_SIZE / 2; offset++) {
      int index = position + offset;
      if (index < 0 || index >= tokensWithTags.length) {
        continue;
      }
      String tokenWithTag = tokensWithTags[index];
      String tag = tokenWithTag.substring(tokenWithTag.indexOf("_") + 1);
      features.add("tag_" + tag + "_position" + offset);
    }

    return features;
  }

  /**
   * Returns the tokens within a WINDOW_SIZE window around the target word
   * (the last token containing "_"), excluding padding tokens.
   *
   * @param text space-separated sentence whose target token contains "_"
   * @return features of the form {@code "<token>_position<offset>"}
   */
  static List<String> getSurroundingTokens(String text) {
    List<String> features = new ArrayList<String>();

    // Step 1. Find the last occurrence of the marked target token.
    String[] tokens = text.split(" ");
    int position = -1;
    for (int i = 0; i < tokens.length; i++) {
      if (tokens[i].contains("_")) {
        position = i;
      }
    }

    // Step 2. Collect the surrounding tokens within the window. The
    // interest.train file is not padded with xtrainx, so indices can fall
    // outside the sentence on EITHER side; the original code only guarded
    // the lower bound, which could throw ArrayIndexOutOfBoundsException for
    // targets near the end of the sentence.
    for (int offset = -WINDOW_SIZE / 2; offset <= WINDOW_SIZE / 2; offset++) {
      int index = position + offset;
      if (index < 0 || index >= tokens.length) {
        continue;
      }
      String token = tokens[index];
      if (!token.equals(Constants.TOKEN_PADDING)) {
        features.add(token + "_position" + offset);
      }
    }

    return features;
  }

  /**
   * Returns all adjacent-token bigram features for the given text,
   * lowercased.
   *
   * @param text space-separated text
   * @return features of the form {@code "<token>_bigram_<nextToken>"}
   */
  static List<String> getBigrams(String text) {
    text = text.toLowerCase();
    List<String> features = new ArrayList<String>();
    String[] tokens = text.split(" ");
    // Start at 0 so the first bigram is included; the previous loop started
    // at 1, silently dropping (tokens[0], tokens[1]) while keeping the last
    // bigram — an asymmetric off-by-one.
    for (int i = 0; i < tokens.length - 1; i++) {
      features.add(tokens[i] + "_bigram_" + tokens[i + 1]);
    }
    return features;
  }
}
