import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Vector;

/**
 * Builds bag-of-words feature vectors for word sense disambiguation over the
 * SENSEVAL English lexical-sample data. Training contexts are tokenized,
 * filtered by stop words and (optionally) POS category, and accumulated into a
 * per-lexelt feature vector; test instances are then classified by comparing
 * their context vector against each training instance's vector.
 */
public class BagOfWordsFeature {
  // Runs of spaces stripped from raw context text before tokenization.
  // NOTE(review): replaceAll(..., "") deletes the matched spaces outright,
  // which can fuse adjacent tokens before the whitespace split below —
  // confirm this is the intended normalization.
  public static String PUNCTUATION_REGEXP = "[ ]+";
  /** Number of top-ranked features retained in each lexelt feature vector. */
  public static int BOW_FEAT_LENGTH_ = 20;
  /** POS categories whose tags are accepted as context words. */
  private static String[] cats = {"verb"};
  /** File mapping category name to POS tags, one "category=TAG TAG ..." per line. */
  private static String catFile = "data/EnglishLS/categories";
  /** POS tags accepted by {@link #isAllowed}; filled by {@link #populateTags}. */
  private ArrayList<String> allowedTags;

  /** Similarity measures selectable via {@link #sm_} in getFeatureSense. */
  enum SimilarityMeasure {
    COSINE,
    WEIGHTED,
    BINARY,
  }

  /** Measure used when scoring a test instance against training instances. */
  SimilarityMeasure sm_ = SimilarityMeasure.WEIGHTED;

  StopWords sws_;
  /** Word counts for the lexelt currently being read; cleared in handleLexelt. */
  HashMap<String, Integer> currFeatureCount_;
  /** lexelt -> its finalized top-N feature vector. */
  HashMap<String, BowFeatureVector> lexeltFeatureMap_;

  /** lexelt -> one BowSenseFeature per (sense, training instance) pair. */
  HashMap<String, Vector<BowSenseFeature>> trainFeatures_;

  HashMap<String, Integer> testFeatureMap_;

  /** Supplies the most-frequent-sense fallback for unseen/low-similarity cases. */
  WordSenseCount wsc_;

  /**
   * @param wsc sense-frequency statistics used as the classification fallback
   */
  public BagOfWordsFeature(WordSenseCount wsc) {
    try {
      sws_ = new StopWords();
      populateTags();
    } catch (IOException e) {
      e.printStackTrace();
      System.err.println("Could not load stop words.");
    }

    wsc_ = wsc;

    currFeatureCount_ = new HashMap<String, Integer>();
    lexeltFeatureMap_ = new HashMap<String, BowFeatureVector>();

    trainFeatures_ = new HashMap<String, Vector<BowSenseFeature>>();
    testFeatureMap_ = new HashMap<String, Integer>();
  }

  /**
   * Loads the allowed POS tags for the categories listed in {@link #cats}
   * from {@link #catFile}. On any I/O failure {@code allowedTags} is left
   * empty (never null), so {@link #isAllowed} degrades to rejecting all tags
   * instead of throwing NullPointerException.
   */
  private void populateTags() {
    // BUGFIX: initialize unconditionally so a missing/unreadable category file
    // no longer leaves allowedTags null (the original NPE'd later in isAllowed).
    if (allowedTags == null)
      allowedTags = new ArrayList<String>();

    BufferedReader reader = null;
    try {
      reader = new BufferedReader(new FileReader(catFile));
      String str;
      // category -> list of POS tags belonging to that category
      HashMap<String, ArrayList<String>> posCategories = new HashMap<String, ArrayList<String>>();
      while ((str = reader.readLine()) != null) {
        String[] split = str.split("=");
        // Robustness: skip blank or malformed lines (no "=value" part);
        // the original indexed split[1] unconditionally.
        if (split.length < 2)
          continue;
        ArrayList<String> posList = new ArrayList<String>();
        for (String tag : split[1].split(" ")) {
          posList.add(tag);
        }
        posCategories.put(split[0], posList);
      }

      for (String cat : cats) {
        if (posCategories.containsKey(cat)) {
          allowedTags.addAll(posCategories.get(cat));
        }
      }
    } catch (IOException e) {
      // Covers FileNotFoundException too; the tag filter simply stays empty.
      e.printStackTrace();
    } finally {
      // BUGFIX: the original leaked the reader on every call.
      if (reader != null) {
        try {
          reader.close();
        } catch (IOException ignored) {
          // Best-effort close; nothing further to do.
        }
      }
    }
  }

  /**
   * @return the finalized feature vector for {@code word}, or null if
   *     {@link #handleLexelt} has not yet been called for it
   */
  public BowFeatureVector getFeatureVectorForLexelt(String word) {
    return lexeltFeatureMap_.get(word);
  }

  /**
   * Looks up the count of {@code word} inside the feature vector of
   * {@code lexelt}.
   *
   * @throws NullPointerException if {@code lexelt} has no feature vector yet
   *     (preserves the original contract; callers rely on a prior
   *     {@link #handleLexelt} call)
   */
  public double getFeatureCount(String lexelt, String word) {
    BowFeatureVector fv = lexeltFeatureMap_.get(lexelt);
    return fv.getFeatureCount(word);
  }

  /**
   * Consumes a chunk of training-context text: tokenizes it, drops stop words
   * and (when POS filtering is on) tokens whose tag is not allowed, then
   * records each surviving word both on the parser state and in the running
   * per-lexelt count.
   */
  public void handleTrainCharacters(final String fstr, XMLParserState state) {
    if (!state.isInContext()) return;

    String str = fstr.replaceAll(PUNCTUATION_REGEXP, "");
    String[] toks = str.split("\\s");

    for (int i = 0; i < toks.length; ++i) {
      if ((toks[i] == null || toks[i].equals("") || toks[i].equals(" ") || sws_.isStopWord(toks[i])))
        continue;

      // Tokens are expected in "word/POS" form.
      String[] split = toks[i].split("/");
      if (split.length == 1)
        System.err.println(toks[i] + " in " + str);
      String curr = split[0].toLowerCase();
      if (WSD.POS) {
        // BUGFIX: the original indexed split[1] even when the token had no
        // "/POS" suffix, throwing ArrayIndexOutOfBoundsException. Untagged
        // tokens are now skipped when POS filtering is enabled.
        if (split.length < 2 || !isAllowed(split[1])) {
          continue;
        }
      }
      state.addBowContextWord(curr);
      addWordToLexeltFeatureCount(curr);
    }
  }

  /** @return true iff {@code postag} belongs to one of the allowed categories */
  private boolean isAllowed(String postag) {
    // Null-safe: populateTags may not have run (constructor threw earlier).
    return allowedTags != null && allowedTags.contains(postag);
  }

  /** Increments the running count of {@code word} for the current lexelt. */
  public void addWordToLexeltFeatureCount(String word) {
    Integer c = currFeatureCount_.get(word);

    if (c == null) {
      currFeatureCount_.put(word, 1);
    } else {
      currFeatureCount_.put(word, c + 1);
    }
  }

  /**
   * Records one training instance: for every sense annotated on the instance,
   * a BowSenseFeature capturing the instance's context words is appended to
   * the lexelt's training-feature list.
   */
  public void handleInstance(XMLParserState state) {
    Vector<BowSenseFeature> fv = getTrainInstanceFeatures(state.getLexelt());

    Vector<String> senses = state.getSensesForInstance();
    for (String currSense : senses) {
      BowSenseFeature bsf = new BowSenseFeature(currSense, state.getInstanceId(), state.getContextWords());
      fv.add(bsf);
    }
  }

  /**
   * @return the (lazily created) training-feature list for {@code word};
   *     never null
   */
  public Vector<BowSenseFeature> getTrainInstanceFeatures(String word) {
    Vector<BowSenseFeature> v = trainFeatures_.get(word);

    if (v == null) {
      v = new Vector<BowSenseFeature>();
      trainFeatures_.put(word, v);
    }

    return v;
  }

  /**
   * Finalizes the current lexelt: freezes the accumulated word counts into a
   * top-N feature vector, projects every training instance onto it, and
   * resets the running counts for the next lexelt.
   */
  public void handleLexelt(XMLParserState state) {
    BowFeatureVector bfv = new BowFeatureVector(currFeatureCount_, BOW_FEAT_LENGTH_);
    lexeltFeatureMap_.put(state.getLexelt(), bfv);

    Vector<String> lexeltFeatureVector = bfv.getFeatureVector();

    Vector<BowSenseFeature> fv = getTrainInstanceFeatures(state.getLexelt());
    for (int i = 0; i < fv.size(); ++i) {
      fv.get(i).processFeatureVector(lexeltFeatureVector);
    }

    currFeatureCount_.clear();
  }

  /**
   * Consumes a chunk of test-context text: same tokenization and filtering as
   * {@link #handleTrainCharacters}, but only records context words on the
   * parser state (no count accumulation). POS filtering is always applied.
   */
  public void handleTestCharacters(final String fstr, XMLParserState state) {
    if (!state.isInContext()) return;

    String str = fstr.replaceAll(PUNCTUATION_REGEXP, "");
    String[] toks = str.split("\\s");

    for (int i = 0; i < toks.length; ++i) {
      if (toks[i] == null || toks[i].equals("") || toks[i].equals(" ") || sws_.isStopWord(toks[i]))
        continue;
      String[] split = toks[i].split("/");
      // BUGFIX: skip tokens without a "/POS" suffix; the original indexed
      // split[1] unconditionally and threw ArrayIndexOutOfBoundsException.
      if (split.length < 2)
        continue;
      String curr = split[0].toLowerCase();
      String postag = split[1];
      if (!isAllowed(postag)) {
        continue;
      }

      state.addBowContextWord(curr);
    }
  }

  /**
   * Classifies the test instance held in {@code state}: scores its context
   * vector against every training instance of the target lexelt using
   * {@link #sm_} and returns the sense of the highest-scoring one. Falls back
   * to the corpus-wide most frequent sense when nothing scores above zero.
   */
  public String getFeatureSense(XMLParserState state) {
    String targetWord = state.getLexelt();
    String bestSense = wsc_.getMostFrequentSense(targetWord);
    double maxSim = 0.0;

    BowSenseFeature testFeature = new BowSenseFeature("UNKNOWN", state.getInstanceId(), state.getContextWords());
    BowFeatureVector bfv = getFeatureVectorForLexelt(targetWord);
    testFeature.processFeatureVector(bfv.getFeatureVector());

    Vector<BowSenseFeature> v = getTrainInstanceFeatures(targetWord);
    for (BowSenseFeature fv : v) {
      String currSense = fv.getSense();
      double simValue;

      switch (sm_) {
      case COSINE:
        simValue = testFeature.getCosineSimilarity(fv);
        break;
      case WEIGHTED:
        simValue = testFeature.getWeightedSimilarity(fv, bfv);
        break;
      case BINARY:
        simValue = testFeature.getBinarySimilarity(fv);
        break;
      default:
        simValue = -1.0;
        break;
      }

      if (simValue > maxSim) {
        // BUGFIX: the original never updated maxSim, so bestSense ended up
        // being the LAST sense with any positive similarity rather than the
        // sense with the HIGHEST similarity.
        maxSim = simValue;
        bestSense = currSense;
      }
    }

    testFeatureMap_.clear();
    return bestSense;
  }
}
