/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package stanford.tagger;

import edu.stanford.nlp.io.OutDataStreamFile;
import edu.stanford.nlp.tagger.maxent.TaggerConfig;
import edu.stanford.nlp.util.StringUtils;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.io.OutputStream;
import java.io.Serializable;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

/**
 *
 * @author hadoop
 */
public class StanfordTagger implements Serializable {

    // NOTE(review): no explicit serialVersionUID is declared, so the JVM uses a
    // computed default. Adding one now could break deserialization of models
    // already saved with the default id, so it is deliberately left implicit.

    /** Word/tag count dictionary accumulated from the training data. */
    Dictionary dict = new Dictionary();
    /** The tag set, including open/closed-class information. */
    TTags tags;

    byte[][] fnumArr;
    /** Trained model weights (lambdas) wrapper; set during/after training. */
    LambdaSolveTagger prob;
    /** Maps each feature key to its feature number in the model. */
    HashMap<FeatureKey,Integer> fAssociations = new HashMap<FeatureKey,Integer>();
    PairsHolder pairs = new PairsHolder();
    /** Extractors applied to every word. */
    public Extractors extractors;
    /** Extra extractors applied only to rare words. */
    public Extractors extractorsRare;
    AmbiguityClasses ambClasses;
    final boolean alltags = false;
    /** Maps each tag to the set of tokens observed with it in training. */
    HashMap<String, HashSet<String>> tagTokens = new HashMap<String, HashSet<String>>();

    static final int RARE_WORD_THRESH = 5;
    static final int MIN_FEATURE_THRESH = 5;
    static final int CUR_WORD_MIN_FEATURE_THRESH = 2;
    static final int RARE_WORD_MIN_FEATURE_THRESH = 10;
    static final int VERY_COMMON_WORD_THRESH = 250;

    static final boolean OCCURRING_TAGS_ONLY = false;
    static final boolean POSSIBLE_TAGS_ONLY = false;

    /** Default score for unknown tag/word combinations (1.0 for English, else 0.0). */
    double defaultScore;

    int leftContext;
    int rightContext;

    TaggerConfig config;

    /**
     * Determines which words are considered rare.  All words with count
     * in the training data strictly less than this number (standardly, &lt; 5) are
     * considered rare.
     */
    private int rareWordThresh = RARE_WORD_THRESH;

    /**
     * Determines which features are included in the model.  The model
     * includes features that occurred strictly more times than this number
     * (standardly, &gt; 5) in the training data.  Here I look only at the
     * history (not the tag), so the history appearing this often is enough.
     */
    int minFeatureThresh = MIN_FEATURE_THRESH;

    /**
     * This is a special threshold for the current word feature.
     * Only words that have occurred strictly &gt; this number of times
     * in total will generate word features with all of their occurring tags.
     * The traditional default was 2.
     */
    int curWordMinFeatureThresh = CUR_WORD_MIN_FEATURE_THRESH;

    /**
     * Determines which rare word features are included in the model.
     * The features for rare words that have a strictly higher support than
     * this number are included. Traditional default is 10.
     */
    int rareWordMinFeatureThresh = RARE_WORD_MIN_FEATURE_THRESH;

    /**
     * If using tag equivalence classes on following words, words that occur
     * strictly more than this number of times (in total with any tag)
     * are sufficiently frequent to form an equivalence class
     * by themselves. (Not used unless using equivalence classes.)
     *
     * There are places in the code (ExtractorAmbiguityClass.java, for one)
     * that assume this value is constant over the life of a tagger.
     */
    int veryCommonWordThresh = VERY_COMMON_WORD_THRESH;


    int xSize;
    int ySize;
    boolean occuringTagsOnly = OCCURRING_TAGS_ONLY;
    boolean possibleTagsOnly = POSSIBLE_TAGS_ONLY;

    private boolean initted = false;

    // TODO: presumably this should be tied to the command option -verbose
    static final boolean VERBOSE = false;


    /** No-arg constructor for deserialization / manual wiring via the setters. */
    public StanfordTagger(){

    }

    /**
     * Builds a tagger from a configuration.
     *
     * @param config tagger configuration; may be {@code null}, in which case
     *               English "left3words" defaults are assumed
     * @throws RuntimeException if more than one of lang, openClassTags and
     *               closedClassTags is specified in the config
     */
    public StanfordTagger(TaggerConfig config){
        // NOTE(review): initted is an instance field, so it is always false at
        // this point; this guard is dead code retained from the upstream source.
        if (initted) return;  // TODO: why not reinit?

        this.config = config;

        String lang, arch;
        String[] openClassTags, closedClassTags;

        if (config == null) {
          lang = "english";
          arch = "left3words";
          openClassTags = StringUtils.EMPTY_STRING_ARRAY;
          closedClassTags = StringUtils.EMPTY_STRING_ARRAY;
        } else {
          lang = config.getLang();
          arch = config.getArch();
          openClassTags = config.getOpenClassTags();
          closedClassTags = config.getClosedClassTags();

          // lang, openClassTags and closedClassTags are mutually exclusive ways
          // of specifying the tag-set structure; reject any pairwise combination.
          if (((openClassTags.length > 0) && !lang.equals("")) || ((closedClassTags.length > 0) && !lang.equals("")) || ((closedClassTags.length > 0) && (openClassTags.length > 0))) {
            throw new RuntimeException("At least two of lang (\"" + lang + "\"), openClassTags (length " + openClassTags.length + ": " + Arrays.toString(openClassTags) + ")," +
                "and closedClassTags (length " + closedClassTags.length + ": " + Arrays.toString(closedClassTags) + ") specified---you must choose one!");
          } else if ((openClassTags.length == 0) && lang.equals("") && (closedClassTags.length == 0) && ! config.getLearnClosedClassTags()) {
            System.err.println("warning: no language set, no open-class tags specified, and no closed-class tags specified; assuming ALL tags are open class tags");
          }
        }

        if (openClassTags.length > 0) {
          tags = new TTags();
          tags.setOpenClassTags(openClassTags);
        } else if (closedClassTags.length > 0) {
          tags = new TTags();
          tags.setClosedClassTags(closedClassTags);
        } else {

           // unimplemented since we're using Indonesian language which isn't supported yet
           // in Stanford POS Tagger
           tags = new TTags(lang);
        }

        defaultScore = lang.equals("english") ? 1.0 : 0.0;

        if (config != null) {
          rareWordThresh = config.getRareWordThresh();
          minFeatureThresh = config.getMinFeatureThresh();
          curWordMinFeatureThresh = config.getCurWordMinFeatureThresh();
          rareWordMinFeatureThresh = config.getRareWordMinFeatureThresh();
          veryCommonWordThresh = config.getVeryCommonWordThresh();
          occuringTagsOnly = config.occuringTagsOnly();
          possibleTagsOnly = config.possibleTagsOnly();
          // System.err.println("occuringTagsOnly: "+occuringTagsOnly);
          // System.err.println("possibleTagsOnly: "+possibleTagsOnly);

          // A negative configured default score means "keep the language default".
          if(config.getDefaultScore() >= 0)
            defaultScore = config.getDefaultScore();
        }

        if (config == null || config.getMode() == TaggerConfig.Mode.TRAIN) {
          // initialize the extractors based on the arch variable
          // you only need to do this when training; otherwise they will be
          // restored from the serialized file
          if(!arch.equals("generic")){
                System.err.println("Architecture beside generic is not yet implemented, "
                        + "automatically set to generic");
          }
          extractors = new Extractors(ExtractorFramesGeneral.getExtractorFrames("generic"));
          extractorsRare = new Extractors(ExtractorFramesRareGeneral.getExtractorFramesRare(arch, tags));

          setExtractorsGlobal();
        }

        ambClasses = new AmbiguityClasses(tags);

        initted = true;
    }

    // Sometimes there is data associated with the tagger (such as a
    // dictionary) that we don't want saved with each extractor.  This
    // call lets those extractors get that information from the tagger
    // after being loaded from a data file.
    private void setExtractorsGlobal() {
        extractors.setGlobalHolder(this);
        extractorsRare.setGlobalHolder(this);
    }

    /**
     * Saves the trained model (config, sizes, dictionary, tags, extractors,
     * feature associations and lambdas) to the given file.
     * IOExceptions are reported to stderr rather than propagated.
     *
     * @param filename path of the model file to write
     * @param config   configuration to serialize at the head of the file
     */
    public void saveModel(String filename, TaggerConfig config){
        OutDataStreamFile file = null;
        try {
          file = new OutDataStreamFile(filename);
          config.saveConfig(file);
          file.writeInt(xSize);
          file.writeInt(ySize);
          dict.save(file);
          tags.save(file, tagTokens);

          saveExtractors(file);

          file.writeInt(fAssociations.size());
          for (Map.Entry<FeatureKey,Integer> item : fAssociations.entrySet()) {
            int numF = item.getValue();
            file.writeInt(numF);
            FeatureKey fk = item.getKey();
            fk.save(file);
          }

          LambdaSolve.save_lambdas(file, prob.lambda);
        } catch (IOException ioe) {
          System.err.println("Error saving tagger to file " + filename);
          ioe.printStackTrace();
        } finally {
          // Close in finally so the file handle is released even when one of
          // the writes above throws (the original leaked it on that path).
          if (file != null) {
            try {
              file.close();
            } catch (IOException closeEx) {
              System.err.println("Error closing tagger file " + filename);
              closeEx.printStackTrace();
            }
          }
        }
    }

    public TaggerConfig getConfig() {
        return config;
    }

    public void setConfig(TaggerConfig config) {
        this.config = config;
    }

    public Extractors getExtractors() {
        return extractors;
    }

    public void setExtractors(Extractors extractors) {
        this.extractors = extractors;
    }

    public Extractors getExtractorsRare() {
        return extractorsRare;
    }

    public void setExtractorsRare(Extractors extractorsRare) {
        this.extractorsRare = extractorsRare;
    }

    public HashMap<FeatureKey, Integer> getfAssociations() {
        return fAssociations;
    }

    public void setfAssociations(HashMap<FeatureKey, Integer> fAssociations) {
        this.fAssociations = fAssociations;
    }

    public byte[][] getFnumArr() {
        return fnumArr;
    }

    public void setFnumArr(byte[][] fnumArr) {
        this.fnumArr = fnumArr;
    }

    public LambdaSolveTagger getProb() {
        return prob;
    }

    public void setProb(LambdaSolveTagger prob) {
        this.prob = prob;
    }

    public void setTags(TTags tags) {
        this.tags = tags;
    }

    public int getxSize() {
        return xSize;
    }

    public void setxSize(int xSize) {
        this.xSize = xSize;
    }

    public int getySize() {
        return ySize;
    }

    public void setySize(int ySize) {
        this.ySize = ySize;
    }

    /* Package access so it doesn't appear in public API. */
    public boolean isRare(String word) {
        return dict.sum(word) < rareWordThresh;
    }

    /**
     * Serializes the ExtractorFrames and ExtractorFramesRare onto the stream.
     * The ObjectOutputStream is flushed before returning so its buffered data
     * reaches {@code os} before the caller continues writing feature
     * associations directly to the same underlying stream (without the flush
     * the buffered object data could be emitted after later writes,
     * corrupting the saved model).
     *
     * @param os the model output stream being written by {@link #saveModel}
     * @throws IOException if serialization fails
     */
    private void saveExtractors(OutputStream os) throws IOException {
        ObjectOutputStream out = new ObjectOutputStream(os);

        if (VERBOSE) {
            // Debug dump of the extractor configuration; was unconditional.
            System.out.println(extractors.toString() + "\nrare" + extractorsRare.toString());
        }
        out.writeObject(extractors);
        out.writeObject(extractorsRare);
        out.flush();
    }

    public void setTagTokens(TagToken tagtoken){
        this.tagTokens = tagtoken.get();
    }

    public void setDictionary(Dictionary dict){
        this.dict = dict;
    }

    public boolean isAlltags() {
        return alltags;
    }

    public TTags getTags() {
        return tags;
    }


}
