package cz.cuni.mff.ufal.volk.langr;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import cz.cuni.mff.ufal.volk.Text;
import cz.cuni.mff.ufal.volk.services.LanguageRecognizerExtended;

/**
 * A language recognizer that uses an n-gram language models.
 *
 * @author Bartłomiej Etenkowski
 */
public class NgramLanguageRecognizer implements LanguageRecognizerExtended<Text>, Serializable {

  private static final long serialVersionUID = 2297459406404130161L;

  /** Creates a recognizer operating in {@link Mode#Normal} mode. */
  public NgramLanguageRecognizer() {
    this(Mode.Normal);
  }

  /**
   * Creates a recognizer operating in the given mode.
   *
   * @param mode the recognition mode; see {@link Mode}
   */
  public NgramLanguageRecognizer(Mode mode) {
    this.mode = mode;
  }

  /** The recognition mode, fixed at construction time. */
  private final Mode mode;

  /**
   * Trains the recognizer from a corpus directory.
   * <p>
   * The source directory is expected to contain a <tt>train</tt> subdirectory with one text
   * file per language (the file name, minus an optional <tt>.txt</tt> suffix, is the language
   * id) and, optionally, a <tt>smooth</tt> subdirectory with held-out data per language used
   * to smoothen the corresponding model. Any previously trained models are discarded.
   *
   * @param source the source directory
   * @param n the n-gram length used both for training and later recognition
   * @throws FileNotFoundException if a corpus file cannot be opened
   * @throws IOException if reading a corpus file fails
   */
  public void train(File source, int n) throws FileNotFoundException, IOException {
    if (!source.isDirectory())
      return; // TODO: report the error instead of silently ignoring it
    this.n = n;
    languageModels.clear();

    // Locate the mandatory "train" and the optional "smooth" subdirectories.
    File train = null, smooth = null;
    File[] entries = source.listFiles();
    if (entries == null) // I/O error while listing the directory
      return; // TODO
    for (File dir : entries) {
      if (!dir.isDirectory())
        continue;
      if (dir.getName().equals("train"))
        train = dir;
      else if (dir.getName().equals("smooth"))
        smooth = dir;
    }
    if (train == null) // the training corpus is missing
      return; // TODO
    for (File corpus : train.listFiles()) {
      if (!corpus.isFile())
        continue;

      String languageId = getLanguageId(corpus);
      if (languageModels.containsKey(languageId))
        continue; // TODO: duplicate corpus for a language; first one wins
      NgramLanguageModel ld = new NgramLanguageModel(languageId, n);
      // NOTE(review): FileReader uses the platform default charset — confirm the corpora
      // are encoded accordingly (or switch to an explicit-charset reader).
      Reader corpusReader = new FileReader(corpus);
      try {
        ld.addReader(corpusReader);
      } finally {
        corpusReader.close();
      }
      ld.updateProbabilities();
      languageModels.put(languageId, ld);
    }
    if (smooth != null) {
      for (File corpus : smooth.listFiles()) {
        if (!corpus.isFile()) // skip subdirectories, mirroring the training loop above
          continue;
        String languageId = getLanguageId(corpus);
        NgramLanguageModel model = languageModels.get(languageId);
        if (model != null) {
          BufferedReader reader = new BufferedReader(new FileReader(corpus));
          try {
            Map<String, Integer> ngrams = countNgrams(reader, n);
            model.smoothen(ngrams, 0.0001, 1000);
          } finally {
            reader.close();
          }
        }
      }
    }
  }

  /**
   * Derives the language id from a corpus file name by stripping a trailing <tt>.txt</tt>
   * suffix, if present.
   */
  private static String getLanguageId(File corpus) {
    String fileName = corpus.getName();
    return fileName.endsWith(".txt") ? fileName.substring(0, fileName.length() - 4) : fileName;
  }

  /** The trained models, keyed by language id. */
  private final Map<String, NgramLanguageModel> languageModels = new HashMap<String, NgramLanguageModel>();

  /** The n-gram length; set by {@link #train(File, int)}. */
  private int n = 0;

  // NOTE(review): resultLimit is exposed via the accessors below but is never consulted
  // by any recognize(...) method — confirm whether it should cap the hypothesis list.
  private int resultLimit = 0;

  public void setResultLimit(int resultLimit) {
    this.resultLimit = resultLimit;
  }

  public int getResultLimit() {
    return resultLimit;
  }

  /**
   * Adds the counts of all character n-grams of {@code s} to {@code counts}.
   * <p>
   * BUG FIX: the loop bound is now {@code i + n <= s.length()} (was {@code i < s.length() - n}),
   * so the final n-gram of the string is counted as well; previously a string of length
   * exactly {@code n} produced no n-grams at all.
   */
  private static void addNGramCounts(Map<String, Integer> counts, String s, int n) {
    for (int i = 0; i + n <= s.length(); i++)
      Utils.incrementIntValue(counts, s.substring(i, i + n));
  }

  /** Counts the character n-grams of a single string. */
  private static Map<String, Integer> countNgrams(String s, int n) {
    Map<String, Integer> counts = new HashMap<String, Integer>();
    addNGramCounts(counts, s, n);
    return counts;
  }

  /** Counts the character n-grams of an entire reader, line by line (n-grams do not span lines). */
  private static Map<String, Integer> countNgrams(BufferedReader r, int n) throws IOException {
    Map<String, Integer> counts = new HashMap<String, Integer>();
    String line;
    while ((line = r.readLine()) != null)
      addNGramCounts(counts, line, n);
    return counts;
  }

  /** Builds one hypothesis per trained model from the observed n-gram counts (unsorted). */
  private List<Hypothesis> recognize(Map<String, Integer> ngramCounts) {
    List<Hypothesis> hyp = new ArrayList<Hypothesis>();
    for (NgramLanguageModel model : languageModels.values()) {
      hyp.add(new Hypothesis(model.getLanguageId(), getRating(ngramCounts, model)));
    }
    return hyp;
  }

  /**
   * Rates how well the model explains the observed n-gram counts: the log-likelihood, i.e.
   * the sum over distinct n-grams of the n-gram count times the log of its smoothed probability.
   */
  private double getRating(Map<String, Integer> ngramCounts, NgramLanguageModel model) {
    double rate = 0.;
    for (Entry<String, Integer> e : ngramCounts.entrySet()) {
      double prob = model.getSmoothedProbability(e.getKey());
      // BUG FIX: the count must MULTIPLY the log-probability. The previous "+" added the
      // total count (a model-independent constant) and left every log-probability unweighted.
      rate += e.getValue() * Math.log(prob);
    }
    return rate;
  }

  /**
   * Builds one hypothesis per trained model, first decoding the observed n-grams with a
   * substitution cipher trained against that model; the fitted substitution table is
   * attached to each hypothesis.
   */
  private List<Hypothesis> recognizeWithSubstitionCipher(Map<String, Integer> ngramCounts, String text) {
    List<Hypothesis> hyp = new ArrayList<Hypothesis>();
    for (NgramLanguageModel model : languageModels.values()) {
      NgramDecoder decoder = new NgramDecoder(model);
      decoder.train(text);
      Map<String, Integer> decodedNgramCounts = new HashMap<String, Integer>(ngramCounts.size());
      for (Entry<String, Integer> e : ngramCounts.entrySet()) {
        decodedNgramCounts.put(decoder.decode(e.getKey()), e.getValue());
      }
      SubstitutionCipherLanguageHypothesis newHyp =
        new SubstitutionCipherLanguageHypothesis(model.getLanguageId(), getRating(decodedNgramCounts, model));
      newHyp.setSubstitutionTable(decoder.getSubstitutionTable());
      hyp.add(newHyp);
    }
    return hyp;
  }

  /**
   * Returns the id of the most likely language of the given text, or {@code null} if no
   * model has been trained.
   */
  @Override
  public String recognize(Text item) {
    List<Hypothesis> hyps = recognize(item, 1);
    return hyps.isEmpty() ? null : hyps.get(0).language;
  }

  /**
   * Returns at most {@code max} hypotheses for the given text, best (highest rating) first.
   *
   * @param text the text to recognize; must not be {@code null}
   * @param max the maximum number of hypotheses to return
   * @throws NullPointerException if {@code text} is {@code null}
   */
  @Override
  public List<Hypothesis> recognize(Text text, int max) {
    if (text == null) // explicit fail-fast (replaces the obscure text.getClass() idiom)
      throw new NullPointerException("text");
    Map<String, Integer> counts = countNgrams(text.getText(), n);
    List<Hypothesis> result;
    if (mode == Mode.SubstitutionCipher) {
      result = recognizeWithSubstitionCipher(counts, text.getText());
    } else {
      result = recognize(counts);
    }
    // Sort ascending then reverse (rather than sorting with a reverse comparator) to keep
    // the original tie-breaking order.
    Collections.sort(result);
    Collections.reverse(result);
    if (result.size() > max) {
      return new ArrayList<Hypothesis>(result.subList(0, max));
    }
    return result;
  }

  /**
   * Recognizes the language of the reader's contents, returning all hypotheses unsorted,
   * or {@code null} if reading fails.
   */
  public List<Hypothesis> recognize(Reader reader) {
    try {
      Map<String, Integer> counts = countNgrams(new BufferedReader(reader), n);
      return recognize(counts);
    } catch (IOException e) {
      // TODO: propagate instead of swallowing; null return kept for caller compatibility
      e.printStackTrace();
      return null;
    }
  }

  /** Recognition mode. */
  public enum Mode {
    /** The text has not been encoded. */
    Normal("norm"),
    /** The text to be recognized has been encoded with a substitution cipher. */
    SubstitutionCipher("sc");

    /** The short string form accepted by {@link #fromString(String)}. */
    private final String str;

    private Mode(String str) {
      this.str = str;
    }

    /**
     * Returns the {@link Mode} for the given {@link String} representation.
     * @param str the {@link String} representation of the mode
     * @return {@link #SubstitutionCipher} if the <tt>str</tt> argument equals to <i>sc</i>, otherwise
     *         {@link #Normal}
     */
    public static Mode fromString(String str) {
      for (Mode m : values())
        if (m.str.equals(str))
          return m;
      return Normal;
    }
  }
}
