package hr.irb.goTermList;

import hr.irb.geneOntology.GOTerm;
import hr.irb.geneOntology.GeneOntology;
import hr.irb.geneOntology.GoTermProperties;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import weka.core.tokenizers.Tokenizer;

/**
 *
 * @author Fran Supek
 */
/**
 * A bag-of-words model built from the keywords of Gene Ontology terms.<p/>
 *
 * Depending on the constructor used, the per-word values stored internally are
 * either frequencies normalized so their average is 1.0, or a 2*AUC
 * rank-correlation score in [0, 2] -- see the individual constructors.
 *
 * @author Fran Supek
 */
public class GoTermWordCorpus implements Serializable {

  private static final long serialVersionUID = 1111212L;

  /**
   * Words excluded from every corpus (articles, prepositions, GO boilerplate).
   * Duplicate literals in the list ("it", "either", "some", ...) are simply
   * collapsed by the Set. Made static final: the set is a constant shared by
   * all instances (dropping the former non-static field is a
   * serialization-compatible change -- removed fields are ignored on read).
   */
  private static final Set<String> stopwords = new HashSet<String>(Arrays.asList(
          "a", "the", "for", "and", "or", "in", "to", "an", "as", "at",
          "about", "by", "be", "is", "it", "how", "of", "on", "that", "this",
          "was", "what", "when", "where", "who", "will", "with", "during", "it",
          "its", "being", "find", "found", "acts", "terms", "term", "into", "etc",
          "their", "using", "use", "no", "are", "each", "contain", "certain",
          "but", "two", "one", "some", "acting", "comprises", "also", "include",
          "examples", "any", "may", "such", "more", "another", "known",
          "from", "relative", "entity", "involving", "which", "function", "usually",
          "includes", "together", "other", "moiety", "exclude", "excludes",
          "operate", "operates", "either", "conceptually", "actually",
          "essentially", "within", "involved", "containing", "wholly", "among",
          "given", "necessary", "singly", "unknown", "bears", "distinctive",
          "out", "consisting", "corresponds", "not", "comprising", "comprise",
          "whose", "involves", "involve", "either", "so", "relatively",
          "enables", "enable", "features", "all", "some", "normally",
          "several", "perform", "namely", "characteristically", "distinct",
          "characteristic", "hence", "already", "predominantly", "similar",
          "many", "distinguished", "encompasses", "aspect", "occupies", "gene's",
          "gene", "protein", "genes", "work", "pertinent", "biological",
          "consist", "mainly", "perform", "actual", "namely", "symbol",
          "especially", "regarded", "when", "involves", "ones", "besides",
          "derives", "derive", "typically", "goes", "serves", "produces", "similar",
          "whereas", "suitable", "comprises", "whose", "necessarily",
          "exist", "included", "classed", "especially", "highly", "resulting"));

  /** Maps each word to its score; populated by the constructors. */
  private final Map<String,Double> wordFreqs
          = new HashMap<String, Double>();

  /**
   * Gets frequency of given word in corpus; 0.0 for an unknown word
   * (never null).
   */
  public Double getWordFreq(String word) {
    // single map lookup instead of containsKey() followed by get()
    Double freq = wordFreqs.get(word);
    return (freq == null) ? 0.0 : freq;
  }

  /**
   * Returns all words in the corpus. Note: this is a live view of the
   * internal map's key set, not a defensive copy.
   */
  public Set<String> getAllWords() {
    return wordFreqs.keySet();
  }

  /** Adds {@code amount} to the accumulated value of {@code word}. */
  private void addToWordFreq(String word, double amount) {
    Double prev = this.wordFreqs.get(word);
    this.wordFreqs.put(word, (prev == null) ? amount : prev + amount);
  }

  /**
   * A GoTermWordCorpus may be created from a GoTermSizes object: every keyword
   * of every sufficiently-annotated GO term is counted once per term, then
   * stop words are removed and frequencies normalized (average == 1.0).
   */
  public GoTermWordCorpus( GoTermSizes goCatSizes, GeneOntology myGo ) {

    int sumOfSizes = 0;
    for ( Integer termId : goCatSizes.termSizes.keySet() ) {
      sumOfSizes += goCatSizes.getSizeGuessIfUnknown( termId, myGo );
    }

    // do not consider terms inapplicable to this organism
    // or terms with poor annotation coverage.
    // NOTE(review): with Math.min this threshold is 0 until sumOfSizes reaches
    // 100000 (i.e. no filtering at all) and never exceeds 2; Math.max may have
    // been intended -- kept as-is to preserve existing behavior. TODO confirm.
    final int minTermSize = Math.min( 2, sumOfSizes / 100000 );

    for ( Integer termId : goCatSizes.termSizes.keySet() ) {

      if ( goCatSizes.getSizeGuessIfUnknown( termId, myGo ) < minTermSize ) {
        continue;
      }

      GOTerm term = myGo.get( termId );
      for ( String keyword : term.getKeywords() ) {
        addToWordFreq( keyword, 1.0 );
      }
    }

    removeStopWords();
    normalizeWordFreqs();
  }


  /**
   * A GoTermWordCorpus may be created from a Collection (=List or Set)
   * of GoTerms. Each supplied term and all of its parents contribute their
   * keywords; each distinct keyword of a term is counted once per term.
   *
   * @param goTerms terms (expanded with their ancestors) forming the corpus
   */
  public GoTermWordCorpus( Collection<GOTerm> goTerms ) {

    // expand the supplied collection with all ancestors of every term
    Set<GOTerm> termsWithParents = new HashSet<GOTerm>();
    for ( GOTerm term : goTerms ) {
      termsWithParents.add(term);
      termsWithParents.addAll(term.getAllParents());
    }

    for ( GOTerm term : termsWithParents ) {
      // getKeywords() already returns a Set, so each keyword is
      // inherently counted at most once per term
      for ( String keyword : term.getKeywords() ) {
        addToWordFreq( keyword, 1.0 );
      }
    }

    removeStopWords();
    normalizeWordFreqs();
  }


  /** Removes stop words and single-character words from the corpus. */
  private void removeStopWords() {
    Iterator<String> it = this.wordFreqs.keySet().iterator();
    while ( it.hasNext() ) {
      String word = it.next();
      if ( word.length() < 2 || stopwords.contains(word) )
        it.remove();  // Iterator.remove() is safe while iterating
    }
  }


  /**
   * Normalizes by dividing with average frequency, so that the mean value
   * over all words becomes 1.0. No-op on an empty corpus.
   */
  private void normalizeWordFreqs() {
    double sum = 0.0;
    for ( Double freq : this.wordFreqs.values() ) {
      sum += freq;
    }
    // x / sum * size  ==  x / meanFrequency  (kept in this exact form)
    for ( Map.Entry<String, Double> e : this.wordFreqs.entrySet() ) {
      e.setValue( e.getValue() / sum * this.wordFreqs.size() );
    }
  }


  /**
   * For each word present in the baselineWordCorpus, calculates how many times
   * more or less frequently that word is used here (in this WordCorpus), then
   * keeps only the numFromTop most enriched and/or numFromBottom most depleted
   * words.
   *
   * @param baselineWordCorpus corpus supplying the reference frequencies
   * @param numFromTop how many of the most enriched words to keep
   *        (&lt;= 0 disables the top selection)
   * @param numFromBottom how many of the most depleted words to keep
   *        (&lt;= 0 disables the bottom selection and additionally restricts
   *        candidates to words whose frequency here is &gt;= 0.5)
   * @return word -&gt; enrichment score (freqHere / baselineFreq^0.66)
   */
  public Map<String, Double> calculateWordEnrichment(
          GoTermWordCorpus baselineWordCorpus,
          int numFromTop, int numFromBottom) {

    Map<String, Double> result = new HashMap<String, Double>(
            baselineWordCorpus.wordFreqs.size() );

    for ( String word : baselineWordCorpus.wordFreqs.keySet() ) {

      // if we're not interested in depletion, only enrichment, the frequency
      // of the word here must be above 50% of the average
      if ( numFromBottom <= 0 && this.getWordFreq(word) < 0.5 )
        continue;

      // the 0.66 exponent softens the penalty for words common in the baseline
      result.put(word,
              this.getWordFreq(word) / Math.pow(baselineWordCorpus.getWordFreq(word), 0.66) );
    }

    // FIX: the array indexing below used to throw
    // ArrayIndexOutOfBoundsException when no word qualified
    if ( result.isEmpty() )
      return result;

    // now, select a subset of results
    Double[] enrichments = result.values().toArray(new Double[0]);
    Arrays.sort(enrichments); // ascending order

    double thresholdTop;
    if (numFromTop <= 0)
      thresholdTop = Double.MAX_VALUE;  // nothing passes the top criterion
    else
      thresholdTop = enrichments[ enrichments.length - Math.min( numFromTop, enrichments.length ) ];

    double thresholdBottom;
    if (numFromBottom <= 0)
      thresholdBottom = -Double.MAX_VALUE;  // nothing passes the bottom criterion
    else
      thresholdBottom = enrichments[ Math.min( numFromBottom, enrichments.length ) - 1 ];

    // keep only the ones conforming to either of the thresholds
    // (or both, in the case the array of enrichments is really short)
    Iterator<String> it = result.keySet().iterator();
    while (it.hasNext()) {
      double val = result.get(it.next());
      if ( val > thresholdBottom && val < thresholdTop )
        it.remove();
    }

    return result;
  }


  /**
   * Creates a GoTermWordCorpus from a Map of GoTerms, where to each GoTerm a
   * numeric value is associated.<p/>
   *
   * The word 'frequencies' in this corpus are not actually frequencies, but
   * instead they are a measure of correlation of a specific word to the
   * value of the designated property of GO Terms.<p/>
   *
   * The GoTerms that do not have the specified property defined will be
   * skipped.<p/>
   *
   * The measure is =2*AUC and therefore varies from 0.0 (anticorrelated) over
   * 1.0 (no correlation) to 2.0 (correlated).<p/>
   *
   * @param goTerms terms to consider
   * @param termProps source of per-term numeric properties
   * @param goTermProperty name of the property to correlate against
   * @param invertSense if true, ranks are assigned in reverse order, flipping
   *        the sense of the correlation
   */
  public GoTermWordCorpus( Collection<GOTerm> goTerms, GoTermProperties termProps,
          String goTermProperty, boolean invertSense ) {

    // first, copy the data to a LinkedHashMap, we need the predictable
    // iteration order
    LinkedHashMap<GOTerm, Double> lhm = new LinkedHashMap<GOTerm, Double>();
    for ( GOTerm curTerm : goTerms ) {
      if ( termProps.getAllProps(curTerm).containsKey(goTermProperty) )
        lhm.put(curTerm, termProps.get(curTerm, goTermProperty) );
    }

    // dismantle the Map into two parallel arrays
    GOTerm[] keys = lhm.keySet().toArray(new GOTerm[0]);
    double[] vals = new double[lhm.size()];
    int i = 0;
    for ( GOTerm term : lhm.keySet() ) {
      vals[i] = lhm.get(term);
      i++;
    }

    // sort by values - ascending order, smallest first
    // and larger values of the GoTermProperty are better, by definition
    int[] sortedIndices = weka.core.Utils.sort(vals);

    // now go through the key-value pairs in the sorted order, and compute for
    // each word:
    // (a) sum of ranks for all 1's - in "this.wordFreqs"
    // (b) count of 1's - in "countOf1s"
    // from those two, we can compute the AUC (or Mann-Whitney U)
    Map<String, Integer> countOf1s = new HashMap<String, Integer>();

    for ( int j = 0; j < sortedIndices.length; j++ ) {

      GOTerm term = keys[ sortedIndices[j] ];

      for ( String word : term.getKeywords() ) {

        Integer prevCount = countOf1s.get(word);
        countOf1s.put(word, (prevCount == null) ? 1 : prevCount + 1);

        // ranks are 1-based; invertSense assigns them in reverse
        double rankToAdd = invertSense ? (sortedIndices.length - j) : (j + 1);
        addToWordFreq( word, rankToAdd );
      }
    }

    // okay, now convert the sum of ranks to 2*AUC (which is the result of
    // this function)
    for ( Map.Entry<String, Double> e : this.wordFreqs.entrySet() ) {

      int n1 = countOf1s.get( e.getKey() );
      int n2 = sortedIndices.length - n1;

      // FIX: compute in double; the former int expressions n1*(n1+1) and
      // n1*n2 overflowed for large ontologies (n1 > ~46000)
      double u = e.getValue() - n1 * (n1 + 1.0) / 2.0;  // Mann-Whitney U
      // NOTE(review): a word occurring in every term (n2 == 0) yields
      // 0/0 == NaN here, exactly as in the original code
      double auc = u / ( (double) n1 * n2 );
      e.setValue( 2.0 * auc );
    }

    removeStopWords();
  }


  /**
   * Returns the numFromTop words with the highest scores. Ties at the cutoff
   * value are all included, so the result may hold more than numFromTop
   * entries.
   *
   * @param numFromTop number of words wanted; &lt;= 0 yields an empty map
   * @return word -&gt; score, for the selected words
   */
  public Map<String, Double> getMostFrequentWords(int numFromTop) {

    Map<String, Double> result = new HashMap<String, Double>();
    // FIX: also bail out on an empty corpus - the array indexing below used
    // to throw ArrayIndexOutOfBoundsException on a zero-length array
    if ( numFromTop <= 0 || this.wordFreqs.isEmpty() )
      return result;

    double[] allWordFreqs = new double[ this.wordFreqs.size() ];
    int i = 0;
    for ( Double freq : this.wordFreqs.values() ) {
      allWordFreqs[i] = freq;
      i++;
    }

    // ascending sort; weka's index sort is unnecessary here since only the
    // cutoff VALUE is needed, not the original positions
    Arrays.sort(allWordFreqs);
    double cutoff = allWordFreqs[ Math.max( allWordFreqs.length - numFromTop, 0 ) ];

    for ( Map.Entry<String, Double> e : this.wordFreqs.entrySet() ) {
      if ( e.getValue() >= cutoff )
        result.put( e.getKey(), e.getValue() );
    }

    return result;
  }


}
