package u1;

import static java.lang.Math.*;
import static java.lang.System.*;
import static u1.Util.*;

import gnu.trove.THashMap;
import gnu.trove.TIntDoubleHashMap;
import gnu.trove.TIntHashSet;
import gnu.trove.TObjectObjectProcedure;
import gnu.trove.decorator.TIntDoubleHashMapDecorator;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Compute movie-movie similarity matrix based on link TFIDF and cosine
 * similarity.
 * 
 * Takes an adjacency list (see u1.ForwardLinks) on standard in.
 * 
 * Arguments:
 * 
 * 1. A .movies.with_ids file (see u1.MovieIdNumbers for format). These are the
 * Netflix movie ids and Wikipedia page numbers to generate similarity estimates
 * for.
 * 
 * 2. A .rank file (see pagerank.cpp for format). We can use page ranks in
 * combination with TF/DF.
 * 
 * 3. (Optional) Scaling factor alpha; see getPageVector.
 * 
 * Output is a 17770 by 17770 similarity matrix where all entries are in [0, 1]
 * and an entry is 0 if we have no information on it.
 */
public class LinkSimilarity {
  /**
   * A movie's sparse TF/IDF feature vector together with its Euclidean norm.
   * We cache the norms because the similarity computation is very slow.
   */
  public static class PageVector {
    /** Euclidean (L2) norm of {@link #v}, cached at construction time. */
    public double norm;
    /** Sparse vector: term (linked page number) to TF/IDF weight. */
    public TIntDoubleHashMap v;
  }

  public static void main(String[] args) throws IOException {
    // Alpha (args[2]) is optional, per the class comment; when omitted it
    // defaults to 0, which disables page-rank weighting.
    if (args.length < 2 || args.length > 3) {
      err.println("See comments for usage.");
      exit(-1);
    }
    File movieIdsToPageNumbersFile = new File(args[0]);
    File rankFile = new File(args[1]);
    // parseDouble: no need to box the result as Double.valueOf does.
    double alpha = args.length == 3 ? Double.parseDouble(args[2]) : 0;

    trace("Reading movie ids to ordinals map from %s...",
        movieIdsToPageNumbersFile);
    BufferedReader movieIdsAndPageNumbers = new BufferedReader(
        new InputStreamReader(new FileInputStream(movieIdsToPageNumbersFile),
            "UTF-8"));
    final Map<Integer, Integer> movieIdsToPageNumbers = new HashMap<Integer, Integer>();
    readIntIntMap(movieIdsToPageNumbers, movieIdsAndPageNumbers);
    movieIdsAndPageNumbers.close();
    trace("Read %d movies.", movieIdsToPageNumbers.size());

    TIntDoubleHashMap rankT = new TIntDoubleHashMap();
    if (alpha == 0) {
      trace("Ignoring ranks because alpha is zero.");
    } else {
      trace("Reading ranks from %s...", rankFile);
      // null means no filtering on which ranks we read in
      readIntDoubleMap(new TIntDoubleHashMapDecorator(rankT), rankFile, null);
      trace("Read %d page ranks.", rankT.size());
    }

    /*
     * Interested only in links from movie pages (but maybe could do something
     * with links to movie pages). We don't get any data on how many times a
     * link appears on a page because the adjacency list uses sets. So, we just
     * get a bit for each link ("term" by analogy) on the page.
     * 
     * tf[term] = set of pages on which a link to term appears (term freq).
     * 
     * df[term] = size of tf[term].
     */
    trace("Reading adjacency list from stdin.");
    // Precompute the set of movie page numbers. HashMap.containsValue is a
    // linear scan over all entries; calling it once per adjacency-list page
    // made this pass effectively quadratic. A HashSet makes it O(1) per page.
    final Set<Integer> moviePageNumbers = new HashSet<Integer>(
        movieIdsToPageNumbers.values());
    final THashMap<Integer, TIntHashSet> tf = new THashMap<Integer, TIntHashSet>();
    AdjReader reader = new AdjReader() {
      @Override
      public void readLinks(Integer source, Collection<Integer> targets) {
        if (getPageIndex() % 100000 == 0)
          trace("Processed %d pages.", getPageIndex());

        // Skip pages that are not movie pages; we only want outgoing links
        // from the movies we are computing similarities for.
        if (!moviePageNumbers.contains(source))
          return;
        for (Integer target : targets) {
          TIntHashSet tfSet = tf.get(target);
          if (tfSet == null) {
            tfSet = new TIntHashSet();
            tf.put(target, tfSet);
          }
          tfSet.add(source);
        }
      }
    };
    reader.read(in);
    trace("Got %d terms.", tf.size());

    /*
     * For interest, compute some statistics.
     */
    StatCounter dfCounter = new StatCounter("doc freq");
    for (TIntHashSet tfe : tf.values()) {
      dfCounter.record(tfe.size());
    }
    trace(dfCounter.toString());

    /*
     * Compute article (movie) feature vectors.
     */
    trace("Computing movie feature vectors...");
    ArrayList<PageVector> vs = getPageVectors(movieIdsToPageNumbers, rankT, tf,
        alpha);
    trace("Computed %d vectors.", vs.size());

    /*
     * Now compute similarity matrix.
     */
    trace("Writing similarity matrix...");

    StatCounter simCounter = new StatCounter("similarity");
    int zeroCount = 0;
    int i = 0;
    for (PageVector vi : vs) {
      ++i;
      int j = 0;
      for (PageVector vj : vs) {
        ++j;
        if (vi == null || vj == null || vi.v.size() == 0 || vj.v.size() == 0) {
          // No data for movies i, j.
          out.print("0 ");
        } else {
          // Cosine similarity. Because all feature weights are non-negative
          // (each is pow(rank, alpha) / df), the cosine already lies in
          // [0, 1]: 1 for maximally similar, 0 for orthogonal pages.
          double similarity = dot(vi.v, vj.v) / vi.norm / vj.norm;
          simCounter.record(similarity);
          out.print(similarity);
          out.print(" ");

          // We use 0 for unknown, but it might be that two pages are
          // maximally dissimilar (orthogonal). Make note of this, if it
          // happens.
          if (similarity == 0) {
            ++zeroCount;
            // This produces too much output.
            //trace("Zero similarity %f for movies %d, %d.", similarity, i, j);
          }

          // Check for NaNs; possible e.g. when a norm underflows to zero.
          if (Double.isNaN(similarity)) {
            trace("Got a NaN for movies %d, %d. Feature vector lengths"
                + " %d and %d.", i, j, vi.v.size(), vj.v.size());
          }
        }
      }
      out.println();
      if (i % 1000 == 0) {
        trace("Finished %d rows.", i);
      }
    }
    trace(simCounter.toString());
    trace("%d pairs were orthogonal", zeroCount);
    trace("Done.");
  }

  /**
   * Build one feature vector per Netflix movie id, in id order.
   *
   * @param movieIdsToPageNumbers Netflix movie id to Wikipedia page number
   * @param rankT page number to page rank (may be empty when alpha is 0)
   * @param tf term (linked page) to set of movie pages that link to it
   * @param alpha scaling factor; see {@link #getPageVector}
   * @return list of length NUM_SIMILARITY_MOVIES; entry i-1 is the vector
   *         for movie id i, or null if we have no page for that movie
   */
  public static ArrayList<PageVector> getPageVectors(
      final Map<Integer, Integer> movieIdsToPageNumbers,
      TIntDoubleHashMap rankT, final THashMap<Integer, TIntHashSet> tf,
      double alpha) {
    ArrayList<PageVector> vs = new ArrayList<PageVector>(NUM_SIMILARITY_MOVIES);
    for (int i = 1; i <= NUM_SIMILARITY_MOVIES; ++i) {
      Integer pageNumber = movieIdsToPageNumbers.get(i);
      if (pageNumber == null) {
        // No Wikipedia page for this movie; mark it as "no data".
        vs.add(null);
      } else {
        TIntDoubleHashMap v = new TIntDoubleHashMap();
        getPageVector(pageNumber, rankT, tf, alpha, v);
        PageVector pv = new PageVector();
        pv.v = v;
        pv.norm = norm(v); // cache; recomputing per pair would be too slow
        vs.add(pv);
      }
    }
    return vs;
  }

  /**
   * Fill in the TF/IDF-style feature vector for one page.
   *
   * <pre>
   * pageVector[term] =
   *   1 * ranks[term]&circ;alpha / df[term], if page in tf[term]
   *   0, otherwise
   * </pre>
   * 
   * Note that when alpha is 0, pow(rank, 0) is 1 even for missing ranks, so
   * the weight reduces to the plain inverse document frequency 1 / df[term].
   * 
   * @param page page number whose vector we are building
   * @param ranks page number to page rank
   * @param tf term to set of pages that link to the term
   * @param alpha exponent applied to the term's rank
   * @param v output map; entries are added for terms that link to page
   */
  public static void getPageVector(final int page,
      final TIntDoubleHashMap ranks, final THashMap<Integer, TIntHashSet> tf,
      final double alpha, final TIntDoubleHashMap v) {

    tf.forEachEntry(new TObjectObjectProcedure<Integer, TIntHashSet>() {

      @Override
      public boolean execute(Integer term, TIntHashSet pages) {
        if (pages.contains(page))
          v.put(term, pow(ranks.get(term), alpha) / pages.size());
        return true; // keep iterating over all terms
      }

    });
  }
}
/*
 * Copyright (c) 2009 John Lees-Miller
 * 
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 * 
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * 
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

