package u1;

import static java.lang.Math.*;
import static java.lang.System.*;

import gnu.trove.TIntDoubleHashMap;
import gnu.trove.TIntDoubleProcedure;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URLDecoder;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Scanner;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.lang.StringEscapeUtils;

@SuppressWarnings("serial")
public class Util {

  /**
   * Maps each recognized namespace prefix (including shortcut prefixes like
   * "WP:") to its canonical namespace name. The bare ":" prefix maps to the
   * empty string, i.e. it is stripped. Used by canonicalizeLink.
   */
  public static final Map<String, String> NAMESPACES = buildNamespaces();

  /**
   * Builds the namespace table. A plain factory method is used instead of
   * double-brace initialization, which would create an anonymous (and
   * serializable) HashMap subclass — the source of the "serial" warning
   * suppressed on this class.
   * 
   * @return not null
   */
  private static Map<String, String> buildNamespaces() {
    Map<String, String> ns = new HashMap<String, String>();

    // Standard namespaces from:
    // http://en.wikipedia.org/wiki/Wikipedia:Namespace
    ns.put("Media:", "Media:");
    ns.put("Special:", "Special:");
    ns.put(":", "");
    ns.put("Talk:", "Talk:");
    ns.put("User:", "User:");
    ns.put("User talk:", "User talk:");
    ns.put("Wikipedia:", "Wikipedia:");
    ns.put("Wikipedia talk:", "Wikipedia talk:");
    ns.put("File:", "File:");
    ns.put("File talk:", "File talk:");
    ns.put("MediaWiki:", "MediaWiki:");
    ns.put("MediaWiki talk:", "MediaWiki talk:");
    ns.put("Template:", "Template:");
    ns.put("Template talk:", "Template talk:");
    ns.put("Help:", "Help:");
    ns.put("Help talk:", "Help talk:");
    ns.put("Category:", "Category:");
    ns.put("Category talk:", "Category talk:");
    ns.put("Portal:", "Portal:");
    ns.put("Portal talk:", "Portal talk:");
    ns.put("WikiProject:", "WikiProject:");
    ns.put("WikiProject talk:", "WikiProject talk:");
    ns.put("Reference:", "Reference:");
    ns.put("Reference talk:", "Reference talk:");

    // Prefixes from:
    // http://en.wikipedia.org/wiki/Wikipedia:Shortcut#List_of_prefixes
    // note that I changed wp and wt to Wp and Wt because it simplifies the
    // canonicalization code... it's only slightly wrong.
    ns.put("WP:", "Wikipedia:");
    ns.put("Wp:", "Wikipedia:");
    ns.put("WT:", "Wikipedia talk:");
    ns.put("Wt:", "Wikipedia talk:");
    ns.put("CAT:", "Category:");
    ns.put("P:", "Portal:");
    ns.put("T:", "Template:");
    ns.put("H:", "Help:");
    ns.put("C:", "Category:");
    ns.put("Cat:", "Category:");
    return ns;
  }

  /**
   * A link, with or without display text. Captures the page name first and the
   * displayed text second (group 2 may be empty).
   * 
   * See http://en.wikipedia.org/wiki/Wikipedia:Cheatsheet
   */
  public static final Pattern INTERNAL_LINK = Pattern
      .compile("\\[\\[([^|\\]]*)\\|?([^\\]]*)\\]\\]");

  /** One or more consecutive spaces; used to collapse runs of spaces. */
  public static final Pattern MANY_SPACES = Pattern.compile(" +");

  /**
   * Extract movie ID, year and title from netflixMovies.txt file.
   * Group 1 is the movie ID, group 2 the year, group 3 the title.
   */
  public static final Pattern NETFLIX_MOVIES_RECORD = Pattern
      .compile("^(\\d+)\\s+(\\d+)\\s(.*)$");

  /** Platform line separator, cached for building multi-line strings. */
  public static final String NL = System.getProperty("line.separator");

  /**
   * Matches a % that cannot start a valid percent-encoded escape; used to
   * defuse stray percents before URL-decoding. We trap a % followed by any of:
   * <ol>
   * <li>two non-hex digits</li>
   * <li>something followed by a non-hex digit</li>
   * <li>a non-hex digit followed by something</li>
   * <li>zero or one chars and the end of the string</li>
   * </ol>
   */
  public static final Pattern NON_HEX_PERCENT = Pattern
      .compile("%([^0-9a-fA-F]{2}|.[^0-9a-fA-F]|[^0-9a-fA-F].|.?$)");

  /**
   * Number of movies on one axis of a similarity matrix. Everything else
   * assumes this is 17770; change at your peril.
   */
  public static final int NUM_SIMILARITY_MOVIES = 17770;

  /**
   * For sizing hash tables that will contain pages. This is a loose upper bound
   * because we don't want lots of collisions.
   */
  public static final int PAGES_MAX = 10000000;

  /**
   * A redirect. Case insensitive. Captures the target page name in group 1;
   * any display text after a pipe is matched but not captured.
   * 
   * See http://en.wikipedia.org/wiki/Wikipedia:Cheatsheet
   */
  public static final Pattern REDIRECT = Pattern
      .compile("(?i)#REDIRECT\\s*\\[\\[([^\\]|]+)\\|?[^\\]]*\\]\\]");
  
  /**
   * An HTML comment; (?s) lets it span multiple lines.
   * 
   * See http://en.wikipedia.org/wiki/Help:Wikitext examples
   */
  public static final Pattern COMMENT = Pattern.compile("(?s)<!--.*?-->");
  
  /**
   * A nowiki section (markup inside is not interpreted). Case insensitive,
   * may span lines.
   * 
   * See http://en.wikipedia.org/wiki/Help:Wikitext examples
   */
  public static final Pattern NOWIKI = Pattern.compile("(?is)<nowiki>.*?</nowiki>");
  
  /**
   * A pre (preformatted) section. Case insensitive, may span lines.
   * 
   * See http://en.wikipedia.org/wiki/Help:Wikitext examples
   */
  public static final Pattern PRE = Pattern.compile("(?is)<pre>.*?</pre>");
  
  /**
   * Remove HTML comments, nowiki sections and pre sections from the given
   * wiki markup, so that subsequent link extraction only sees real links.
   * 
   * @param s not null
   * @return not null
   */
  public static String stripCommentsAndNonMarkupSections(String s) {
    String stripped = COMMENT.matcher(s).replaceAll("");
    stripped = NOWIKI.matcher(stripped).replaceAll("");
    return PRE.matcher(stripped).replaceAll("");
  }

  /**
   * Take link to standard format that is comparable with titles. See
   * Wikipedia:Naming conventions (technical restrictions) and
   * Wikipedia:Canonicalization for more of the ugly details.
   * 
   * NOTE: the transformations below are order-dependent (e.g. stray percent
   * signs must be escaped before URL-decoding, and the namespace rewrite
   * relies on the earlier capitalization step); do not reorder them casually.
   * 
   * @param link
   *          not null
   * 
   * @return not null
   */
  public static String canonicalizeLink(String link) {
    // We are ignoring anchors (page#anchor -> page).
    int anchorIndex = link.indexOf('#');
    if (anchorIndex > -1)
      link = link.substring(0, anchorIndex);

    // Underscores and spaces are equivalent; we use spaces.
    link = link.replace('_', ' ');
    
    // For some reason, the unicode "left-to-right mark" (code U+200E) occurs in
    // quite a few places. It looks a lot like a space, so presumably people
    // are somehow entering it when they want a space.
    link = link.replace('\u200e', ' ');

    // Multiple spaces are collapsed.
    link = MANY_SPACES.matcher(link).replaceAll(" ");

    // Leading and trailing spaces are removed.
    link = link.trim();

    // URLDecoder doesn't check whether the characters following the % are hex.
    // It throws lots of exceptions if we don't do these checks first: any %
    // that can't start a valid escape is itself escaped to %25.
    link = NON_HEX_PERCENT.matcher(link).replaceAll("%25$1");

    // URLDecoder kills the plus sign (+). We want to keep it.
    link = link.replace("+", "%2B");

    // Percent-encoded sequences are decoded.
    try {
      link = URLDecoder.decode(link, "UTF-8");
    } catch (Exception e) {
      // This method is pretty fragile. Log it and continue with the partially
      // canonicalized link rather than aborting the whole page.
      err.println("decode failed on: " + link);
      e.printStackTrace();
    }

    // HTML-escaped sequences (eg. &quot;) are unescaped.
    link = StringEscapeUtils.unescapeHtml(link);

    // The first character is always upper-cased.
    if (link.length() > 0)
      link = Character.toUpperCase(link.charAt(0)) + link.substring(1);

    // If there is a colon, this may be a namespace.
    // Namespaces require special treatment: the prefix is rewritten to its
    // canonical form and the rest of the title is trimmed and re-capitalized.
    // colonIndex is the FIRST colon in the link; every NAMESPACES key contains
    // exactly one colon (its last character), so when a key matches,
    // substring(colonIndex + 1) is exactly the title after the prefix.
    // HashMap iteration order is harmless here because no namespace key is a
    // prefix of another, so at most one key can match.
    int colonIndex = link.indexOf(':');
    if (colonIndex >= 0) {
      for (String ns : NAMESPACES.keySet()) {
        if (link.startsWith(ns)) {
          String rest = link.substring(colonIndex + 1).trim();
          if (rest.length() > 0)
            rest = Character.toUpperCase(rest.charAt(0)) + rest.substring(1);
          link = NAMESPACES.get(ns) + rest;
          break;
        }
      }
    }
    
    return link;
  }

  /**
   * Bring a page title to the standard form comparable with other titles.
   * Titles in the wiki dump appear to already follow the naming conventions,
   * so this is currently the identity transform; the real canonicalization
   * work happens in canonicalizeLink.
   * 
   * @param title
   *          not null
   * @return not null
   */
  public static String canonicalizeTitle(String title) {
    // Titles are assumed canonical as-is; kept as a hook for future rules.
    return title;
  }

  /**
   * Cosine similarity between sparse vectors vi and vj: their dot product
   * divided by both of their L2 norms.
   * 
   * @param vi
   *          not null
   * @param vj
   *          not null
   * @return in [-1, 1], where 1 is identical and -1 is opposite.
   */
  public static double cosineSimilarity(TIntDoubleHashMap vi,
      TIntDoubleHashMap vj) {
    // Divide in the same order as the original expression to keep floating
    // point results bit-identical.
    double similarity = dot(vi, vj);
    similarity /= norm(vi);
    similarity /= norm(vj);
    return similarity;
  }

  /**
   * Accumulates a dot product while iterating the entries of one sparse
   * vector; trove's forEachEntry requires a procedure object. In what other
   * language does computing a dot product require a helper class?
   */
  private static class DotAccumulator implements TIntDoubleProcedure {
    private final TIntDoubleHashMap other;
    double sum; // read by dot(...) after the iteration completes

    public DotAccumulator(TIntDoubleHashMap vj) {
      other = vj;
      sum = 0;
    }

    @Override
    public boolean execute(int index, double value) {
      sum += value * other.get(index);
      return true; // always continue iterating
    }
  }

  /**
   * Dot product of sparse vectors vi and vj.
   * 
   * @param vi
   *          not null
   * @param vj
   *          not null
   * @return the dot product
   */
  public static double dot(final TIntDoubleHashMap vi,
      final TIntDoubleHashMap vj) {
    // Iterate over the smaller vector and look entries up in the larger one.
    TIntDoubleHashMap smaller = vi;
    TIntDoubleHashMap larger = vj;
    if (smaller.size() > larger.size()) {
      smaller = vj;
      larger = vi;
    }
    DotAccumulator accumulator = new DotAccumulator(larger);
    smaller.forEachEntry(accumulator);
    return accumulator.sum;
  }

  /**
   * Add all internal links in the given text to the given set. Note that this
   * will include interlanguage links ([[fr:Foo]]) and media links
   * ([[Image:foo.jpg]]). It will also include links from #REDIRECT directives.
   * No regularization is performed (eg. lower casing) or removing parameters.
   * 
   * @param text
   *          not null
   * @param set
   *          not null; collects names of pages linked from text
   */
  public static void findInternalLinks(String text, Set<String> set) {
    // find() with no argument resumes from the end of the previous match,
    // which is exactly what the explicit start bookkeeping used to do.
    for (Matcher m = INTERNAL_LINK.matcher(text); m.find();) {
      set.add(m.group(1));
    }
  }

  /**
   * Check whether the given article is a redirect and, if it is, return the
   * page that it redirects to.
   * 
   * @param text
   *          not null
   * @return null iff given article is not a redirect
   */
  public static String findRedirect(String text) {
    Matcher m = REDIRECT.matcher(text);
    return m.find() ? m.group(1) : null;
  }

  /**
   * Sparse vector L2 norm: the square root of the sum of squared entries.
   * 
   * @param v
   *          not null
   * @return non-negative; 0 if v empty
   */
  public static double norm(TIntDoubleHashMap v) {
    double sumOfSquares = 0;
    double[] values = v.getValues();
    for (int i = 0; i < values.length; ++i) {
      sumOfSquares += values[i] * values[i];
    }
    return sqrt(sumOfSquares);
  }

  /**
   * Read whitespace-delimited (page, weight) pairs from the given file into
   * the given map, optionally keeping only pages in the filter set.
   * 
   * @param map not null; parsed entries are added here
   * @param f not null
   * @param filter may be null, in which case all pages are kept
   * @throws FileNotFoundException if f cannot be opened
   */
  public static void readIntDoubleMap(Map<Integer, Double> map, File f,
      Set<Integer> filter) throws FileNotFoundException {
    Scanner scanner = new Scanner(new FileInputStream(f));
    while (scanner.hasNextInt()) {
      int page = scanner.nextInt();
      if (!scanner.hasNextDouble()) {
        // A page with no following weight is a fatal input error.
        trace("Failed when reading ranks - no rank after page %d.", page);
        exit(-1);
      }
      double pageWeight = scanner.nextDouble();
      if (filter == null || filter.contains(page))
        map.put(page, pageWeight);
    }
    scanner.close();
  }

  /**
   * Read a list of whitespace-delimited decimal integers from the given
   * stream, stopping at the first non-integer token or end of stream.
   * 
   * @param list
   *          not null; integers read are appended here
   * @param is
   *          not null; not closed by this method
   */
  public static void readIntegers(List<Integer> list, InputStream is) {
    Scanner scanner = new Scanner(is);
    while (scanner.hasNextInt())
      list.add(scanner.nextInt());
  }

  /**
   * Read (for example) an integer histogram from a file with one bucket per
   * line, each line being the bucket and the number of entries in that bucket,
   * separated by whitespace. Blank lines are skipped; entries are added to
   * the given map.
   * 
   * @param hist
   *          not null
   * @param br
   *          not null
   * @throws IOException
   */
  public static void readIntIntMap(Map<Integer, Integer> hist, BufferedReader br)
      throws IOException {
    String line;
    while ((line = br.readLine()) != null) {
      if (line.length() > 0) {
        // Integer.decode also accepts 0x/0 prefixed values, like the original.
        String[] fields = line.split("\\s+");
        hist.put(Integer.decode(fields[0]), Integer.decode(fields[1]));
      }
    }
  }

  /**
   * Reads all lines from the reader into the given list.
   * 
   * @param list
   *          not null
   * @param br
   *          not null; not closed by this method
   * @throws IOException
   */
  public static void readLines(List<String> list, BufferedReader br)
      throws IOException {
    String line;
    while ((line = br.readLine()) != null)
      list.add(line);
  }

  /**
   * Read one string per line from reader and map from those strings to their
   * ordinal in the file (starting at zero for the first entry, one for the
   * second entry, etc.).
   * 
   * @param br
   *          not null
   * @return not null
   * @throws IOException
   */
  public static HashMap<String, Integer> readStringIndex(BufferedReader br)
      throws IOException {
    // Note: Don't use a Scanner for this; there appears to be some kind of bug
    // with newline handling (JLM 20080321, JDK 6).
    HashMap<String, Integer> index = new HashMap<String, Integer>();
    String line;
    while ((line = br.readLine()) != null) {
      // index.size() is used as the ordinal deliberately: a duplicate line
      // overwrites its earlier entry without advancing the ordinal.
      index.put(line, index.size());
    }
    return index;
  }

  /**
   * Reverse the map source and put the results in dest (values become keys
   * and keys become values; duplicate values overwrite).
   * 
   * @param source
   *          not null
   * @param dest
   *          not null
   */
  public static <T, U> void reverse(Map<T, U> source, Map<U, T> dest) {
    for (Map.Entry<T, U> entry : source.entrySet())
      dest.put(entry.getValue(), entry.getKey());
  }

  /**
   * Reverse the multimap source and put the results in dest: each value u of
   * key t in source yields an entry from u to a set containing t in dest.
   * 
   * @param source
   *          not null
   * @param dest
   *          not null
   */
  public static <T, U> void reverseMulti(Map<T, Set<U>> source,
      Map<U, HashSet<T>> dest) {
    for (Map.Entry<T, Set<U>> entry : source.entrySet()) {
      T key = entry.getKey();
      for (U value : entry.getValue()) {
        // Lazily create the reverse set for this value.
        HashSet<T> keys = dest.get(value);
        if (keys == null) {
          keys = new HashSet<T>();
          dest.put(value, keys);
        }
        keys.add(key);
      }
    }
  }

  /**
   * Dump sparse vector, one "index:value" pair per line.
   * 
   * @param v
   *          not null
   * @return not null
   */
  public static String sparseVectorToString(TIntDoubleHashMap v) {
    final StringBuilder sb = new StringBuilder();
    v.forEachEntry(new TIntDoubleProcedure() {
      @Override
      public boolean execute(int index, double value) {
        sb.append(index).append(':').append(value).append(NL);
        return true;
      }
    });
    return sb.toString();
  }

  /**
   * Trace to stderr: an HH:MM:SS timestamp prefix followed by the formatted
   * message and a newline.
   */
  public static void trace(String format, Object... args) {
    // err is System.err via the static import at the top of the file.
    err.printf("%1$tH:%1$tM:%1$tS: ", new Date());
    err.println(String.format(format, args));
  }

  /*
   * Note: to open a zip file: zip = new ZipFile(file); Enumeration<? extends
   * ZipEntry> entries = zip.entries(); if (!entries.hasMoreElements()) { throw
   * new RuntimeException("Expect zip file with at least one entry."); }
   * ZipEntry entry = entries.nextElement(); input = zip.getInputStream(entry);
   */
}
/*
* Copyright (c) 2009 John Lees-Miller
* 
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
* 
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
* 
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/

