/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.nutch.scoring.webgraph;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.lang.Math;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.FloatWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapFileOutputFormat;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.SequenceFileInputFormat;
import org.apache.hadoop.mapred.SequenceFileOutputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.nutch.crawl.Inlinks;
import org.apache.nutch.scoring.webgraph.Loops.LoopSet;
import org.apache.nutch.util.FSUtils;
import org.apache.nutch.util.NutchConfiguration;
import org.apache.nutch.util.NutchJob;
import org.apache.nutch.util.URLUtil;

public class LinkRank
  extends Configured
  implements Tool {

  /** Commons-logging log shared by this class and its nested jobs. */
  public static final Log LOG = LogFactory.getLog(LinkRank.class);
  // key and temp-directory name under which the Counter job emits the total
  // node count (see runCounter)
  private static final String NUM_NODES = "_num_nodes_";

  // mapper/reducer classes for each job phase; subclasses can swap these in
  // via the setters below before analyze() is called
  private Class<? extends Mapper> inverterMapClass = Inverter.class;
  private Class<? extends Reducer> inverterReduceClass = Inverter.class;

  private Class<? extends Mapper> analyzerMapClass = Analyzer.class;
  private Class<? extends Reducer> analyzerReduceClass = Analyzer.class;

  // NOTE(review): normalizerReduceClass is stored by its setter but never
  // passed to the normalizer job in runNormalizer -- confirm intended
  private Class<? extends Mapper> normalizerMapClass = Normalizer.class;
  private Class<? extends Reducer> normalizerReduceClass = null;

  private Class<? extends Mapper> convergerMapClass = Converger.class;
  private Class<? extends Reducer> convergerReduceClass = Converger.class;

  /** Overrides the mapper class used by the analysis job. */
  protected void setAnalyzerMapClass(Class<? extends Mapper> analyzerMapClass) {
    this.analyzerMapClass = analyzerMapClass;
  }

  /** Overrides the reducer class used by the analysis job. */
  protected void setAnalyzerReduceClass(
      Class<? extends Reducer> analyzerReduceClass) {
    this.analyzerReduceClass = analyzerReduceClass;
  }

  /** Overrides the mapper class used by the inverter job. */
  public void setInverterMapClass(Class<? extends Mapper> inverterMapClass) {
    this.inverterMapClass = inverterMapClass;
  }

  /** Overrides the reducer class used by the inverter job; a null value skips setReducerClass. */
  public void setInverterReduceClass(
      Class<? extends Reducer> inverterReduceClass) {
    this.inverterReduceClass = inverterReduceClass;
  }

  /** Overrides the mapper class used by the normalizer job; a null value disables normalization in analyze(). */
  public void setNormalizerMapClass(Class<? extends Mapper> normalizerMapClass) {
    this.normalizerMapClass = normalizerMapClass;
  }

  /** Stores a reducer class for the normalizer phase. */
  public void setNormalizerReduceClass(
      Class<? extends Reducer> normalizerReduceClass) {
    this.normalizerReduceClass = normalizerReduceClass;
  }

  /** Returns the mapper class used by the converger job. */
  protected Class<? extends Mapper> getConvergerMapClass() {
    return convergerMapClass;
  }

  /** Overrides the mapper class used by the converger job; null disables convergence checks in analyze(). */
  protected void setConvergerMapClass(Class<? extends Mapper> convergerMapClass) {
    this.convergerMapClass = convergerMapClass;
  }

  /** Returns the reducer class used by the converger job. */
  protected Class<? extends Reducer> getConvergerReduceClass() {
    return convergerReduceClass;
  }

  /** Overrides the reducer class used by the converger job; null disables convergence checks in analyze(). */
  protected void setConvergerReduceClass(
      Class<? extends Reducer> convergerReduceClass) {
    this.convergerReduceClass = convergerReduceClass;
  }

  /**
   * Runs the counter job. The counter job determines the number of links in the
   * webgraph. This is used during analysis.
   * 
   * @param fs The job file system.
   * @param webGraphDb The web graph database to use.
   * 
   * @return The number of nodes in the web graph.
   * @throws IOException If an error occurs while running the counter job or if
   * the counter output is empty.
   */
  private int runCounter(FileSystem fs, Path webGraphDb)
    throws IOException
  {
    // configure the counter job
    Path numLinksPath = new Path(webGraphDb, NUM_NODES);
    Path nodeDb = new Path(webGraphDb, WebGraph.NODE_DIR);
    JobConf counter = new NutchJob(getConf());
    counter.setJobName("LinkRank Counter");
    FileInputFormat.addInputPath(counter, nodeDb);
    FileOutputFormat.setOutputPath(counter, numLinksPath);
    counter.setInputFormat(SequenceFileInputFormat.class);
    counter.setMapperClass(Counter.class);
    counter.setCombinerClass(Counter.class);
    counter.setReducerClass(Counter.class);
    counter.setMapOutputKeyClass(Text.class);
    counter.setMapOutputValueClass(LongWritable.class);
    counter.setOutputKeyClass(Text.class);
    counter.setOutputValueClass(LongWritable.class);
    // a single reducer guarantees a single output file with one total line
    counter.setNumReduceTasks(1);
    counter.setOutputFormat(TextOutputFormat.class);

    // run the counter job, outputs to a single reduce task and file
    LOG.info("Starting link counter job");
    try {
      JobClient.runJob(counter);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished link counter job");

    // read the first (and only) line from the file which should be the
    // number of links in the web graph
    LOG.info("Reading numlinks temp file");
    FSDataInputStream readLinks = fs.open(new Path(numLinksPath, "part-00000"));
    BufferedReader buffer = new BufferedReader(new InputStreamReader(readLinks));
    String numLinksLine;
    try {
      numLinksLine = buffer.readLine();
    }
    finally {
      // closing the reader also closes the underlying stream
      buffer.close();
    }
    if (numLinksLine == null) {
      throw new IOException("Counter output " + numLinksPath + " is empty");
    }

    // delete temp file and convert and return the number of links as an int
    LOG.info("Deleting numlinks temp file");
    fs.delete(numLinksPath, true);
    String numLinks = numLinksLine.split("\\s+")[1];
    return Integer.parseInt(numLinks);
  }

  /**
   * Runs the initializer job. The initializer job sets up the nodes with a
   * default starting score for link analysis.
   * 
   * @param numNodes The total number of nodes in the webgraph; stored into the
   * job configuration under "link.analyze.nodes.num".
   * @param nodeDb The node database to use.
   * @param output The job output directory.
   * 
   * @throws IOException If an error occurs while running the initializer job.
   */
  private void runInitializer(int numNodes, Path nodeDb, Path output)
    throws IOException
  {
    // configure the initializer
    JobConf initializer = new NutchJob(getConf());
    initializer.setJobName("LinkAnalysis Initializer");
    initializer.set("link.analyze.nodes.num", Integer.toString(numNodes));
    FileInputFormat.addInputPath(initializer, nodeDb);
    FileOutputFormat.setOutputPath(initializer, output);
    initializer.setInputFormat(SequenceFileInputFormat.class);
    initializer.setMapperClass(Initializer.class);
    initializer.setMapOutputKeyClass(Text.class);
    initializer.setMapOutputValueClass(Node.class);
    initializer.setOutputKeyClass(Text.class);
    initializer.setOutputValueClass(Node.class);
    initializer.setOutputFormat(MapFileOutputFormat.class);

    // run the initializer
    LOG.info("Starting initialization job");
    try {
      JobClient.runJob(initializer);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished initialization job.");
  }

  /**
   * Runs the inverter job. The inverter job flips outlinks to inlinks to be
   * passed into the analysis job.
   * 
   * The inverter job takes a link loops database if it exists. It is an
   * optional component of link analysis due to its extreme computational and
   * space requirements but it can be very useful in weeding out and
   * eliminating link farms and other spam pages.
   * 
   * @param nodeDb The node database to use.
   * @param inlinkDb The inlink database to use.
   * @param outlinkDb The outlink database to use.
   * @param loopDb The loop database to use if it exists.
   * @param output The output directory.
   * 
   * @throws IOException If an error occurs while running the inverter job.
   */
  private void runInverter(Path nodeDb, Path inlinkDb, Path outlinkDb,
    Path loopDb, Path output) throws IOException
  {
    /* This should never happen, just to make sure we are OK. */
    if (inverterMapClass == null)
    {
      String error = this.getClass().toString() + ": Inverter is not set";
      LOG.error(error);
      throw new IllegalArgumentException(error);
    }

    // configure the inverter
    JobConf inverter = new NutchJob(getConf());
    inverter.setJobName("LinkAnalysis Inverter");
    FileInputFormat.addInputPath(inverter, nodeDb);
    FileInputFormat.addInputPath(inverter, inlinkDb);
    FileInputFormat.addInputPath(inverter, outlinkDb);

    // the loop database is currently NOT added as an input even when present;
    // only a log line records that it was seen
    if (loopDb != null)
    {
      // FileInputFormat.addInputPath(inverter, loopDb);
      LOG.info("Skipping loopDb `" + loopDb.toString()
        + "': loop database input to the inverter is currently disabled");
    }
    FileOutputFormat.setOutputPath(inverter, output);
    inverter.setInputFormat(SequenceFileInputFormat.class);
    inverter.setMapperClass(inverterMapClass);
    if (inverterReduceClass != null)
      inverter.setReducerClass(inverterReduceClass);
    inverter.setMapOutputKeyClass(Text.class);
    inverter.setMapOutputValueClass(ObjectWritable.class);
    inverter.setOutputKeyClass(Text.class);
    inverter.setOutputValueClass(LinkDatum.class);
    inverter.setOutputFormat(SequenceFileOutputFormat.class);

    // run the inverter job
    LOG.info("Starting inverter job");
    try {
      JobClient.runJob(inverter);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished inverter job.");
  }

  /**
   * Runs the link analysis job. The link analysis job applies the link rank
   * formula to create a score per url and stores that score in the NodeDb.
   * 
   * Typically the link analysis job is run a number of times to allow the link
   * rank scores to converge.
   * 
   * @param nodeDb The node database from which we are getting previous link
   * rank scores.
   * @param inverted The inverted inlinks
   * @param output The link analysis output.
   * @param iteration The current iteration number.
   * @param numIterations The total number of link analysis iterations
   * @param rankOne The score used for pages with zero inlinks; stored into the
   * job configuration under "link.analyze.rank.one".
   * 
   * @throws IOException If an error occurs during link analysis.
   */
  protected void runAnalysis(Path nodeDb, Path inverted, Path output,
    int iteration, int numIterations, float rankOne)
    throws IOException
  {
    /* This should never happen, just to make sure we are OK. */
    if (analyzerMapClass == null)
    {
      String error = this.getClass().toString() + ": Analyzer is not set";
      LOG.error(error);
      throw new IllegalArgumentException(error);
    }

    JobConf analyzer = new NutchJob(getConf());
    // the iteration number exposed to tasks is 1-based
    analyzer.set("link.analyze.iteration", String.valueOf(iteration + 1));
    analyzer.setJobName("LinkAnalysis Analyzer, iteration " + (iteration + 1)
      + " of " + numIterations);
    FileInputFormat.addInputPath(analyzer, nodeDb);
    FileInputFormat.addInputPath(analyzer, inverted);
    FileOutputFormat.setOutputPath(analyzer, output);
    analyzer.set("link.analyze.rank.one", String.valueOf(rankOne));
    analyzer.setMapOutputKeyClass(Text.class);
    analyzer.setMapOutputValueClass(ObjectWritable.class);
    analyzer.setInputFormat(SequenceFileInputFormat.class);
    analyzer.setMapperClass(analyzerMapClass);
    if (analyzerReduceClass != null)
      analyzer.setReducerClass(analyzerReduceClass);
    analyzer.setOutputKeyClass(Text.class);
    analyzer.setOutputValueClass(Node.class);
    analyzer.setOutputFormat(MapFileOutputFormat.class);

    LOG.info("Starting analysis job");
    try {
      JobClient.runJob(analyzer);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished analysis job.");
  }

    // Calculate norma.
  private Map<String, Float> getNorms(FileSystem fs, Path nodeDb)
    throws IOException
  {
    Map<String, Float> res = new HashMap<String, Float>();

    // configure the normaCalc job
    Path normaPath = new Path(nodeDb.getParent(), "_normaCalc");
    JobConf normaCalc = new NutchJob(getConf());
    normaCalc.setJobName("LinkRank normaCalculator");
    FileInputFormat.addInputPath(normaCalc, nodeDb);
    FileOutputFormat.setOutputPath(normaCalc, normaPath);
    normaCalc.setInputFormat(SequenceFileInputFormat.class);
    normaCalc.setMapperClass(NormaCalculator.class);
    normaCalc.setReducerClass(NormaCalculator.class);
    normaCalc.setMapOutputKeyClass(Text.class);
    normaCalc.setMapOutputValueClass(FloatWritable.class);
    normaCalc.setOutputKeyClass(Text.class);
    normaCalc.setOutputValueClass(FloatWritable.class);
    normaCalc.setNumReduceTasks(1);
    normaCalc.setOutputFormat(TextOutputFormat.class);

    // run the normaCalc job, outputs to a single reduce task and file
    LOG.info("Starting link normaCalc job");
    try {
      JobClient.runJob(normaCalc);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished link normaCalc job");

    // read the first (and only) line from the file which should be the
    // number of links in the web graph
    LOG.info("Reading norma temp file");
    FSDataInputStream readLinks = fs.open(new Path(normaPath, "part-00000"));
    BufferedReader buffer = new BufferedReader(new InputStreamReader(readLinks));

    for (int i = 0; i < 2; i++)
    {
      String[] tokens = buffer.readLine().split("\\s+");

      res.put(tokens[0], Float.parseFloat(tokens[1]));
    }
    readLinks.close();
    // delete temp file and convert and return the number of links as an int
    LOG.info("Deleting numlinks temp file");
    fs.delete(normaPath, true);

    return res;
  }

  /**
   * Determines whether link scores have converged. Runs the converger job to
   * compute per-node score deltas between the previous and new NodeDbs, then
   * reduces the deltas via {@link #getNorms} and compares them to a threshold.
   *
   * @param fs The job file system.
   * @param prevNodeDb The NodeDb from the previous iteration.
   * @param newNodeDb The NodeDb from the current iteration.
   *
   * @return true when both the hub and authority deltas are below the
   * convergence threshold.
   * @throws IOException If an error occurs while running the converger job.
   */
  private boolean runConverger(FileSystem fs, Path prevNodeDb, Path newNodeDb) throws IOException
  {
    // convergence threshold; configurable, defaults to the previously
    // hard-coded value of 0.01
    final float delta = getConf().getFloat("link.analyze.converge.delta", 0.01f);

    Path tempNodeDb = new Path(newNodeDb.getParent(), ".converger_tmp");
    JobConf converger = new NutchJob(getConf());
    converger.setJobName("LinkRank Converger");
    FileInputFormat.addInputPath(converger, prevNodeDb);
    FileInputFormat.addInputPath(converger, newNodeDb);
    FileOutputFormat.setOutputPath(converger, tempNodeDb);
    converger.setInputFormat(SequenceFileInputFormat.class);
    converger.setMapperClass(convergerMapClass);
    converger.setReducerClass(convergerReduceClass);
    converger.setMapOutputKeyClass(Text.class);
    converger.setMapOutputValueClass(ObjectWritable.class);
    converger.setOutputKeyClass(Text.class);
    converger.setOutputValueClass(Node.class);
    converger.setOutputFormat(SequenceFileOutputFormat.class);

    LOG.info("Starting converging job");
    try {
      JobClient.runJob(converger);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished converging job");

    Map<String, Float> norms = getNorms(fs, tempNodeDb);
    // recursive delete; the single-argument delete(Path) is deprecated
    fs.delete(tempNodeDb, true);
    return norms.get("hubNorma") < delta && norms.get("authNorma") < delta;
  }

  /**
   * Normalizes the node scores in tempNodeDb by the hub and authority norma
   * values (computed via {@link #getNorms}) and writes the result to nodeDb.
   *
   * @param fs The job file system.
   * @param tempNodeDb The un-normalized node database.
   * @param nodeDb The output path for the normalized node database.
   *
   * @throws IOException If an error occurs while running the normalizer job.
   */
  private void runNormalizer(FileSystem fs, Path tempNodeDb, Path nodeDb)
    throws IOException
  {
    Map<String, Float> norms = getNorms(fs, tempNodeDb);

    /* Normalizing node scores. */
    JobConf normalizer = new NutchJob(getConf());
    // job name fixed; was copy-pasted as "LinkRank normaCalculator"
    normalizer.setJobName("LinkRank Normalizer");
    normalizer.set("link.normalizer.hubNorma",
      Float.toString(norms.get("hubNorma")));
    normalizer.set("link.normalizer.authNorma",
      Float.toString(norms.get("authNorma")));
    FileInputFormat.addInputPath(normalizer, tempNodeDb);
    FileOutputFormat.setOutputPath(normalizer, nodeDb);
    normalizer.setInputFormat(SequenceFileInputFormat.class);
    normalizer.setMapperClass(normalizerMapClass);
    normalizer.setMapOutputKeyClass(Text.class);
    normalizer.setMapOutputValueClass(Node.class);
    normalizer.setOutputKeyClass(Text.class);
    normalizer.setOutputValueClass(Node.class);
    normalizer.setOutputFormat(MapFileOutputFormat.class);

    // NOTE(review): normalizerReduceClass is never applied here, so the job
    // runs with the default reducer -- confirm whether that is intended

    // run the normalizer job
    LOG.info("Starting node normalizing job");
    try {
      JobClient.runJob(normalizer);
    }
    catch (IOException e) {
      LOG.error(StringUtils.stringifyException(e));
      throw e;
    }
    LOG.info("Finished node normalizing job");
  }

  /**
   * The Counter job that determines the total number of nodes in the WebGraph.
   * This is used to determine a rank one score for pages with zero inlinks but
   * that contain outlinks.
   */
  private static class Counter
    implements Mapper<Text, Node, Text, LongWritable>,
    Reducer<Text, LongWritable, Text, LongWritable> {

    private static final Text numNodes = new Text(NUM_NODES);
    private static final LongWritable one = new LongWritable(1L);

    private JobConf conf;

    public void configure(JobConf conf) {
      this.conf = conf;
    }

    /**
     * Emits a count of one under the shared key for every node.
     */
    public void map(Text key, Node value,
      OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {
      output.collect(numNodes, one);
    }

    /**
     * Sums the per-node counts and emits a single total value.
     */
    public void reduce(Text key, Iterator<LongWritable> values,
      OutputCollector<Text, LongWritable> output, Reporter reporter)
      throws IOException {

      long sum = 0L;
      while (values.hasNext()) {
        sum += values.next().get();
      }
      output.collect(numNodes, new LongWritable(sum));
    }

    public void close() {
    }
  }

  /**
   * Seeds every node with the default starting score read from
   * "link.analyze.initial.score" (default 1.0) ahead of link analysis.
   */
  private static class Initializer
    implements Mapper<Text, Node, Text, Node>
  {
    private JobConf conf;
    private float initialScore = 1.0f;

    public void configure(JobConf conf) {
      this.conf = conf;
      initialScore = conf.getFloat("link.analyze.initial.score", 1.0f);
    }

    /** Emits a copy of each node with its inlink score reset to the seed value. */
    public void map(Text key, Node node, OutputCollector<Text, Node> output,
      Reporter reporter)
      throws IOException
    {
      Node seeded = (Node)WritableUtils.clone(node, conf);
      seeded.setInlinkScore(initialScore);
      output.collect(new Text(key.toString()), seeded);
    }

    public void close() {
    }
  }

  /**
   * Inverts outlinks and attaches current score from the NodeDb of the
   * WebGraph. The link analysis process consists of inverting, analyzing and
   * scoring, in a loop for a given number of iterations.
   */
  private static class Inverter
    implements Mapper<Text, Writable, Text, ObjectWritable>,
    Reducer<Text, ObjectWritable, Text, LinkDatum>
  {
    private JobConf conf;

    public void configure(JobConf conf) {
      this.conf = conf;
    }

    /**
     * Convert values to ObjectWritable so heterogeneous values (Node,
     * LinkDatum, LoopSet) can flow through a single reduce.
     */
    public void map(Text key, Writable value,
      OutputCollector<Text, ObjectWritable> output, Reporter reporter)
      throws IOException
    {
      ObjectWritable objWrite = new ObjectWritable();
      objWrite.set(value);
      output.collect(key, objWrite);
    }

    /**
     * Inverts links, attaching the current score for each link from the NodeDb
     * of the WebGraph. Outlinks become inlinks carrying this node's outlink
     * (hub) score, and inlinks become outlinks carrying this node's inlink
     * (authority) score.
     */
    /*      Authorities                       Hubs 
     *
     *         a_1 <-------------------+  +--- h_1 
     *                        +--------|--+        
     *                        |        |           
     *         a_2  <---------+        +------ h_2 
     *          A             |        |           
     *          +-------------|--------|--+        
     *                        |        |  |        
     *          +-------------+        |  |        
     *          V                      |  |        
     *         a_3  <------------------+  +--- h_3 
     *
     *  a_i = [ for (j,i) in E: h_j ] # inlinks
     *  h_i = [ for (i,j) in E: a_j ] # outlinks
     */
    public void reduce(Text key, Iterator<ObjectWritable> values,
      OutputCollector<Text, LinkDatum> output, Reporter reporter)
      throws IOException
    {
      String url = key.toString();
      List<LinkDatum> links = new ArrayList<LinkDatum>();
      Node node = null;
      LoopSet loops = null;

      // aggregate links, pick out the node and (optional) loop set
      while (values.hasNext())
      {
        ObjectWritable write = values.next();
        Object obj = write.get();

        if (obj instanceof Node)
          node = (Node)obj;
        else if (obj instanceof LinkDatum)
          links.add((LinkDatum)obj);
        else if (obj instanceof LoopSet)
          loops = (LoopSet)obj;
      }

      // get the number of outlinks and the current inlink and outlink scores
      // from the node of the url
      int inLinks = node.getNumInlinks();
      int numOutlinks = node.getNumOutlinks();
      float inlinkScore = node.getInlinkScore();
      float outlinkScore = node.getOutlinkScore();
      LOG.debug(url + ": num outlinks " + numOutlinks + ", num inlinks " + inLinks);

      // invert each link, swapping its type and attaching this node's score
      for (LinkDatum link : links)
      {
        String linkUrl = link.getUrl();

        link.setUrl(url);
        if (link.getLinkType() == LinkDatum.OUTLINK)
        {
          /* My outlink - his inlink => set to my hub weight */
          link.setScore(outlinkScore);
          link.setLinkType(LinkDatum.INLINK);
        }
        else
        {
          /* My inlink - his outlink => set to my authority weight */
          link.setScore(inlinkScore);
          link.setLinkType(LinkDatum.OUTLINK);
        }

        // collect the inverted link
        output.collect(new Text(linkUrl), link);
      }
    }

    public void close()
    {
    }
  }

  /**
   * Runs a single link analysis iteration.
   */
  private static class Analyzer
    implements Mapper<Text, Writable, Text, ObjectWritable>,
    Reducer<Text, ObjectWritable, Text, Node> {

    private JobConf conf;
    private float dampingFactor = 0.85f;
    private float rankOne = 0.0f;
    private int itNum = 0;
    private boolean limitPages = true;
    private boolean limitDomains = true;

    /**
     * Reads the damping factor, rank-one score, iteration number and the
     * duplicate-inlink limiting flags from the job configuration.
     */
    public void configure(JobConf conf) {

      try {
        this.conf = conf;
        dampingFactor = conf.getFloat("link.analyze.damping.factor", 0.85f);
        rankOne = conf.getFloat("link.analyze.rank.one", 0.0f);
        itNum = conf.getInt("link.analyze.iteration", 0);
        limitPages = conf.getBoolean("link.ignore.limit.page", true);
        limitDomains = conf.getBoolean("link.ignore.limit.domain", true);
      }
      catch (Exception e) {
        LOG.error(StringUtils.stringifyException(e));
        throw new IllegalArgumentException(e);
      }
    }

    /**
     * Convert values to ObjectWritable
     */
    public void map(Text key, Writable value,
      OutputCollector<Text, ObjectWritable> output, Reporter reporter)
      throws IOException {

      ObjectWritable wrapped = new ObjectWritable();
      wrapped.set(WritableUtils.clone(value, conf));
      output.collect(key, wrapped);
    }

    /**
     * Performs a single iteration of link analysis. The resulting scores are
     * stored in a temporary NodeDb which replaces the NodeDb of the WebGraph.
     */
    public void reduce(Text key, Iterator<ObjectWritable> values,
      OutputCollector<Text, Node> output, Reporter reporter)
      throws IOException {

      String url = key.toString();
      Set<String> seenDomains = new HashSet<String>();
      Set<String> seenPages = new HashSet<String>();
      Node node = null;

      // a page with zero inlinks has a score of rankOne
      int inlinkCount = 0;
      float inlinkSum = rankOne;

      while (values.hasNext()) {

        Object obj = values.next().get();
        if (obj instanceof Node) {
          node = (Node)obj;
          continue;
        }
        if (!(obj instanceof LinkDatum)) {
          continue;
        }

        LinkDatum inlink = (LinkDatum)obj;
        float score = inlink.getScore();
        String fromUrl = inlink.getUrl();
        String fromDomain = URLUtil.getDomainName(fromUrl);
        String fromPage = URLUtil.getPage(fromUrl);

        // limit counting duplicate inlinks by pages or domains
        boolean dupPage = limitPages && seenPages.contains(fromPage);
        boolean dupDomain = limitDomains && seenDomains.contains(fromDomain);
        if (dupPage || dupDomain) {
          LOG.debug(url + ": ignoring " + score + " from "
            + fromUrl + ", duplicate page or domain");
          continue;
        }

        // aggregate total inlink score
        inlinkCount++;
        inlinkSum += score;
        seenDomains.add(fromDomain);
        seenPages.add(fromPage);
        LOG.debug(url + ": adding " + score + " from " + fromUrl
          + ", total: " + inlinkSum);
      }

      // calculate linkRank score formula
      float linkRankScore = (1 - dampingFactor) + (dampingFactor * inlinkSum);

      LOG.info(url + ": score: " + linkRankScore + " num inlinks: "
        + inlinkCount + " iteration: " + itNum + "\n");

      // store the score in a temporary NodeDb
      Node outNode = (Node)WritableUtils.clone(node, conf);
      outNode.setInlinkScore(linkRankScore);
      output.collect(key, outNode);
    }

    public void close()
      throws IOException {
    }
  }

  /**
   * Emits, per url, a node whose scores are the absolute differences between
   * the two input nodes' scores, feeding the convergence check.
   */
  private static class Converger
    implements Mapper<Text, Writable, Text, ObjectWritable>,
    Reducer<Text, ObjectWritable, Text, Node>
  {
    private JobConf conf;

    public void configure(JobConf conf) {
      this.conf = conf;
    }

    /**
     * Convert values to ObjectWritable
     */
    public void map(Text key, Writable value,
      OutputCollector<Text, ObjectWritable> output, Reporter reporter)
      throws IOException
    {
      ObjectWritable wrapped = new ObjectWritable();
      wrapped.set(WritableUtils.clone(value, conf));
      output.collect(key, wrapped);
    }

    /**
     * Collects a node carrying per-score absolute deltas.
     *
     * NOTE(review): assumes exactly two Node values arrive per key (one from
     * each input NodeDb); a url present in only one input would fail on the
     * second get() -- confirm the inputs always align.
     */
    public void reduce(Text key, Iterator<ObjectWritable> values,
      OutputCollector<Text, Node> output, Reporter reporter)
      throws IOException
    {
      List<Node> pair = new ArrayList<Node>();

      while (values.hasNext()) {
        pair.add((Node)values.next().get());
      }

      Node first = pair.get(0);
      Node second = pair.get(1);

      // store the score deltas in a temporary NodeDb
      Node delta = (Node)WritableUtils.clone(first, conf);
      delta.setInlinkScore(
        Math.abs(first.getInlinkScore() - second.getInlinkScore()));
      delta.setOutlinkScore(
        Math.abs(first.getOutlinkScore() - second.getOutlinkScore()));
      output.collect(key, delta);
    }

    public void close()
      throws IOException {
    }
  }

  /*
   * Calculate norma for both the authority vector and the hub vector: tags
   * every node's inlink score with "authNorma" and outlink score with
   * "hubNorma", then reduces each tag to the maximum value seen.
   */
  private static class NormaCalculator
    implements Mapper<Text, Node, Text, FloatWritable>,
    Reducer<Text, FloatWritable, Text, FloatWritable>
  {
    private static final Text hubNorma = new Text("hubNorma");
    private static final Text authNorma = new Text("authNorma");

    private JobConf conf;

    public void configure(JobConf conf) {
      this.conf = conf;
    }

    /** Tags each node's scores with the vector they belong to. */
    public void map(Text key, Node value,
      OutputCollector<Text, FloatWritable> output, Reporter reporter)
      throws IOException
    {
      output.collect(authNorma, new FloatWritable(value.getInlinkScore()));
      output.collect(hubNorma, new FloatWritable(value.getOutlinkScore()));
    }

    /** Reduces each vector to its maximum score. */
    public void reduce(Text key, Iterator<FloatWritable> values,
      OutputCollector<Text, FloatWritable> output, Reporter reporter)
      throws IOException
    {
      float best = -1f;

      while (values.hasNext()) {
        best = Math.max(best, values.next().get());
      }
      output.collect(key, new FloatWritable(best));
    }

    public void close() {
    }
  }

  /**
   * Rescales every node's scores by the norma values read from the job
   * configuration: inlink scores by "link.normalizer.authNorma", outlink
   * scores by "link.normalizer.hubNorma" (both default 1.0).
   */
  private static class Normalizer
    implements Mapper<Text, Node, Text, Node>
  {
    private JobConf conf;
    private float hubNorma = 1.0f;
    private float authNorma = 1.0f;

    public void configure(JobConf conf) {
      this.conf = conf;
      hubNorma = conf.getFloat("link.normalizer.hubNorma", hubNorma);
      authNorma = conf.getFloat("link.normalizer.authNorma", authNorma);
    }

    /** Emits a copy of the node with both scores divided by their norma. */
    public void map(Text key, Node node, OutputCollector<Text, Node> output,
      Reporter reporter) throws IOException
    {
      Node scaled = (Node)WritableUtils.clone(node, conf);

      scaled.setInlinkScore(node.getInlinkScore() / authNorma);
      scaled.setOutlinkScore(node.getOutlinkScore() / hubNorma);

      output.collect(new Text(key.toString()), scaled);
    }

    public void close() {
    }
  }

  /**
   * Default constructor. The configuration is presumably supplied later via
   * {@code setConf} (e.g. by the ToolRunner) before jobs are run.
   */
  public LinkRank() {
    super();
  }

  /**
   * Configurable constructor.
   *
   * @param conf The configuration used by all jobs this tool launches.
   */
  public LinkRank(Configuration conf) {
    super(conf);
  }

  // nothing to release; kept for API compatibility
  public void close() {
  }

  /**
   * Runs the complete link analysis job. The complete job determines the rank
   * one score. Then runs through a given number of invert and analyze
   * iterations, by default 10. And finally replaces the NodeDb in the WebGraph
   * with the link rank output.
   * 
   * @param webGraphDb The WebGraph to run link analysis on.
   * 
   * @throws IOException If an error occurs during link analysis.
   */
  public void analyze(Path webGraphDb)
    throws IOException {

    // store the link rank under the webgraphdb temporarily, final scores get
    // updated into the nodedb
    Path linkRank = new Path(webGraphDb, "linkrank");
    Configuration conf = getConf();
    FileSystem fs = FileSystem.get(conf);

    // create the linkrank directory if needed
    if (!fs.exists(linkRank)) {
      fs.mkdirs(linkRank);
    }

    // the webgraph outlink and node database paths
    Path wgOutlinkDb = new Path(webGraphDb, WebGraph.OUTLINK_DIR);
    Path wgInlinkDb = new Path(webGraphDb, WebGraph.INLINK_DIR);
    Path wgNodeDb = new Path(webGraphDb, WebGraph.NODE_DIR);
    Path nodeDb = new Path(linkRank, WebGraph.NODE_DIR);
    Path loopDb = new Path(webGraphDb, Loops.LOOPS_DIR);
    if (!fs.exists(loopDb)) {
      loopDb = null;
    }

    // get the number of total nodes in the webgraph, used for rank one, then
    // initialize all urls with a default score
    int numLinks = runCounter(fs, webGraphDb);
    runInitializer(numLinks, wgNodeDb, nodeDb);
    float rankOneScore = (1f / (float)numLinks);

    if (LOG.isInfoEnabled()) {
      LOG.info("Number of links " + numLinks);
      LOG.info("Rank One " + rankOneScore);
    }

    // run invert and analysis for a given number of iterations to allow the
    // link rank scores to converge
    int numIterations = conf.getInt("link.analyze.num.iterations", 10);
    boolean converged = false;
    for (int i = 0; i < numIterations && !converged; i++)
    {
      // the input to inverting is always the previous output from analysis
      LOG.info("Running iteration " + (i + 1) + " of " + numIterations);
      Path tempRank = new Path(linkRank + "-"
        + Integer.toString(new Random().nextInt(Integer.MAX_VALUE)));
      fs.mkdirs(tempRank);
      Path tempInverted = new Path(tempRank, "inverted");
      Path tempNodeDb = new Path(tempRank, WebGraph.NODE_DIR + ".tmp");
      Path normalizedNodeDb = new Path(tempRank, WebGraph.NODE_DIR);

      // run invert and analysis
      runInverter(nodeDb, wgInlinkDb, wgOutlinkDb, loopDb, tempInverted);
      runAnalysis(nodeDb, tempInverted, tempNodeDb, i, numIterations,
        rankOneScore);

      if (normalizerMapClass != null)
      {
        runNormalizer(fs, tempNodeDb, normalizedNodeDb);
        // remove the pre-normalization NodeDb (recursive delete; the
        // single-argument delete(Path) is deprecated)
        fs.delete(tempNodeDb, true);
      }
      else
      {
        // normalizer is not run, just rename tempNodeDb
        FSUtils.replace(fs, normalizedNodeDb, tempNodeDb, true);
      }

      // convergence is only checked when both converger classes are set
      if (convergerMapClass != null && convergerReduceClass != null)
        converged = runConverger(fs, nodeDb, normalizedNodeDb);

      // replace the temporary NodeDb with the output from analysis
      LOG.info("Installing new link scores");
      FSUtils.replace(fs, linkRank, tempRank, true);
      LOG.info("Finished analysis iteration " + (i + 1) + " of "
        + numIterations);
    }

    // replace the NodeDb in the WebGraph with the final output of analysis
    LOG.info("Installing web graph nodes");
    FSUtils.replace(fs, wgNodeDb, nodeDb, true);

    // remove the temporary link rank folder
    fs.delete(linkRank, true);
    LOG.info("Finished analysis");
  }

  /**
   * Command-line entry point: runs the LinkRank tool through the Hadoop
   * ToolRunner with the default Nutch configuration and exits with its result.
   */
  public static void main(String[] args)
    throws Exception {
    int res = ToolRunner.run(NutchConfiguration.create(), new LinkRank(), args);
    System.exit(res);
  }

  /**
   * Runs the LinkRank tool. Parses the command line, prints usage when help
   * is requested or the webgraphdb option is missing, otherwise runs analysis.
   *
   * @return 0 on success, -1 for usage problems, -2 on failure.
   */
  public int run(String[] args)
    throws Exception {

    Option helpOpts = OptionBuilder.withArgName("help").withDescription(
      "show this help message").create("help");
    Option webgraphOpts = OptionBuilder.withArgName("webgraphdb").hasArg().withDescription(
      "the web graph db to use").create("webgraphdb");

    Options options = new Options();
    options.addOption(helpOpts);
    options.addOption(webgraphOpts);

    CommandLineParser parser = new GnuParser();
    try {

      CommandLine line = parser.parse(options, args);
      boolean wantsHelp = line.hasOption("help");
      boolean hasWebGraphDb = line.hasOption("webgraphdb");

      if (wantsHelp || !hasWebGraphDb) {
        new HelpFormatter().printHelp("LinkRank", options);
        return -1;
      }

      analyze(new Path(line.getOptionValue("webgraphdb")));
      return 0;
    }
    catch (Exception e) {
      LOG.fatal("LinkAnalysis: " + StringUtils.stringifyException(e));
      return -2;
    }
  }
}
