/* 
 * Cloud9: A MapReduce Library for Hadoop
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package com.hadoop.lab.three;

import java.io.IOException;
import edu.umd.cloud9.collection.wikipedia.*;
import org.apache.hadoop.fs.*;
import org.apache.log4j.Logger;
import java.io.IOException;
import java.util.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import org.apache.hadoop.conf.Configuration;

/**
 * <p>
 * Tool for preparing PageRank input from a Wikipedia XML dump file. It chains
 * three MapReduce jobs: (1) count the actual articles, excluding redirect
 * pages, disambiguation pages, empty pages, and non-articles ("File:",
 * "Category:", "Wikipedia:", etc.); (2) emit each article's title with an
 * initial rank of 1.0 together with its list of outgoing links; (3) re-read
 * the resulting link lists line by line. The program takes three command-line
 * arguments: the path to the Wikipedia XML dump file, the output directory
 * for the page count, and the output directory for the link lists (the third
 * job writes to that directory name suffixed with "_output").
 * </p>
 * 
 * <p>
 * Here's a sample invocation:
 * </p>
 * 
 * <blockquote>
 * 
 * <pre>
 * hadoop jar cloud9.jar com.hadoop.lab.three.PageRankWikipediaPages \
 *   -libjars bliki-core-3.0.15.jar,commons-lang-2.5.jar \
 *   /user/jimmy/Wikipedia/raw/enwiki-20101011-pages-articles.xml counts links
 * </pre>
 * 
 * </blockquote>
 * 
 * @author Jimmy Lin (original counting skeleton)
 */
@SuppressWarnings("deprecation")
public class PageRankWikipediaPages extends Configured {
	
	public static int N; 
	private static final Logger LOG = Logger.getLogger(PageRankWikipediaPages.class);

	private static enum PageTypes { TOTAL, REDIRECT, DISAMBIGUATION, EMPTY, ARTICLE, STUB, NON_ARTICLE };


        /**
         * Counts Wikipedia articles: emits ("wiki", 1) for every page that is
         * a genuine article — not a redirect, disambiguation page, empty page,
         * or a page in a non-article namespace.
         */
        private static class MyMapper extends MapReduceBase implements
                Mapper<LongWritable, WikipediaPage, Text, IntWritable> {

            // Reused output objects to avoid per-record allocation.
            private final Text word = new Text();
            private final static IntWritable one = new IntWritable(1);

            /**
             * Emits ("wiki", 1) for each genuine article; bumps the ARTICLE
             * counter for articles and TOTAL for every page seen.
             */
            public void map(LongWritable key, WikipediaPage p,
                    OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
                if (isArticle(p) && !p.isDisambiguation() && !p.isEmpty() && !p.isRedirect()) {
                    word.set("wiki");
                    output.collect(word, one);
                    reporter.incrCounter(PageTypes.ARTICLE, 1);
                }

                reporter.incrCounter(PageTypes.TOTAL, 1);
            }

            /**
             * Returns true unless the title is in a non-article namespace
             * ("File:", "Category:", "Special:", "Wikipedia:", "Template:",
             * "Portal:"). The original listed "Wikipedia:" twice; it is
             * checked once here — same behavior.
             */
            public boolean isArticle(WikipediaPage p) {
                String title = p.getTitle();
                return !(title.startsWith("File:") || title.startsWith("Category:")
                        || title.startsWith("Special:") || title.startsWith("Wikipedia:")
                        || title.startsWith("Template:") || title.startsWith("Portal:"));
            }
        }


	/**
	 * Extracts outgoing links from each Wikipedia article. For every genuine
	 * article, emits the key "&lt;title&gt; | 1.0" (title plus initial rank)
	 * with a bracketed, comma-separated list of link destinations as value.
	 */
	private static class LinkMapper extends MapReduceBase implements
			Mapper<LongWritable, WikipediaPage, Text, Text> {

		// Reused output objects to avoid per-record allocation.
		private Text word = new Text();
		private Text list = new Text();

		public void map(LongWritable key, WikipediaPage p,
				OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			// Bug fix: the original condition was
			//     isArticle(p) && !p.isRedirect() && !p.isDisambiguation() || !p.isEmpty()
			// Since && binds tighter than ||, every non-empty page (redirects,
			// "File:" pages, etc.) slipped through. All four predicates must
			// hold, matching the article filter used by MyMapper.
			if (isArticle(p) && !p.isRedirect() && !p.isDisambiguation() && !p.isEmpty()) {
				word.set(p.getTitle() + " | 1.0");
				String l = toString(extractLinkDestinations(p.getRawXML()));
				// toString() never returns "" (an empty list renders as "[]"),
				// so articles without links are still emitted, as before.
				if (!l.isEmpty()) {
					list.set(l);
					output.collect(word, list);
					reporter.incrCounter(PageTypes.ARTICLE, 1);
				}
			}

			reporter.incrCounter(PageTypes.TOTAL, 1);
		}

		/**
		 * Returns true unless the title is in a non-article namespace
		 * ("File:", "Category:", "Special:", "Wikipedia:", "Template:",
		 * "Portal:"). The duplicated "Wikipedia:" check was dropped.
		 */
		public boolean isArticle(WikipediaPage p) {
			String title = p.getTitle();
			return !(title.startsWith("File:") || title.startsWith("Category:")
					|| title.startsWith("Special:") || title.startsWith("Wikipedia:")
					|| title.startsWith("Template:") || title.startsWith("Portal:"));
		}

		/**
		 * Renders a list of titles as "[a, b, c]"; an empty list yields "[]".
		 */
		public String toString(List<String> list) {
			if (list.size() <= 0) {
				return "[]";
			}
			StringBuilder sb = new StringBuilder("[");
			for (int i = 0; i < list.size() - 1; i++) {
				sb.append(list.get(i)).append(", ");
			}
			return sb.append(list.get(list.size() - 1)).append("]").toString();
		}

		/**
		 * Scans raw wiki markup for "[[...]]" links and returns the trimmed
		 * link destinations. Skips empty links and "special" links containing
		 * ":"; strips anchor text after "|" and section anchors after "#";
		 * drops article-internal links such as [[#section|here]].
		 */
		public List<String> extractLinkDestinations(String mPage) {
			int start = 0;
			List<String> links = new ArrayList<String>();

			while (true) {
				start = mPage.indexOf("[[", start);
				if (start < 0)
					break;

				int end = mPage.indexOf("]]", start);
				if (end < 0)
					break;

				String text = mPage.substring(start + 2, end);

				// skip empty links
				if (text.length() == 0) {
					start = end + 1;
					continue;
				}

				// skip special links (namespace / interwiki titles)
				if (text.indexOf(":") != -1) {
					start = end + 1;
					continue;
				}

				// if there is anchor text, keep only the article title
				int a;
				if ((a = text.indexOf("|")) != -1) {
					text = text.substring(0, a);
				}

				// drop section anchors, e.g. [[Title#Section]] -> Title
				if ((a = text.indexOf("#")) != -1) {
					text = text.substring(0, a);
				}

				// ignore article-internal links, e.g., [[#section|here]]
				if (text.length() == 0) {
					start = end + 1;
					continue;
				}

				links.add(text.trim());
				start = end + 1;
			}

			return links;
		}

	}
		
    /**
     * Pass-through mapper for the third job: emits each input line verbatim as
     * the key with the placeholder value "o". (Commented-out drafts of the
     * probability-splitting logic have been removed; the unused IntWritable
     * field is gone as well.)
     */
    public static class ProbMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, Text> {
        // Reused output objects to avoid per-record allocation.
        private Text word = new Text();
        private Text p = new Text();

        public void map(LongWritable key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
            StringTokenizer tokenizer = new StringTokenizer(value.toString(), "\n");
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                p.set("o");
                output.collect(word, p);
            }
        }

        /**
         * Splits a comma-separated link list into a list of trimmed titles.
         * Currently unused by map(), kept for callers of this helper.
         */
        public List<String> createList(String links) {
            List<String> list = new ArrayList<String>();
            StringTokenizer st = new StringTokenizer(links, ",");
            while (st.hasMoreTokens()) {
                list.add(st.nextToken().trim());
            }
            return list;
        }

    }


    /**
     * Sums the integer counts emitted for each key; used as both the combiner
     * and the reducer of the page-count job.
     */
    public static class FirstReduce extends MapReduceBase
            implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter) throws IOException {
            int total = 0;
            for (; values.hasNext(); ) {
                total += values.next().get();
            }
            output.collect(key, new IntWritable(total));
        }
    }


    /**
     * Reducer/combiner for the link-extraction job: writes through only the
     * first value seen for each key; any additional values are dropped.
     */
    public static class LinkReduce extends MapReduceBase
            implements Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
            Text first = values.next();
            output.collect(key, first);
        }
    }


    /**
     * Reducer/combiner for the third job: writes through only the first value
     * seen for each key; any additional values are dropped.
     */
    public static class ProbReduce extends MapReduceBase
            implements Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterator<Text> values,
                OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
            Text first = values.next();
            output.collect(key, first);
        }
    }

	

	
	/**
	 * Driver for the three chained jobs:
	 * <ol>
	 * <li>count the article pages in the dump and store the total in {@link #N},</li>
	 * <li>emit each article ("title | 1.0") with its list of out-links,</li>
	 * <li>re-read those link lists line by line (rank-propagation skeleton).</li>
	 * </ol>
	 * args[0] = Wikipedia XML dump, args[1] = count output dir,
	 * args[2] = link output dir (job 3 writes to args[2] + "_output").
	 * Cleanups vs. the original: removed an unused byte buffer, removed the
	 * duplicated setInputPaths/setNumMapTasks calls, gave job 3 its own name
	 * (it was a copy-pasted "GetLinks"), and closed the count stream in a
	 * finally block so it is released even if parsing fails.
	 */
	public static void main(String[] args) throws Exception {
		Configuration conf1 = new Configuration();
		FileSystem fs = FileSystem.get(conf1);

		// ---- Job 1: count article pages --------------------------------
		JobConf conf = new JobConf(PageRankWikipediaPages.class);
		conf.setJobName("CountWikipediaPages");
		conf.setNumMapTasks(10);
		conf.setInputFormat(WikipediaPageInputFormat.class);
		conf.setOutputFormat(TextOutputFormat.class);
		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(IntWritable.class);
		conf.setMapperClass(MyMapper.class);
		conf.setCombinerClass(FirstReduce.class);
		conf.setReducerClass(FirstReduce.class);
		FileInputFormat.setInputPaths(conf, new Path(args[0]));
		Path outputDir = new Path(args[1]);
		FileOutputFormat.setOutputPath(conf, outputDir);
		JobClient.runJob(conf);

		// The single reducer writes one line of the form "wiki\t<count>";
		// parse the count into N. (Path normalizes the separator, so
		// "/part-00000" is equivalent to the original "//part-00000".)
		Path countFile = outputDir.suffix("/part-00000");
		FSDataInputStream in = fs.open(countFile);
		try {
			String messageIn = in.readLine();
			System.out.println(messageIn);
			String[] result = messageIn.split("\t");
			N = Integer.parseInt(result[1]);
			System.out.println(N);
		} finally {
			in.close();
		}

		// ---- Job 2: extract per-article link lists ---------------------
		JobConf conf2 = new JobConf(PageRankWikipediaPages.class);
		conf2.setJobName("GetLinks");
		conf2.setNumMapTasks(10);
		conf2.setInputFormat(WikipediaPageInputFormat.class);
		conf2.setOutputFormat(TextOutputFormat.class);
		conf2.setOutputKeyClass(Text.class);
		conf2.setOutputValueClass(Text.class);
		conf2.setMapperClass(LinkMapper.class);
		conf2.setCombinerClass(LinkReduce.class);
		conf2.setReducerClass(LinkReduce.class);
		FileInputFormat.setInputPaths(conf2, new Path(args[0]));
		Path interm = new Path(args[2]);
		FileOutputFormat.setOutputPath(conf2, interm);
		JobClient.runJob(conf2);

		// ---- Job 3: process the link lists -----------------------------
		Path linkFile = interm.suffix("/part-00000");
		JobConf conf3 = new JobConf(PageRankWikipediaPages.class);
		conf3.setJobName("ProcessLinks");
		conf3.setNumMapTasks(10);
		conf3.setInputFormat(TextInputFormat.class);
		conf3.setOutputFormat(TextOutputFormat.class);
		conf3.setOutputKeyClass(Text.class);
		conf3.setOutputValueClass(Text.class);
		conf3.setMapperClass(ProbMap.class);
		conf3.setCombinerClass(ProbReduce.class);
		conf3.setReducerClass(ProbReduce.class);
		FileInputFormat.setInputPaths(conf3, linkFile);
		FileOutputFormat.setOutputPath(conf3, new Path(args[2] + "_output"));
		JobClient.runJob(conf3);
	}

}
