package edu.hit.crawler;

import java.net.URL;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import edu.hit.crawler.io.CrawItem;
import edu.hit.crawler.mapred.ParseMapper;
import edu.hit.crawler.mapred.ParseReducer;

/**
 * ParserDriver drives a parsing job.<br/>
 * This job analyzes all the pages that the crawler job downloads: it reads
 * the most recent {@code doc} folder under the crawler working directory and
 * runs {@link ParseMapper}/{@link ParseReducer} over it, writing the result
 * to a timestamped folder under {@code tmp/}.
 *
 * @author zzc (zzc3615@gmail.com)
 */
@SuppressWarnings("deprecation")
public class ParserDriver extends Configured implements Tool {

	// FIX: was Logger.getLogger(DocReader.class) — log records were
	// attributed to the wrong class.
	public static Logger logger = Logger.getLogger(ParserDriver.class);

	/**
	 * Re-keys each crawled item by the host part of its URL so that the
	 * reducer receives all items of one host together.
	 */
	public static class ForbiddenMapper extends
			Mapper<Text, CrawItem, Text, CrawItem> {
		@Override
		protected void map(Text key, CrawItem value, Context context)
				throws java.io.IOException, InterruptedException {
			URL u = new URL(key.toString());
			context.write(new Text(u.getHost()), value);
		}
	}

	/**
	 * Drops every item belonging to a host that has at least one item with
	 * status {@code CrawItem.FORBIDDEN}; otherwise re-emits all the host's
	 * items keyed by their URL.
	 */
	public static class ForbiddenReducer extends
			Reducer<Text, CrawItem, Text, CrawItem> {
		@Override
		protected void reduce(Text key, Iterable<CrawItem> values,
				Context context) throws java.io.IOException,
				InterruptedException {
			ArrayList<CrawItem> list = new ArrayList<CrawItem>();
			for (CrawItem it : values) {
				if (it.getStatus() == CrawItem.FORBIDDEN) {
					System.out.println("this host forbidden: " + key);
					// Discard everything buffered so far and stop: the
					// whole host is forbidden.
					list.clear();
					break;
				}
				// Hadoop reuses the value object across iterations, so a
				// defensive copy is required before buffering it.
				list.add(new CrawItem(it));
			}

			for (CrawItem it : list) {
				context.write(new Text(it.getUrl()), it);
			}
		}
	}

	/**
	 * Command-line entry point: runs the parse job via {@link ToolRunner}
	 * and prints the elapsed wall-clock time in seconds.
	 */
	public static void main(String[] args) throws Exception {

		long start = System.currentTimeMillis();
		Configuration conf = new Configuration();

		int res = ToolRunner.run(conf, new ParserDriver(), args);
		System.out.println("use time : " + (System.currentTimeMillis() - start)
				/ 1000);

		System.exit(res);
	}

	/**
	 * Configures and runs the parse MapReduce job over the most recently
	 * modified folder under {@code <workdir>/doc}.
	 *
	 * @param args unused; configuration comes from {@link #getConf()}
	 * @return 0 on success, 1 when the job fails or no input folder exists
	 */
	@Override
	public int run(String[] args) throws Exception {

		Configuration conf = getConf();
		Job job = new Job(conf, "Parse");

		// FIX: was Crawler.class — locate the jar via this driver's own class.
		job.setJarByClass(ParserDriver.class);

		job.setMapperClass(ParseMapper.class);
		job.setReducerClass(ParseReducer.class);

		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(CrawItem.class);

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(CrawItem.class);

		job.setInputFormatClass(SequenceFileInputFormat.class);
		job.setOutputFormatClass(SequenceFileOutputFormat.class);

		job.setNumReduceTasks(6);

		String workdir = conf.get("org.work.crawler.dir", "crawler/");
		FileSystem fs = FileSystem.get(conf);

		// Find the latest doc folder that the crawler job has downloaded.
		long latest = 0;
		Path inPath = null;
		for (FileStatus stat : fs.listStatus(new Path(workdir + "doc"))) {
			if (stat.getModificationTime() > latest) {
				inPath = stat.getPath();
				latest = stat.getModificationTime();
			}
		}

		// FIX: guard against an empty doc directory — previously this fell
		// through to addInputPath(job, null) and threw a NullPointerException.
		if (inPath == null) {
			System.err.println("no input found under " + workdir + "doc");
			logger.error("no input found under " + workdir + "doc");
			return 1;
		}

		FileInputFormat.addInputPath(job, inPath);
		Path out = new Path(workdir
				+ "tmp/"
				+ new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss").format(System
						.currentTimeMillis()));
		FileOutputFormat.setOutputPath(job, out);

		int res = job.waitForCompletion(true) ? 0 : 1;

		// NOTE(review): a second "remove_forbidden" pass (using
		// ForbiddenMapper/ForbiddenReducer above to filter out items from
		// forbidden hosts) was present here as commented-out code and has
		// been removed; the nested classes are retained so the pass can be
		// re-enabled from version control if needed.

		return res;
	}
}
