package edu.hit.crawler;

import java.io.IOException;
import java.text.SimpleDateFormat;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import edu.hit.crawler.io.CrawItem;
import edu.hit.crawler.io.FetchOutput;
import edu.hit.crawler.mapred.CrawlReducer;
import edu.hit.crawler.mapred.PartitionByHost;

/**
 * Driver for the crawler Hadoop program. The job downloads every URL listed
 * in its input folder, fetching pages with multiple threads per reduce task.
 * 
 * @author
 * 
 */
@SuppressWarnings("deprecation")
public class CrawlDriver extends Configured implements Tool {

	public static Logger logger = Logger.getLogger(CrawlDriver.class);

	/**
	 * Identity mapper: forwards each (url, CrawItem) pair unchanged so that
	 * {@link PartitionByHost} can route all URLs of one host to the same
	 * reducer, where the actual download happens.
	 */
	public static class CrawlMapper extends
			Mapper<Text, CrawItem, Text, CrawItem> {

		@Override
		public void map(Text key, CrawItem value, Context context)
				throws IOException, InterruptedException {
			// Propagate failures instead of swallowing them with
			// printStackTrace(): a write error must fail the task so the
			// framework can retry it, and InterruptedException must not be
			// silently discarded.
			context.write(key, value);
		}

	}

	public static void main(String[] args) throws Exception {
		int res = ToolRunner.run(new Configuration(), new CrawlDriver(), args);
		System.exit(res);
	}

	/**
	 * Configures and runs the "Download" MapReduce job.
	 * <p>
	 * Input is the most recently modified directory under
	 * {@code <workdir>/generate/}; output goes to a timestamped directory
	 * under {@code <workdir>/doc/}.
	 *
	 * @param args unused; configuration is taken from {@link #getConf()}
	 * @return 0 if the job completed successfully, 1 otherwise (including
	 *         when no input directory could be found)
	 * @throws Exception if job setup or submission fails
	 */
	@Override
	public int run(String[] args) throws Exception {
		Configuration conf = getConf();
		// Fetching a page can take arbitrarily long, so disable the
		// per-task timeout entirely.
		conf.setLong("mapreduce.task.timeout", 0);
		Job job = new Job(conf, "Download");
		job.setJarByClass(Crawler.class);

		logger.debug("job configuration: " + conf);

		job.setMapperClass(CrawlMapper.class);
		job.setPartitionerClass(PartitionByHost.class);
		job.setReducerClass(CrawlReducer.class);
		// TODO: 加上排序函数？参考nutch的，这样是否可以省去reduce的shuffle

		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(CrawItem.class);

		job.setOutputKeyClass(CrawItem.class);
		job.setOutputValueClass(FetchOutput.class);

		int reduce_task_number = conf.getInt("org.work.crawler.num.reduceTask",
				6);
		job.setNumReduceTasks(reduce_task_number);

		String workdir = conf.get("org.work.crawler.dir", "crawler/");
		FileSystem fs = FileSystem.get(conf);

		// Pick the most recently generated URL-list directory as input.
		long latest = 0;
		Path in = null;
		for (FileStatus stat : fs.listStatus(new Path(workdir + "generate/"))) {
			if (stat.getModificationTime() > latest) {
				in = stat.getPath();
				latest = stat.getModificationTime();
			}
		}

		// Guard against an empty generate/ directory: without this check
		// addInputPath(job, null) throws an opaque NullPointerException.
		if (in == null) {
			logger.error("no input found under " + workdir + "generate/");
			System.out.println("crawler failed.");
			return 1;
		}

		FileInputFormat.addInputPath(job, in);

		String time = new SimpleDateFormat("yyyy_MM_dd_HH_mm_ss").format(System
				.currentTimeMillis());
		Path out = new Path(workdir + "doc/" + time);
		SequenceFileOutputFormat.setOutputPath(job, out);

		int res = job.waitForCompletion(true) ? 0 : 1;
		if (res == 0) {
			// TODO: old directories under doc/ are never cleaned up and will
			// accumulate; add a retention policy.
			System.out.println("crawler done.");
		} else {
			System.out.println("crawler failed.");
		}

		return res;
	}
}