package mr.scan;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

import util.LongPair;

public class TextNumScan {

	/**
	 * Driver for a two-pass MapReduce prefix-scan over text input.
	 *
	 * <p>Pass 1 ("scan1"): {@code FirstMapper}/{@code ScanReducer} (single reducer)
	 * write a partial-sum list to a temporary directory {@code args[1] + "_int"}.
	 * Pass 2 ("scan2"): {@code SecondMapper} (map-only) re-reads the original
	 * input with that partial-sum list distributed via {@link DistributedCache}
	 * and writes the final result to {@code args[1]}.
	 *
	 * @param args args[0] = HDFS input path, args[1] = HDFS output path
	 * @throws Exception on any job-submission or filesystem failure
	 */
	public static void main(String[] args) throws Exception {

		// FIX: primitive boolean instead of the Boolean wrapper (no nullability
		// is needed here, and unboxing a wrapper in the later `if (ok)` is
		// pointless indirection).
		boolean ok = false;
		Path OUT_TMP = null;
		Path OUTPUT = null;
		Path INPUT = null;
		if (args.length >= 2) {
			INPUT = new Path(args[0]);
			OUTPUT = new Path(args[1]);

			Configuration conf = new Configuration();
			FileSystem fs = FileSystem.get(conf);
			// Clobber a pre-existing output directory so the job can run.
			if (fs.exists(OUTPUT)) {
				fs.delete(OUTPUT, true);
			}

			Job job = new Job(conf, "scan1");
			// FIX: locate the jar via this driver class, not the unrelated
			// SeqNumScan driver (copy-paste leftover; works only by accident
			// when both classes happen to share a jar).
			job.setJarByClass(TextNumScan.class);
			job.setMapperClass(FirstMapper.class);
			job.setReducerClass(ScanReducer.class);
			// A single reducer so the partial sums land in one ordered file
			// (part-r-00000), which pass 2 consumes as a whole.
			job.setNumReduceTasks(1);

			job.setMapOutputKeyClass(IntWritable.class);
			job.setMapOutputValueClass(LongPair.class);
			job.setOutputKeyClass(IntWritable.class);
			job.setOutputValueClass(Text.class);
			job.setOutputFormatClass(TextOutputFormat.class);
			FileInputFormat.addInputPath(job, INPUT);
			// Intermediate output lives next to the final output path.
			OUT_TMP = new Path(args[1] + "_int");
			if (fs.exists(OUT_TMP))
				fs.delete(OUT_TMP, true);
			FileOutputFormat.setOutputPath(job, OUT_TMP);
			ok = job.waitForCompletion(true);
		} else {
			// FIX: tell the user what went wrong instead of silently exiting.
			System.err.println("Usage: TextNumScan <input> <output>");
		}

		if (ok) {
			Configuration conf2 = new Configuration();
			FileSystem fs = FileSystem.get(conf2);
			Path partialSumList = new Path(OUT_TMP, "part-r-00000");
			// NOTE(review): round-tripping through the local filesystem only to
			// copy the file back into HDFS under a fixed cache path is wasteful
			// (the original author flagged it too). Kept as-is because
			// SecondMapper presumably looks the file up by this exact name —
			// TODO confirm before simplifying to a direct addCacheFile.
			Path localCache = new Path("/tmp/scan_mr/partialSumList.txt");
			fs.copyToLocalFile(partialSumList, localCache);

			Path cacheFile = new Path("scan/cache/partialSumList.txt");
			if (fs.exists(cacheFile))
				fs.delete(cacheFile, true);
			fs.copyFromLocalFile(localCache, cacheFile);
			// Must be registered on the Configuration BEFORE the Job is
			// constructed, since Job copies the conf.
			DistributedCache.addCacheFile(cacheFile.toUri(), conf2);
			System.out.println(cacheFile.toUri());
			Job job2 = new Job(conf2, "scan2");
			job2.setJarByClass(TextNumScan.class);
			job2.setMapperClass(SecondMapper.class);
			// Map-only: each mapper emits final scan values directly.
			job2.setNumReduceTasks(0);
			job2.setMapOutputKeyClass(Text.class);
			job2.setMapOutputValueClass(NullWritable.class);
			// for seq file
			// job2.setOutputKeyClass(LongWritable.class);
			// job2.setOutputValueClass(LongWritable.class);
			job2.setInputFormatClass(TextInputFormat.class);
			job2.setOutputFormatClass(TextOutputFormat.class);
			FileInputFormat.addInputPath(job2, new Path(args[0]));
			FileOutputFormat.setOutputPath(job2, new Path(args[1]));
			int rst = job2.waitForCompletion(true) ? 0 : 1;
			// Intentional cleanup of both the intermediate AND final output on
			// JVM exit — the original author notes the outputs are too large to
			// keep. NOTE(review): deleting the final OUTPUT discards the result;
			// confirm this is really wanted before reusing this driver.
			fs.deleteOnExit(OUT_TMP);
			fs.deleteOnExit(OUTPUT); // output are too large, so I delete them
			System.exit(rst);

		} else
			System.exit(1);

	}
}
