package com.godo.hadoop.merger;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.IntSumReducer;


import com.godo.hadoop.mr.TestM;
import com.godo.hadoop.mr.TestR;

/**
 * MapReduce driver that merges every file under an input directory
 * through the {@code TestM}/{@code TestR} word-count style job supplied
 * by {@link BaseMerger}.
 */
public class MyTestMerger extends BaseMerger {

	/**
	 * Collects all files under {@code input} (via {@code getDirFileList})
	 * as job input paths and prepares {@code output} as the job output
	 * directory, deleting it first if it already exists.
	 *
	 * @param input  directory whose files become the job's input paths
	 * @param output output directory path; removed if it already exists
	 */
	public MyTestMerger(String input, String output) {
		inputPaths = new ArrayList<Path>();
		for (File file : getDirFileList(new File(input))) {
			inputPaths.add(new Path(file.getPath()));
		}
		File outputDir = new File(output);
		// isDirectory() already implies exists(), so one check suffices.
		if (outputDir.isDirectory()) {
			super.deleteDirectory(outputDir);
			try {
				// Brief pause so the deletion settles before the job
				// re-creates the directory.
				Thread.sleep(1000);
			} catch (InterruptedException e) {
				// Restore the interrupt flag instead of swallowing it.
				Thread.currentThread().interrupt();
			}
		}
		outputPath = new Path(output);
	}

	/**
	 * Configures and runs the MapReduce job on the {@code job} instance
	 * provided by {@link BaseMerger}, then waits for completion.
	 *
	 * @throws IOException            on HDFS or job-submission failure
	 * @throws InterruptedException   if waiting for completion is interrupted
	 * @throws ClassNotFoundException if a configured job class cannot be loaded
	 */
	@Override
	public void merger() throws IOException, InterruptedException, ClassNotFoundException {
		job.setJobName("mytest merger");
		job.setJarByClass(this.getClass());

		job.setCombinerClass(IntSumReducer.class); // local pre-aggregation on the map side
		job.setMapperClass(TestM.class);           // map phase
		job.setReducerClass(TestR.class);          // reduce phase

		// Output key/value types for both the map and reduce phases.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		for (Path p : inputPaths) {
			FileInputFormat.addInputPath(job, p);
		}
		FileOutputFormat.setOutputPath(job, outputPath);

		job.waitForCompletion(true);
	}

}
