package cn.pengpeng.day03.local;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import cn.pengpeng.day03.firstMr.MapTask;
import cn.pengpeng.day03.firstMr.ReduceTask;

/**
 * Driver that configures and submits a word-count MapReduce job
 * (mapper {@link MapTask}, reducer {@link ReduceTask}) against the
 * local file system.
 *
 * <p>Usage: {@code java Driver [inputPath outputPath]} — when no
 * arguments are given, the original hard-coded Windows paths are used.
 */
public class Driver {
	public static void main(String[] args) throws Exception {
		// Input/output locations; overridable via command line, defaulting
		// to the original hard-coded local paths for backward compatibility.
		String inputPath = args.length >= 2 ? args[0] : "d:/data/word.txt";
		String outputPath = args.length >= 2 ? args[1] : "d:/out/wcoutput";

		Configuration conf = new Configuration();
		// NOTE(review): to submit to a cluster instead of running locally, set
		// fs.defaultFS, mapreduce.framework.name=yarn,
		// yarn.resourcemanager.hostname and
		// mapreduce.app-submission.cross-platform=true on `conf`,
		// and set HADOOP_USER_NAME as needed.
		Job job = Job.getInstance(conf);

		// Wire up the mapper and reducer implementations for this job.
		job.setMapperClass(MapTask.class);
		job.setReducerClass(ReduceTask.class);
		// Jar containing the map/reduce classes; presumably only needed for
		// remote submission — TODO confirm whether this path is still valid.
		job.setJar("C:\\Users\\root\\Desktop\\wc2.jar");

		// Map output types (word -> count).
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);

		// Final (reduce) output types.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		// Input file and output directory.
		FileInputFormat.addInputPath(job, new Path(inputPath));
		Path output = new Path(outputPath);
		FileOutputFormat.setOutputPath(job, output);

		// MapReduce refuses to run if the output directory already exists;
		// delete it up front so reruns don't fail with
		// FileAlreadyExistsException. Uses the job's configured file system
		// (the local FS here), not a hard-coded HDFS URI.
		FileSystem fs = FileSystem.get(conf);
		if (fs.exists(output)) {
			fs.delete(output, true);
		}

		// Submit the job and block until it finishes, printing progress logs.
		boolean waitForCompletion = job.waitForCompletion(true);
		System.out.println(waitForCompletion?"程序正常退出":"程序有问题了");
		// Propagate success/failure through the process exit code so shell
		// scripts can detect a failed job.
		System.exit(waitForCompletion ? 0 : 1);
	}

}
