package com.tonysor.hadoop.mapreduce;

import java.util.Properties;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Driver for a word-count MapReduce job.
 *
 * <p>Wires {@code WordMapper} and {@code WordReduce} into a Hadoop {@link Job},
 * submits it, and exits with status 0 on success or 1 on failure.
 *
 * <p>Usage: {@code WordJob [inputPath] [outputPath]} — when arguments are
 * omitted, the original hard-coded HDFS paths are used, so existing
 * invocations keep working.
 */
public class WordJob {
	public static void main(String[] args) throws Exception {

        // Build the job from a fresh Configuration (picks up *-site.xml on the classpath).
        Job job = Job.getInstance(new Configuration());

        // Diagnostic: show which Hadoop user (if any) this JVM will submit as.
        // May print "null" when HADOOP_USER_NAME is not set.
        Properties properties = System.getProperties();
        System.out.println(properties.getProperty("HADOOP_USER_NAME"));

        // Must be the class containing main() so Hadoop can locate the job jar.
        job.setJarByClass(WordJob.class);

        // Mapper configuration: emits (Text word, LongWritable count).
        job.setMapperClass(WordMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // Input/output paths: taken from the command line when provided,
        // otherwise fall back to the historical hard-coded defaults.
        Path in = new Path(args.length > 0 ? args[0] : "hdfs://h1:9000/input/aa/test.txt");
        Path out = new Path(args.length > 1 ? args[1] : "hdfs://h1:9000/output2");
        FileInputFormat.setInputPaths(job, in);

        // Reducer configuration: emits (Text word, LongWritable total).
        job.setReducerClass(WordReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        FileOutputFormat.setOutputPath(job, out);

        // Submit and block until completion; propagate success/failure to the
        // caller so shell scripts and schedulers can detect a failed job.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
