package com.chb.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Word-count driver for "server mode": the job is submitted from the local
 * machine but executes on the cluster.
 * <p>
 * Prerequisite: place the server's Hadoop configuration files under {@code src}
 * so they are on the classpath.
 * <p>
 * To invoke the job locally while it runs on the server (useful for debugging):
 * <ol>
 *   <li>Package the MR program as a jar and keep it on the local machine.</li>
 *   <li>Adjust the Hadoop setup as needed; make sure the project's lib matches
 *       the actually installed JDK's lib.</li>
 *   <li>Point the job at the local jar via a configuration property:
 *       {@code config.set("mapred.jar", "C:\\Users\\Administrator\\Desktop\\wc.jar");}</li>
 *   <li>Run this {@code main} method locally; a servlet may invoke the MR job
 *       the same way.</li>
 * </ol>
 *
 * @author chb
 *
 * @mail 1228532445@qq.com
 */
public class WC1 {
	/**
	 * Configures and submits the word-count job, blocking until it finishes.
	 * Reads input from a fixed HDFS directory and (re)creates the output
	 * directory, deleting any previous run's results first.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		//System.setProperty("hadoop.home.dir", "E:\\SetUpDir\\hadoop_compile_dir\\hadoop\\hadoop-2.5.2");
		System.out.println("start wc1 ...");
		Configuration conf = new Configuration();
		// Ship the locally built jar to the cluster (server-mode submission).
		conf.set("mapred.jar", "F:\\Project\\HadoopProject\\HadoopUtils\\target\\HadoopUtils-0.0.1.jar");
		try {
			FileSystem fs = FileSystem.get(conf);
			// BUGFIX: pass conf to getInstance(); otherwise the "mapred.jar"
			// setting above never reaches the job and the jar is not shipped.
			Job job = Job.getInstance(conf);
			job.setJarByClass(com.chb.wordcount.WC1.class);

			job.setJobName("WC1");
			// Mapper and Reducer classes
			job.setMapperClass(com.chb.wordcount.WCMapper.class);
			job.setReducerClass(com.chb.wordcount.WCReducer.class);
			// Mapper output key/value types
			job.setMapOutputKeyClass(Text.class);
			job.setMapOutputValueClass(IntWritable.class);
			// Job (reducer) output key/value types — BUGFIX: these were missing;
			// without them the job's declared output types default incorrectly.
			job.setOutputKeyClass(Text.class);
			job.setOutputValueClass(IntWritable.class);
			// Input and output paths; clear stale output so the job can rerun.
			FileInputFormat.addInputPath(job, new Path("hdfs://192.168.179.4:8020/apps/idc/data/"));
			Path out = new Path("hdfs://192.168.179.4:8020/apps/idc/wcOutput");
			if (fs.exists(out)) {
				fs.delete(out, true); // recursive delete of the old output dir
			}
			FileOutputFormat.setOutputPath(job, out);
			boolean f = job.waitForCompletion(true);
			if (f) {
				System.out.println("completion...");
			} else {
				System.out.println("job failed...");
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
