package com.mr.wordcount.keyvalue;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/*
 * Driver entry point: configures and submits the word-count job when this
 * program is launched.
 */

public class WCDriver {

	/**
	 * Configures and submits the word-count MapReduce job, then exits with
	 * status 0 on success or 1 on failure.
	 *
	 * @param args unused command-line arguments
	 * @throws IOException            if the filesystem or job setup fails
	 * @throws ClassNotFoundException if the Mapper/Reducer classes cannot be loaded
	 * @throws InterruptedException   if the job is interrupted while waiting
	 */
	public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {

		// Configuration shared by the whole job.
		Configuration conf = new Configuration();

		// Configure the key/value separator for KeyValueTextInputFormat.
		// NOTE: KeyValueLineRecordReader only uses the FIRST character of this
		// property (this.separator = (byte) sepStr.charAt(0)), so the effective
		// separator here is '*', not the full "***" string.
		conf.set("mapreduce.input.keyvaluelinerecordreader.key.value.separator", "***");

		// Create the job from the configuration.
		Job job = Job.getInstance(conf);

		// Human-readable job name shown in the cluster UI.
		job.setJobName("wordcount");

		// Ship the jar that contains WCDriver (and the Mapper/Reducer) with the job.
		job.setJarByClass(WCDriver.class);

		// Mapper and Reducer implementations for this job.
		job.setMapperClass(WCMapper.class);
		job.setReducerClass(WCReducer.class);

		// The framework needs the output key/value types to pick serializers.
		// Since the Mapper and Reducer emit the same types, setting only the
		// final output types is sufficient.
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(LongWritable.class);

		// Input and output directories.
		Path input = new Path("E:/尚硅谷大数据/05-Hadoop，HadoopHA，Zookeeper/05-Mapreduce/mrinput/keyvalue");
		Path output = new Path("E:/尚硅谷大数据/05-Hadoop，HadoopHA，Zookeeper/05-Mapreduce/mroutput/keyvalue");

		// The output directory must not exist before the job runs; delete it
		// recursively if a previous run left it behind.
		FileSystem fs = FileSystem.get(conf);
		if (fs.exists(output)) {
			fs.delete(output, true);
		}

		// Use KeyValueTextInputFormat so each input line is split into a
		// key/value pair at the configured separator.
		job.setInputFormatClass(KeyValueTextInputFormat.class);

		FileInputFormat.setInputPaths(job, input);
		FileOutputFormat.setOutputPath(job, output);

		// Run the job and propagate its success/failure to the process exit
		// code (the original ignored the waitForCompletion return value, so
		// the driver always exited 0 even when the job failed).
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}

}


