package com.cxl.mapreduce._14index;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

/**
 * 总需求：有三个txt文档   a.txt   b.txt   c.txt
 * 如 a.txt文档下内容
 * atguigu pingping
 * atguigu ss
 * atguigu ss
 * 期望输出数据
 * atguigu	c.txt-->2	b.txt-->2	a.txt-->3
 * pingping	c.txt-->1	b.txt-->3	a.txt-->1
 * ss	c.txt-->1	b.txt-->1	a.txt-->2
 * 思路:需要多job串联
 * 1.输入数据
 *atguigu pingping
 * atguigu ss
 * atguigu ss
 * 2.第一job完毕期望输出结果
 * 	atguigu--a.txt	3
 * 	atguigu--b.txt	2
 * 	atguigu--c.txt	2
 *3.第二job期望输出结果
 *  atguigu	 c.txt-->2	b.txt-->2	a.txt-->3
 *  pingping	c.txt-->1	b.txt-->3	a.txt-->1
 *  ss	c.txt-->1	b.txt-->1	a.txt-->2
 *
 *
 *
 */
/**
 * Driver for job 1 of the inverted-index pipeline: counts occurrences of each
 * word per input file, emitting keys of the form {@code word--filename} with
 * an {@link IntWritable} count (see the class-level notes for the overall plan).
 */
public class OneIndexDriver {

	/**
	 * Configures and submits the first indexing job.
	 *
	 * @param args optional {@code [inputPath, outputPath]}; when fewer than two
	 *             arguments are supplied, developer-local default paths are used
	 * @throws Exception if job setup or submission fails
	 */
	public static void main(String[] args) throws Exception {

		// Only fall back to the hard-coded local paths when no paths were
		// passed on the command line, so the driver stays usable on a cluster.
		if (args.length < 2) {
			args = new String[] { "/Users/a123/devWorkspace/bigData/hdfs/src/main/resources/input/manyJob",
					"/Users/a123/devWorkspace/bigData/hdfs/src/main/resources/output/index-part1" };
		}

		Configuration conf = new Configuration();

		Job job = Job.getInstance(conf);
		job.setJarByClass(OneIndexDriver.class);

		job.setMapperClass(OneIndexMapper.class);
		job.setReducerClass(OneIndexReduce.class);

		// Map output: "word--filename" -> 1
		job.setMapOutputKeyClass(Text.class);
		job.setMapOutputValueClass(IntWritable.class);

		// Reduce output: "word--filename" -> total count
		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(IntWritable.class);

		FileInputFormat.setInputPaths(job, new Path(args[0]));
		FileOutputFormat.setOutputPath(job, new Path(args[1]));

		// Propagate the job result as the process exit status so scripts and
		// schedulers can detect failure instead of it being silently ignored.
		System.exit(job.waitForCompletion(true) ? 0 : 1);
	}
}
