package com.example.example01;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class HadoopMapReduceDrive {
  public HadoopMapReduceDrive(){}
  /**
   * Configures and submits the word-count MapReduce job, then terminates the JVM
   * with the job's success/failure status.
   *
   * <p>Overall flow:
   * <ol>
   *   <li>Read the input text line by line (TextInputFormat default)</li>
   *   <li>Map phase splits each line into words, emitting (word, count) pairs</li>
   *   <li>Reduce phase aggregates the counts and writes the totals</li>
   *   <li>Assemble the MR job, package the jar, and submit to the YARN cluster</li>
   *   <li>Upload test files and run the job against them</li>
   * </ol>
   *
   * <p>NOTE: this method calls {@link System#exit(int)} and never returns; it is only
   * suitable as the last call of a driver {@code main}. (Method name {@code Start}
   * violates lowerCamelCase but is kept for caller compatibility.)
   *
   * @param inputPaths comma-separated HDFS input path(s) passed to
   *                   {@code FileInputFormat.setInputPaths}
   * @param outputPath HDFS output directory; must not already exist, or the job fails
   * @throws IOException            if job setup or submission fails
   * @throws InterruptedException   if the wait for completion is interrupted
   * @throws ClassNotFoundException if the mapper/reducer classes cannot be resolved
   */
  public void Start(String inputPaths,String outputPath) throws IOException, ClassNotFoundException, InterruptedException{
    // 1. Build the configuration and wrap it in a Job.
    // FIX: configuration.set("user", "root") was a no-op — "user" is not a Hadoop
    // configuration key. The client-side submitting user is taken from the
    // HADOOP_USER_NAME system property / environment variable (read by
    // UserGroupInformation), so set that instead.
    System.setProperty("HADOOP_USER_NAME", "root");
    Configuration configuration = new Configuration();
    Job job = Job.getInstance(configuration);
    // 2. Locate the jar to ship to the cluster via this driver class.
    job.setJarByClass(HadoopMapReduceDrive.class);
    // 3. Wire up the mapper and reducer implementations.
    job.setMapperClass(WordCountMapper.class);
    job.setReducerClass(WordCountReducer.class);
    // 4. Declare the map-output key/value types (word -> partial count).
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    // 5. Declare the final (reduce) output key/value types.
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);
    // 6. Set the input and output paths on HDFS.
    FileInputFormat.setInputPaths(job, new Path(inputPaths));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));
    // 7. Submit and block until completion; verbose=true streams progress to the client.
    boolean result = job.waitForCompletion(true);
    // Exit code 0 on success, 1 on failure — terminates the JVM (see Javadoc note).
    System.exit(result ? 0 : 1);
  }
}
