package com.qingguo.MapReduce;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class offset {
    /**
     * MapReduce job that shows the byte offset of every input line.
     *
     * <p>The mapper passes through the (offset, line) pairs produced by the
     * default TextInputFormat; the reducer emits each offset key with a fixed
     * label, discarding the line text. Run as: {@code offset <input> <output>}.
     */

    // Step 1: Mapper. TextInputFormat (the default input format) supplies the
    // key as the byte offset of the line within the split and the value as the
    // line text; map() is called once per input record.
    public static class Mymap extends Mapper<LongWritable, Text, LongWritable, Text> {
        /**
         * Passes the record through unchanged so the offset key survives the
         * shuffle and reaches the reducer.
         *
         * @param k1      byte offset of the current line within the input split
         * @param v1      the line text
         * @param context task context used to emit output and report progress
         */
        @Override
        protected void map(LongWritable k1, Text v1, Context context)
                throws IOException, InterruptedException {
            context.write(k1, v1);
        }
    }

    // Step 2: Reducer. Called once per distinct offset key after the shuffle.
    public static class Myreduce extends Reducer<LongWritable, Text, LongWritable, Text> {
        /**
         * Emits each offset key paired with a fixed label; the grouped line
         * values in {@code v2} are intentionally ignored — only the offsets
         * are of interest.
         *
         * @param k2      a line's byte offset (the map output key)
         * @param v2      all values grouped under this offset (unused)
         * @param context task context used to emit output
         */
        @Override
        protected void reduce(LongWritable k2, Iterable<Text> v2, Context context)
                throws IOException, InterruptedException {
            // The label below is deliberate program output; do not alter it.
            context.write(k2, new Text("显示的是每行的偏移量"));
        }
    }

    // Step 3: driver — configure, assemble, and submit the MapReduce job.
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
        if (args.length < 2) {
            System.err.println("Usage: offset <input path> <output path>");
            System.exit(2);
        }
        Configuration conf = new Configuration();
        // Create the job object (named after this class for easy identification).
        Job job = Job.getInstance(conf, offset.class.getName());
        // Tell Hadoop which jar to ship to the cluster.
        job.setJarByClass(offset.class);
        // HDFS input path.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        // Mapper class and its output key/value serialization types.
        job.setMapperClass(Mymap.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(Text.class);
        // Reducer class and the job's final output key/value types.
        job.setReducerClass(Myreduce.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        // HDFS output path (must not already exist).
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        // Submit, print progress (verbose=true), and propagate job
        // success/failure to the shell via the process exit code —
        // the original discarded waitForCompletion's boolean result.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}










