package com.shujia.hbase;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.IOException;

public class Demo3HbaseToHdfs {

    /*
     * 利用mapreduce读取hbase中的数据统计每个班级学生的人数
     *
     */


    /**
     * Mapper over HBase rows: emits (clazz, 1) for each student row so the
     * reducer can count students per class.
     *
     * <p>Input key is the HBase row key, input value is the full {@link Result}
     * for that row.
     */
    public static class HbaseTodHdfsMapper extends TableMapper<Text, LongWritable> {

        // Runs once per HBase row.
        @Override
        protected void map(ImmutableBytesWritable key, Result result, Context context) throws IOException, InterruptedException {

            // Read the class name from column family "info", qualifier "clazz".
            // Bytes.toBytes is charset-safe (always UTF-8), unlike String.getBytes()
            // which depends on the platform default charset.
            byte[] value = result.getValue(Bytes.toBytes("info"), Bytes.toBytes("clazz"));

            // Skip rows that do not have the clazz column; Bytes.toString(null)
            // would otherwise yield null and new Text(null) throws NPE.
            if (value == null) {
                return;
            }

            String clazz = Bytes.toString(value);

            // Emit (clazz, 1) to the reduce side.
            context.write(new Text(clazz), new LongWritable(1));
        }
    }

    /**
     * Reducer: sums the 1-counts for each class and writes one output line
     * per class in the form {@code "<clazz>,<count>"} with a null key.
     */
    public static class HbaseTodHdfsReduce extends Reducer<Text, LongWritable, NullWritable, Text> {

        // Runs once per distinct key (class name).
        @Override
        protected void reduce(Text key, Iterable<LongWritable> values, Context context) throws IOException, InterruptedException {

            String clazz = key.toString();

            // Count students in this class.
            // Primitive long avoids the Long autoboxing the original did on
            // every iteration (sum = sum + value.get() unboxes and reboxes).
            long sum = 0L;

            for (LongWritable value : values) {
                sum += value.get();
            }

            String line = clazz + "," + sum;

            context.write(NullWritable.get(), new Text(line));
        }
    }


    /**
     * Job driver: configures an HBase-sourced MapReduce job that counts
     * students per class in table {@code shujia:student} and writes the
     * result to HDFS at {@code /data/student/clazz_num}.
     *
     * <p>Exits with a non-zero status code when the job fails, so schedulers
     * and shell scripts can detect failure (the original ignored the result
     * of {@code waitForCompletion}).
     */
    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration();

        // ZooKeeper quorum used by the HBase client to locate the cluster.
        configuration.set("hbase.zookeeper.quorum", "master:2181,node1:2181,node2:2181");

        Job job = Job.getInstance(configuration);

        // Number of reduce tasks (defaults to 1 if not set).
        job.setNumReduceTasks(2);

        job.setJarByClass(Demo3HbaseToHdfs.class);
        job.setJobName("hbaseToHdfs");

        // Configure the HBase table scan and the mapper in one call.
        // A filter could be attached to the Scan here to push work server-side.
        Scan scan = new Scan();
        TableMapReduceUtil.initTableMapperJob(
                "shujia:student",
                scan,
                HbaseTodHdfsMapper.class,
                Text.class,
                LongWritable.class,
                job);

        // Configure the reducer and its output types.
        job.setReducerClass(HbaseTodHdfsReduce.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);

        // Output path on HDFS. NOTE(review): the job fails if this path
        // already exists — delete it beforehand or make the path unique.
        FileOutputFormat.setOutputPath(job, new Path("/data/student/clazz_num"));

        // Run the job and propagate success/failure through the exit code.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }


}
