package com.bigdata.hbase_mr.write;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * 从HBase读取学生的信息，计算每个班级的学生数
 */
/**
 * MapReduce driver that reads student rows from HBase and counts the number of
 * students per class.
 *
 * <p>Scans the {@code api_test:student} table, restricted to female students aged 23
 * or younger, feeds each row to {@code ClazzMapper} (emitting {@code Text} /
 * {@code IntWritable} pairs), and writes the aggregated counts produced by
 * {@code ClazzReducer} into the {@code api_test:t_result} table.
 */
public class ClazzCount {

    public static void main(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        // 1. Load the Hadoop/HBase configuration (picks up *-site.xml from the classpath).
        Configuration conf = new Configuration(true);

        // Extra settings required when submitting an MR job from Windows to a Linux cluster.
        conf.set("mapreduce.app-submission.cross-platform", "true");
        // Run the job in LOCAL mode; switch to "yarn" to execute on the cluster.
        conf.set("mapreduce.framework.name", "local");
        // ZooKeeper quorum that HBase clients connect through.
        conf.set("hbase.zookeeper.quorum", "node02,node03,node04");

        // 2. Create the job from the configuration.
        Job job = Job.getInstance(conf);
        // When submitting to a remote cluster, point at the built jar instead:
        // job.setJar("D:\\bigdata_project\\bigdata_2307\\bigdata_hbase\\target\\bigdata_hbase-1.0-SNAPSHOT.jar");

        // 3. Main class and job name.
        job.setJarByClass(ClazzCount.class);
        job.setJobName("ClazzCount");

        // Scan describing which HBase rows/columns the mapper receives.
        Scan scan = new Scan();

        // Only count female students aged <= 23.
        // NOTE(review): BinaryComparator compares raw bytes lexicographically, so "23"
        // only acts as a numeric upper bound if ages are stored as equal-width strings
        // — confirm the table's value encoding.
        SingleColumnValueFilter filter1 = new SingleColumnValueFilter(
                "info".getBytes(StandardCharsets.UTF_8),
                "age".getBytes(StandardCharsets.UTF_8),
                CompareOperator.LESS_OR_EQUAL,
                new BinaryComparator("23".getBytes(StandardCharsets.UTF_8)));

        // Use an explicit charset: "女" is multi-byte, and the platform default charset
        // (e.g. GBK on a Chinese-locale Windows machine, where this driver is meant to
        // run) would produce bytes that never match HBase's UTF-8-stored values.
        SingleColumnValueFilter filter2 = new SingleColumnValueFilter(
                "info".getBytes(StandardCharsets.UTF_8),
                "gender".getBytes(StandardCharsets.UTF_8),
                CompareOperator.EQUAL,
                new BinaryComparator("女".getBytes(StandardCharsets.UTF_8)));

        // FilterList defaults to MUST_PASS_ALL, i.e. filter1 AND filter2.
        FilterList filterList = new FilterList(filter1, filter2);
        scan.setFilter(filterList);

        // Source table to scan.
        TableName tableName = TableName.valueOf("api_test:student");

        // Wire up the mapper and its output key/value types.
        TableMapReduceUtil.initTableMapperJob(
                tableName,           // input HBase table name
                scan,                // Scan instance to control CF and attribute selection
                ClazzMapper.class,   // mapper
                Text.class,          // mapper output key
                IntWritable.class,   // mapper output value
                job);

        // Reducer writes the per-class counts into the result table.
        TableMapReduceUtil.initTableReducerJob("api_test:t_result", ClazzReducer.class, job,
                null, null, null, null, false
        );

        // Reduce task count defaults to 1; override here if needed.
        // job.setNumReduceTasks(2);

        // Submit and monitor the job; propagate failure through the process exit code
        // (the original discarded the success flag and always exited 0).
        boolean success = job.waitForCompletion(true);
        System.exit(success ? 0 : 1);
    }
}
