package com.bblei.hbaseDemo.mapReduceDemo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

import java.io.IOException;

import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.initTB;
import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.tableName2;
import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.tableName;
import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.col;
import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.colf;
import static com.bblei.hbaseDemo.mapReduceDemo.HBaseMr.config;

public class MyDriver {

    /**
     * Driver entry point: configures the Hadoop/HBase cluster addresses,
     * initializes the source and result tables, wires the HBase table mapper
     * and reducer, and submits the job, exiting with the job's status.
     *
     * @param args command-line arguments (unused)
     * @throws IOException            if job setup or HBase table access fails
     * @throws ClassNotFoundException if a job class cannot be resolved at submit time
     * @throws InterruptedException   if interrupted while waiting for job completion
     */
    public static void main(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        // FIX: the original key "df.default.name" is a typo that Hadoop
        // silently ignores; the correct legacy property (matching the other
        // Hadoop 1.x-era keys used below) is "fs.default.name".
        // Set the default HDFS path.
        config.set("fs.default.name", "hdfs://master:8020/");
        config.set("hadoop.job.ugi", "root,root");        // user name and group
        config.set("mapred.job.tracker", "master:8021");  // JobTracker address
        // Initialize tables: the source table holding the raw data and the
        // target table that will receive the computed results.
        initTB();
        // Create the job; the jar to ship is located via the given class.
        Job job = Job.getInstance(config);
        job.setJarByClass(HBaseMr.class);
        // Scan acting as the data source; restrict it to the single
        // column family/qualifier the mapper consumes.
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes(colf), Bytes.toBytes(col));
        // Mapper reading from HBase: source table name, scan, mapper class,
        // and the mapper's output key/value types.
        TableMapReduceUtil.initTableMapperJob(tableName, scan, MyMapper.class,
                Text.class, IntWritable.class, job);
        // Reducer writing back to HBase: target table name, reducer class, job.
        TableMapReduceUtil.initTableReducerJob(tableName2, MyReducer.class, job);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }

}
