package com.ke.xjt;

import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
/**
 * WordCount driver.
 * 1. Reads input data from HDFS ({@code /data/godno}).
 * 2. Writes the aggregated word counts into the HBase table {@code wordcount}.
 */
public class XJTRunner {

    public static void main(String[] args) throws Exception {

        Configuration configuration = new Configuration(true);

        // HBase clients locate the cluster through ZooKeeper, so the quorum must be set.
        configuration.set("hbase.zookeeper.quorum", "ke02,ke03,ke04");
        // Allow submission from a Windows client to a Linux cluster (heterogeneous platforms).
        configuration.set("mapreduce.app-submission.cross-platform", "true");
        // Run with the local job runner (no YARN) — presumably for development; verify before deploying.
        configuration.set("mapreduce.framework.name", "local");

        // Job.getInstance(...) replaces the deprecated new Job(Configuration) constructor.
        Job job = Job.getInstance(configuration, "xjt-wordcount");
        job.setJarByClass(XJTRunner.class);

        // Map phase: emits <Text word, IntWritable count> pairs.
        job.setMapperClass(XJTMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);

        // Reduce phase: wires XJTReducer to write Puts into the HBase table "wordcount".
        // Last arg false = do not add HBase jars/config again (already configured above).
        TableMapReduceUtil.initTableReducerJob("wordcount", XJTReducer.class, job,
                null, null, null, null, false);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Put.class);

        // HDFS directory to read input data from.
        FileInputFormat.addInputPath(job, new Path("/data/godno"));

        // Propagate the job outcome as the process exit code (0 = success, 1 = failure)
        // instead of silently discarding waitForCompletion's result.
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
