package com.company.HDFS;
 
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
 

public class HdfsToHBase
{
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException
    {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.rootdir","hdfs://node1.itcast.cn:8020/hbase");  //约束hbase.root的路径，与hadoop的配置文件一致
        conf.set(TableOutputFormat.OUTPUT_TABLE ,"hdfs://192.168.88.100/output/demo");
        Job job = Job.getInstance(conf,HdfsToHBase.class.getSimpleName());  //对job的约束
        TableMapReduceUtil.addDependencyJars(job);
        job.setJarByClass(HdfsToHBase.class);
 
        job.setMapperClass(HdfsToHBaseMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
 
        job.setReducerClass(HdfsToHBaseReducer.class);
 
        FileInputFormat.addInputPath(job,new Path("J:\\the_efforts_paid_offf\\HDFS_HBase_HiveApi\\src\\main\\java\\com\\company\\datas\\demo"));
        job.setOutputFormatClass(org.apache.hadoop.hbase.mapreduce.TableOutputFormat.class); //这里设置的格式特别
 
        Boolean b = job.waitForCompletion(true);
        if(!b)
        {
            System.err.println("failed");
        }
        else
            System.out.println("finished!");
    }
 
    public static class HdfsToHBaseMapper extends Mapper<LongWritable,Text,Text,Text>
    {
        private Text outKey = new Text();
        private Text outValue = new Text();
        public void map(LongWritable key,Text value,Context context) throws IOException, InterruptedException
        {
            String[] splits = value.toString().split(",");
            outKey.set(splits[0]);
            outValue.set(splits[1]);
            context.write(outKey,outValue);
        }
    }
 
    public static class HdfsToHBaseReducer extends TableReducer<Text,Text,NullWritable>
    {
        public void reduce(Text k2, Iterable<Text> v2s,Context context) throws IOException, InterruptedException
        {
            Put put = new Put(k2.getBytes());
            for(Text v2 : v2s)
            {
                String[] splis = v2.toString().split(",");
                if(splis[0] != null && !"NULL".equals(splis[0]))
                {
                    put.addColumn("f1".getBytes(),"name".getBytes(),splis[0].getBytes());
                }
                if(splis[1] != null && !"NULL".equals(splis[1]))
                {
                    put.addColumn("f1".getBytes(),"age".getBytes(),splis[1].getBytes());
                }
            }
            context.write(NullWritable.get(),put);
        }
    }
}