package hbase.mapreduce;

/**
 * Created by spark on 16-10-8.
 */

import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;


/**
 * MapReduce word count over HBase: scans the "word" table, tokenizes the
 * value of each row's first cell on whitespace, and writes per-word counts
 * into the "stat" table (column {@code result:num}, row key = the word).
 *
 * @author Luxh
 */
public class WordStat extends Configured implements Tool {

    /**
     * Configures and submits the job.
     *
     * <p>Note: {@link ToolRunner#run} has already applied
     * {@link GenericOptionsParser} to {@code args} before this method is
     * called, so no manual argument parsing is needed here. (The previous
     * re-parse that printed option "c" was debug leftover and could NPE.)
     *
     * @param args remaining command-line arguments after generic options
     * @return 0 if the job succeeded, 1 otherwise
     * @throws Exception if job setup or submission fails
     */
    @Override
    public int run(String[] args) throws Exception {
        // Submit as "root" so HDFS/HBase permission checks pass when the job
        // is launched from a dev machine. TODO(review): confirm this is still
        // wanted outside local testing.
        System.setProperty("HADOOP_USER_NAME", "root");

        Job job = Job.getInstance(getConf(), "wordStat");
        job.setJarByClass(WordStat.class);
        // Explicit jar path so the job can be submitted straight from the
        // build tree without packaging onto the classpath first.
        job.setJar("./target/hadoop-test-1.0-SNAPSHOT.jar");

        Scan scan = new Scan();
        // Scan the entire "content" column family. addFamily is the
        // documented call for this; the previous addColumn(family, null)
        // relied on a null qualifier being treated as "whole family".
        scan.addFamily(Bytes.toBytes("content"));
        // Mapper reads from table "word".
        TableMapReduceUtil.initTableMapperJob("word", scan, MyMapper.class,
                Text.class, IntWritable.class, job);
        // Reducer writes into table "stat".
        TableMapReduceUtil.initTableReducerJob("stat", MyReducer.class, job);

        return job.waitForCompletion(true) ? 0 : 1;
    }

    /**
     * Emits {@code (word, 1)} for every whitespace-separated token in the
     * first cell of each scanned row.
     *
     * <p>{@code TableMapper<Text, IntWritable>}: Text is the output key
     * type, IntWritable the output value type.
     */
    public static class MyMapper extends TableMapper<Text, IntWritable> {

        // Shared constant "1"; never mutated, so safe as static final.
        private static final IntWritable ONE = new IntWritable(1);
        // Reused per-instance to avoid allocating a Text per token.
        private final Text word = new Text();

        @Override
        protected void map(ImmutableBytesWritable key, Result value,
                           Context context)
                throws IOException, InterruptedException {
            // Guard: a row matched by the scan may carry no cells; get(0)
            // below would otherwise throw IndexOutOfBoundsException.
            if (value.isEmpty()) {
                return;
            }
            // The table has a single column family, so read the first cell.
            // CellUtil.cloneValue extracts exactly the cell's value bytes;
            // the previous Cell.getValueArray() returned the cell's whole
            // backing array (row key, family, qualifier and value fused
            // together), producing garbage tokens.
            String words = Bytes.toString(CellUtil.cloneValue(value.listCells().get(0)));
            StringTokenizer st = new StringTokenizer(words);
            while (st.hasMoreTokens()) {
                word.set(st.nextToken());
                context.write(word, ONE);
            }
        }
    }

    /**
     * Sums the counts for each word and writes one output row per word:
     * row key = the word, column {@code result:num} = the total count.
     *
     * <p>{@code TableReducer<Text, IntWritable, ImmutableBytesWritable>}:
     * Text/IntWritable are the input key/value types,
     * ImmutableBytesWritable the output key type.
     */
    public static class MyReducer extends TableReducer<Text, IntWritable, ImmutableBytesWritable> {

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values,
                              Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            // Convert the word to bytes once and reuse it for both the Put
            // and the emitted key.
            byte[] rowKey = Bytes.toBytes(key.toString());
            Put put = new Put(rowKey);
            // Store the count as a decimal string under result:num.
            // String.valueOf(sum) keeps the value human-readable in the
            // HBase shell; raw int bytes would display as \x00\x00... escapes.
            put.addColumn(Bytes.toBytes("result"), Bytes.toBytes("num"),
                    Bytes.toBytes(String.valueOf(sum)));
            context.write(new ImmutableBytesWritable(rowKey), put);
        }
    }

    /**
     * Entry point: delegates to {@link ToolRunner} so generic Hadoop options
     * (-conf, -D, -libjars, ...) are handled before {@link #run}.
     */
    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new WordStat(), args));
    }
}
