package com.oreilly.learningsparkexamples.scala

import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableInputFormat

object LoadSimpleHBase {

  /** Scans an HBase table through Spark's `newAPIHadoopRDD` and prints the row count.
   *
   * Usage: LoadSimpleHBase &lt;master&gt; [table] [zookeeperQuorum]
   *   master           Spark master URL (e.g. local[*], spark://host:7077)
   *   table            HBase table to scan (default: "users")
   *   zookeeperQuorum  comma-separated ZooKeeper hosts
   *                    (default: "centos1,centos2,centos3")
   */
  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    if (args.isEmpty) {
      System.err.println("Usage: LoadSimpleHBase <master> [table] [zookeeperQuorum]")
      sys.exit(1)
    }

    val master = args(0)
    // lift(i) yields None for missing optional args, so the original
    // hard-coded values remain the defaults (backward compatible).
    val table  = args.lift(1).getOrElse("users")
    val quorum = args.lift(2).getOrElse("centos1,centos2,centos3")

    val sc = new SparkContext(master, "LoadSimpleHBase", System.getenv("SPARK_HOME"))
    try {
      val conf = HBaseConfiguration.create()
      conf.set("hbase.zookeeper.quorum", quorum)
      conf.set("hbase.zookeeper.property.clientPort", "2181")
      conf.set(TableInputFormat.INPUT_TABLE, table)  // which table to scan

      println("-=============--- start reading hbase ---==============")

      // NOTE: newAPIHadoopRDD only *defines* the RDD; the actual scan is
      // deferred until an action (count() below) forces evaluation.
      val rdd = sc.newAPIHadoopRDD(conf,
        classOf[TableInputFormat],
        classOf[ImmutableBytesWritable],
        classOf[org.apache.hadoop.hbase.client.Result])

      println("-=============--- defined hbase rdd ---==============")

      println("-=============--- get data count: " + rdd.count())
    } finally {
      // Always release executors/cluster resources, even if the scan fails.
      sc.stop()
    }
  }

}