package cn.lecosa.spark.hbase

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.spark._
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
// Write data to HBase using saveAsNewAPIHadoopDataset
object TestHBase3 {

  /** Writes three demo rows into the HBase "account" table through the
    * new-API `TableOutputFormat`, driven by `RDD.saveAsNewAPIHadoopDataset`.
    *
    * Assumes an HBase cluster reachable via the ZooKeeper quorum below and an
    * existing table "account" with column family "cf" — TODO confirm.
    */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("HBaseTest").setMaster("local")
    val sc = new SparkContext(sparkConf)

    val tablename = "account"

    // HBase connection settings must go on the Hadoop Configuration, not on
    // SparkConf: TableOutputFormat reads them from the job configuration, so
    // setting them on SparkConf (especially after the SparkContext is built)
    // has no effect on the output format.
    val hadoopConf = sc.hadoopConfiguration
    hadoopConf.set("hbase.zookeeper.quorum", "cdh2,cdh3,cdh4")
    hadoopConf.set("hbase.zookeeper.property.clientPort", "2181")
    hadoopConf.set(TableOutputFormat.OUTPUT_TABLE, tablename)

    // Job.getInstance replaces the deprecated `new Job(conf)` constructor.
    val job = Job.getInstance(hadoopConf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // The values emitted below are Puts (Mutations); Result is a read-side
    // type and was wrong here.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])

    // Demo input: "rowkey,name,age" CSV strings.
    val indataRDD = sc.makeRDD(Array("1,jack,15", "2,Lily,16", "3,mike,16"))
    val rdd = indataRDD.map(_.split(',')).map { arr =>
      // Row key = first field; column family "cf", qualifiers name/age.
      val put = new Put(Bytes.toBytes(arr(0)))
      // NOTE(review): Put.add is the pre-1.x HBase API; on HBase 1.x+ this
      // should become addColumn — confirm the cluster's client version.
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("name"), Bytes.toBytes(arr(1)))
      // Age is stored as a 4-byte big-endian int, not as text.
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("age"), Bytes.toBytes(arr(2).toInt))
      // TableOutputFormat ignores the key; the row key lives in the Put.
      (new ImmutableBytesWritable, put)
    }

    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)

    sc.stop() // release the SparkContext so the local app exits cleanly
  }

}