package com.yuanzheng.demo.sparkhbase

import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.{SparkConf, SparkContext}

/**
 * @author yuanzheng
 * @since 2021/4/18
 */
object SparkWriteHBase {
  /**
   * Writes two sample rows into the HBase table "student" via Spark's
   * new-API Hadoop output path.
   *
   * Corrections over the previous revision:
   *  - `saveAsNewAPIHadoopDataset` requires the new-API output format
   *    (`org.apache.hadoop.hbase.mapreduce.TableOutputFormat`); the old
   *    `mapred` variant that was imported/configured is incompatible with it.
   *  - The job's output value class must be [[Put]] (the type we emit);
   *    [[Result]] is the *read*-side type and was wrong here.
   *  - `setOutputFormatClass` was commented out, so no output format was
   *    ever set and the save would fail at runtime.
   *  - `new Job(conf)` is deprecated; `Job.getInstance` replaces it.
   *  - `Put.add` is deprecated (removed in HBase 2.x); `addColumn` replaces it.
   *  - The SparkContext is now stopped on completion.
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("SparkWriteHBase").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val tableName = "student" // destination HBase table
    // Fully qualified so the (unused) old-API import above does not clash;
    // this is the new-API constant that saveAsNewAPIHadoopDataset honors.
    sc.hadoopConfiguration.set(
      org.apache.hadoop.hbase.mapreduce.TableOutputFormat.OUTPUT_TABLE, tableName)
    val job = Job.getInstance(sc.hadoopConfiguration)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // We write Put mutations, not Result (Result is what HBase *reads* return).
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(
      classOf[org.apache.hadoop.hbase.mapreduce.TableOutputFormat[ImmutableBytesWritable]])
    val inDataRDD = sc.makeRDD(Array("2,Tom,M,26", "3,Jack,F,30")) // two sample records
    val rdd = inDataRDD.map(_.split(",")).map { arr =>
      val put = new Put(Bytes.toBytes(arr(0))) // row key
      // addColumn(family, qualifier, value); `add` is deprecated/removed in HBase 2.x.
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"), Bytes.toBytes(arr(1)))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("gender"), Bytes.toBytes(arr(2)))
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("age"), Bytes.toBytes(arr(3).toInt))
      (new ImmutableBytesWritable, put) // key is unused by TableOutputFormat; Put carries the row
    }
    rdd.saveAsNewAPIHadoopDataset(job.getConfiguration)
    sc.stop() // release local Spark resources
  }
}
