package com.zhao

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapreduce.Job
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

/**
 * Description: Saves data to HBase using the new-API saveAsNewAPIHadoopDataset. <br/>
 * Copyright (c) 2021, Zhao <br/>
 * A wet person does not fear the rain. <br/>
 * Date: 2021/2/3 17:01
 *
 * @author 柒柒
 * @version 1.0
 */

object writeToHBaseNewAPI {
  /**
   * Reads CSV lines ("key,name,age") from a text file and writes them to the
   * HBase table "test" (column family "cf1") via the new Hadoop MapReduce API
   * (`saveAsNewAPIHadoopDataset`).
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Silence verbose Spark logging on the console.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)

    val sparkSession: SparkSession = SparkSession
      .builder()
      .appName("SparkToHBase")
      .master("local[*]")
      .getOrCreate()
    val sc: SparkContext = sparkSession.sparkContext

    val tableName = "test"

    // HBase connection configuration.
    val hbaseConf: Configuration = HBaseConfiguration.create()
    hbaseConf.set("hbase.zookeeper.quorum", "node01,node02,node03") // ZooKeeper quorum
    hbaseConf.set("hbase.zookeeper.property.clientPort", "2181")    // ZooKeeper client port
    // BUG FIX: saveAsNewAPIHadoopDataset uses the *new* (mapreduce) TableOutputFormat,
    // which reads the property "hbase.mapreduce.outputtable". The previously used
    // old-API constant (org.apache.hadoop.hbase.mapred.TableOutputFormat.OUTPUT_TABLE)
    // sets "hbase.mapred.outputtable", so the new output format never saw the table name.
    hbaseConf.set(org.apache.hadoop.hbase.mapreduce.TableOutputFormat.OUTPUT_TABLE, tableName)

    val jobConf: JobConf = new JobConf(hbaseConf)
    // Configure the output format on a Job so its Configuration carries the settings.
    val job: Job = Job.getInstance(jobConf)
    job.setOutputKeyClass(classOf[ImmutableBytesWritable])
    // BUG FIX: the RDD values are Put mutations; Result is the *read-side* type
    // and was the wrong value class here.
    job.setOutputValueClass(classOf[Put])
    job.setOutputFormatClass(classOf[org.apache.hadoop.hbase.mapreduce.TableOutputFormat[ImmutableBytesWritable]])

    val input = sc.textFile("a_data/a.txt")

    // Each line is expected to be exactly "key,name,age"; malformed lines will
    // fail the Array extractor with a MatchError (fail-fast, as before).
    // The key is reversed to spread monotonically increasing keys across
    // regions and avoid write hotspotting.
    val data: RDD[(ImmutableBytesWritable, Put)] = input.map { item =>
      val Array(key, name, age) = item.split(",")
      val rowkey = key.reverse
      val put: Put = new Put(Bytes.toBytes(rowkey))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("name"), Bytes.toBytes(name))
      put.addColumn(Bytes.toBytes("cf1"), Bytes.toBytes("age"), Bytes.toBytes(age))
      // Carry the row key in the writable: TableOutputFormat ignores the key,
      // but an empty writable (as before) is needlessly opaque when debugging.
      (new ImmutableBytesWritable(Bytes.toBytes(rowkey)), put)
    }
    // Persist the RDD of Puts to the HBase table using the new Hadoop API.
    data.saveAsNewAPIHadoopDataset(job.getConfiguration)
    sparkSession.stop()
  }
}

























