package com.shujia.utils

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.{HBaseConfiguration, TableName}
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Put, Table}
import org.apache.spark.sql.{DataFrame, Row}

object HBaseTools {

  /** Writes one tag column per row of `df` into an existing HBase table.
    *
    * Each row must expose a non-null `id` column (used as the HBase row key)
    * and a `tagName` column (written as `cf:tagName`). Rows with a null id or
    * tag value are skipped — HBase `Put` rejects null keys/values.
    *
    * @param df        source DataFrame; must contain columns `id` and `tagName`
    * @param tableName HBase table name — the table must already exist
    * @param cf        column family to write into
    * @param tagName   column in `df` to read, also used as the HBase qualifier
    * @param zkQuorum  ZooKeeper quorum for the HBase cluster
    *                  (defaults to the original hard-coded cluster)
    */
  def saveToHBase(
      df: DataFrame,
      tableName: String,
      cf: String,
      tagName: String,
      zkQuorum: String = "master:2181,node1:2181,node2:2181"): Unit = {
    df.foreachPartition((iter: Iterator[Row]) => {
      // Connection is not serializable, so it must be created on the executor,
      // inside foreachPartition — one connection per partition.
      val conf: Configuration = HBaseConfiguration.create()
      conf.set("hbase.zookeeper.quorum", zkQuorum)
      val conn: Connection = ConnectionFactory.createConnection(conf)
      try {
        val userProfile: Table = conn.getTable(TableName.valueOf(tableName))
        try {
          // Buffer Puts and flush them in one batched call per partition:
          // far cheaper than one RPC per row.
          val puts = new java.util.ArrayList[Put]()
          iter.foreach { row =>
            val id: String = row.getAs[String]("id")
            val tag: String = row.getAs[String](tagName)
            if (id != null && tag != null) {
              // Explicit UTF-8 instead of the platform-default charset,
              // so row keys are stable across JVM configurations.
              val put = new Put(id.getBytes("UTF-8"))
              put.addColumn(cf.getBytes("UTF-8"), tagName.getBytes("UTF-8"), tag.getBytes("UTF-8"))
              puts.add(put)
            }
          }
          if (!puts.isEmpty) userProfile.put(puts)
        } finally {
          userProfile.close() // always release the table, even on write failure
        }
      } finally {
        conn.close() // always release the connection — original leaked it
      }
    })
  }
}
