package cn.itcast.tags.tools

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
 * Utilities for reading from and writing to HBase via Spark.
 */
object HBaseTools {

  /**
   * Loads data from an HBase table into a DataFrame, scanning the requested
   * qualifiers of a single column family. Every value is decoded as a UTF-8
   * string; missing cells become null (all schema columns are nullable).
   *
   * @param spark   active SparkSession used to build the resulting DataFrame
   * @param zkHosts Zookeeper quorum hosts for the HBase client
   * @param zkPort  Zookeeper client port
   * @param table   name of the HBase table to scan
   * @param family  column family that holds the requested fields
   * @param fields  column qualifiers to read; they define the DataFrame schema,
   *                one nullable StringType column per field, in the given order
   * @return DataFrame with one string column per requested field
   */
  def read(spark: SparkSession, zkHosts: String, zkPort: String, table: String, family: String, fields: Seq[String]): DataFrame = {

    val sc: SparkContext = spark.sparkContext

    // 1. Build the HBase client configuration (Zookeeper address and port).
    val conf = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", zkHosts)
    conf.set("hbase.zookeeper.property.clientPort", zkPort)

    // 2. Tell TableInputFormat which table to read.
    conf.set(TableInputFormat.INPUT_TABLE, table)

    // Restrict the scan to the requested family/qualifiers so only the
    // needed cells are shipped from the region servers.
    val scan = new Scan()
    val cfBytes: Array[Byte] = Bytes.toBytes(family)
    scan.addFamily(cfBytes)
    fields.foreach { field =>
      scan.addColumn(cfBytes, Bytes.toBytes(field))
    }

    // TableInputFormat expects the Scan serialized as a Base64 string.
    val scanStr: String = Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray)
    conf.set(TableInputFormat.SCAN, scanStr)

    // 3. Load rows from HBase as (rowkey, Result) pairs.
    val dataRDD: RDD[(ImmutableBytesWritable, Result)] = sc.newAPIHadoopRDD(
      conf, //
      classOf[TableInputFormat], //
      classOf[ImmutableBytesWritable], // rowkey
      classOf[Result]
    )

    // DataFrame = RDD[Row] + schema.
    // Unpack each HBase Result into a Row, one string value per field.
    // Bytes.toString(null) yields null for absent cells, matching the
    // nullable schema below.
    val rowsRDD: RDD[Row] = dataRDD.map {
      case (_, result) =>
        val values: Seq[String] = fields.map { field =>
          val value: Array[Byte] = result.getValue(cfBytes, Bytes.toBytes(field))
          Bytes.toString(value)
        }
        // Wrap the field values into a Row, preserving field order.
        Row.fromSeq(values)
    }

    // Schema: one nullable string column per requested field.
    val schema: StructType = StructType(
      fields.map { field =>
        StructField(field, StringType, nullable = true)
      }
    )

    spark.createDataFrame(rowsRDD, schema)
  }

  /**
   * Persists a DataFrame into an HBase table. Every DataFrame column
   * (including the rowkey column, mirroring the previous behavior) is written
   * as a string-valued cell under the given column family; null cells are
   * skipped, since an absent column is HBase's representation of null.
   *
   * @param dataFrame    data to write; all columns are read as strings
   * @param zkHosts      Zookeeper quorum hosts for the HBase client
   * @param zkPort       Zookeeper client port
   * @param table        destination HBase table name (must already exist)
   * @param family       column family the cells are written under
   * @param rowKeyColumn DataFrame column whose value becomes the HBase rowkey;
   *                     must be non-null for every row
   */
  def write(dataFrame: DataFrame, zkHosts: String, zkPort: String, table: String, family: String, rowKeyColumn: String): Unit = {

    // 1. HBase client configuration: Zookeeper location plus output table.
    val conf: Configuration = HBaseConfiguration.create()
    conf.set("hbase.zookeeper.quorum", zkHosts)
    conf.set("hbase.zookeeper.property.clientPort", zkPort)
    conf.set(TableOutputFormat.OUTPUT_TABLE, table)

    // 2. Convert the DataFrame into RDD[(rowkey, Put)].
    val cfBytes: Array[Byte] = Bytes.toBytes(family)
    val columns: Array[String] = dataFrame.columns

    val datasRDD: RDD[(ImmutableBytesWritable, Put)] = dataFrame.rdd.map { row =>
      val rowKey: String = row.getAs[String](rowKeyColumn)
      val rkBytes: Array[Byte] = Bytes.toBytes(rowKey)

      // Build one Put per row, keyed by the rowkey column's value.
      val put = new Put(rkBytes)
      columns.foreach { column =>
        val value: String = row.getAs[String](column)
        // FIX: nullable DataFrame columns can yield null here, and
        // Bytes.toBytes(null: String) throws NullPointerException in the
        // executor, failing the whole job. Skip null cells instead — in
        // HBase, an absent column is the idiomatic encoding of null.
        if (value != null) {
          put.addColumn(cfBytes, Bytes.toBytes(column), Bytes.toBytes(value))
        }
      }
      (new ImmutableBytesWritable(rkBytes), put)
    }

    // 3. Write through TableOutputFormat. The path argument is required by
    // the API but unused by TableOutputFormat (data goes to the HBase table).
    datasRDD.saveAsNewAPIHadoopFile(
      s"datas/hbase/output-${System.nanoTime()}", //
      classOf[ImmutableBytesWritable], //
      classOf[Put], //
      classOf[TableOutputFormat[ImmutableBytesWritable]], //
      conf
    )
  }

}
