package cn.itcast.tags.spark.hbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation, TableScan}
import org.apache.spark.sql.types.StructType


/**
 * Custom external data source: a Relation implementation that loads data
 * from an HBase table and saves data back to an HBase table.
 */
class HBaseRelation(context: SQLContext, params: Map[String, String], userSchema: StructType)
  extends BaseRelation with TableScan with InsertableRelation with Serializable {

  // Keys used to look up connection settings in `params`, and the
  // corresponding HBase client property names they are written to.
  val HBASE_ZK_QUORUM_KEY: String = "hbase.zookeeper.quorum"
  val HBASE_ZK_QUORUM_VALUE: String = "zkHosts"
  val HBASE_ZK_PORT_KEY: String = "hbase.zookeeper.property.clientPort"
  val HBASE_ZK_PORT_VALUE: String = "zkPort"
  val HBASE_TABLE: String = "hbaseTable"
  val HBASE_TABLE_FAMILY: String = "family"
  val SPERATOR: String = ","
  val HBASE_TABLE_SELECT_FIELDS: String = "selectFields"
  val HBASE_TABLE_ROWKEY_NAME: String = "rowKeyColumn"

  /**
   * Entry point for Spark SQL load/save operations, analogous to SparkSession.
   */
  override def sqlContext: SQLContext = context

  /**
   * Schema of the DataFrame/Dataset this relation produces.
   */
  override def schema: StructType = userSchema

  /**
   * Builds an HBase client Configuration with the ZooKeeper quorum and client
   * port taken from `params`. Shared by both the read and the write path.
   */
  private def hbaseConf(): Configuration = {
    val conf = HBaseConfiguration.create()
    conf.set(HBASE_ZK_QUORUM_KEY, params(HBASE_ZK_QUORUM_VALUE))
    conf.set(HBASE_ZK_PORT_KEY, params(HBASE_ZK_PORT_VALUE))
    conf
  }

  /**
   * Loads data from the HBase table into an RDD[Row]; combined with `schema`
   * this becomes the DataFrame returned to the caller.
   *
   * Only the configured column family and the columns listed under
   * `selectFields` are scanned. Cells missing in a row come back as null
   * (Bytes.toString(null) returns null), so the resulting Row may contain nulls.
   *
   * @return one Row per HBase row, fields ordered as in `selectFields`
   */
  override def buildScan(): RDD[Row] = {
    // 1. Client configuration (ZK quorum/port) plus the input table name.
    val conf = hbaseConf()
    conf.set(TableInputFormat.INPUT_TABLE, params(HBASE_TABLE))

    // 2. Restrict the scan to the requested family and columns so only the
    //    needed cells are shipped from the region servers.
    val cfBytes: Array[Byte] = Bytes.toBytes(params(HBASE_TABLE_FAMILY))
    val fields: Array[String] = params(HBASE_TABLE_SELECT_FIELDS).split(SPERATOR)
    val scan = new Scan()
    scan.addFamily(cfBytes)
    fields.foreach { field =>
      scan.addColumn(cfBytes, Bytes.toBytes(field))
    }

    // TableInputFormat expects the Scan serialized as a Base64 string.
    val scanStr: String = Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray)
    conf.set(TableInputFormat.SCAN, scanStr)

    // 3. Read (rowkey, Result) pairs via the MapReduce InputFormat.
    val dataRDD: RDD[(ImmutableBytesWritable, Result)] = sqlContext.sparkContext.newAPIHadoopRDD(
      conf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable], // rowkey
      classOf[Result]
    )

    // 4. DataFrame = RDD[Row] + Schema: unpack each Result into a Row whose
    //    field order matches `selectFields` (and therefore `schema`).
    dataRDD.map { case (_, result) =>
      val values: Seq[String] = fields.map { field =>
        // getValue returns null for absent cells; Bytes.toString passes null through.
        Bytes.toString(result.getValue(cfBytes, Bytes.toBytes(field)))
      }
      Row.fromSeq(values)
    }
  }

  /**
   * Saves the DataFrame to the HBase table via TableOutputFormat.
   *
   * All columns are written as strings into the configured column family,
   * using the `rowKeyColumn` value as the rowkey. Null column values are
   * skipped (HBase stores no cell for them) instead of raising an NPE.
   *
   * @param data      dataset to persist; must contain the rowkey column
   * @param overwrite ignored — HBase Puts are upserts keyed by rowkey, so
   *                  existing cells with the same rowkey/column are always
   *                  replaced regardless of this flag
   */
  override def insert(data: DataFrame, overwrite: Boolean): Unit = {
    // 1. Client configuration (ZK quorum/port) plus the output table name.
    val conf: Configuration = hbaseConf()
    conf.set(TableOutputFormat.OUTPUT_TABLE, params(HBASE_TABLE))

    // 2. Convert the DataFrame into RDD[(rowkey, Put)].
    val cfBytes: Array[Byte] = Bytes.toBytes(params(HBASE_TABLE_FAMILY))
    val columns: Array[String] = data.columns

    val datasRDD: RDD[(ImmutableBytesWritable, Put)] = data.rdd.map { row =>
      val rowKey: String = row.getAs[String](params(HBASE_TABLE_ROWKEY_NAME))
      // Fail fast with a clear message instead of an opaque NPE from Bytes.toBytes.
      require(rowKey != null, s"Row key column '${params(HBASE_TABLE_ROWKEY_NAME)}' must not be null")
      val rkBytes: Array[Byte] = Bytes.toBytes(rowKey)

      val put = new Put(rkBytes)
      columns.foreach { column =>
        val value: String = row.getAs[String](column)
        // Bytes.toBytes(null) would throw NPE; skip null cells entirely.
        if (value != null) {
          put.addColumn(cfBytes, Bytes.toBytes(column), Bytes.toBytes(value))
        }
      }
      (new ImmutableBytesWritable(rkBytes), put)
    }

    // 3. Write through the MapReduce OutputFormat; the path is a throwaway
    //    placeholder required by the API — TableOutputFormat writes to HBase,
    //    not to the filesystem.
    datasRDD.saveAsNewAPIHadoopFile(
      s"datas/hbase/output-${System.nanoTime()}",
      classOf[ImmutableBytesWritable],
      classOf[Put],
      classOf[TableOutputFormat[ImmutableBytesWritable]],
      conf
    )
  }
}
