package cn.itcast.tags.spark.sql

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, InsertableRelation, TableScan}
import org.apache.spark.sql.types.StructType

/**
 * Spark SQL external data source relation for HBase.
 *
 * Implements [[TableScan]] (full-table read via `TableInputFormat`) and
 * [[InsertableRelation]] (write via `TableOutputFormat`). All cell values are
 * treated as UTF-8 strings.
 *
 * @param params expected keys: `zkHosts`, `zkPort`, `hbaseTable`, `family`,
 *               `selectFields` (comma-separated), `rowKeyColumn`
 * @param userSchema schema exposed to Spark SQL for the scanned rows
 */
class HBaseRelation(context: SQLContext, params: Map[String, String], userSchema: StructType)
  extends BaseRelation with TableScan with InsertableRelation with Serializable {

  // HBase / ZooKeeper configuration keys and the `params` keys they map from.
  val HBASE_ZK_QUORUM_KEY: String = "hbase.zookeeper.quorum"
  val HBASE_ZK_QUORUM_VALUE: String = "zkHosts"
  val HBASE_ZK_PORT_KEY: String = "hbase.zookeeper.property.clientPort"
  val HBASE_ZK_PORT_VALUE: String = "zkPort"
  val HBASE_TABLE: String = "hbaseTable"
  val HBASE_TABLE_FAMILY: String = "family"
  val SPERATOR: String = ","
  val HBASE_TABLE_SELECT_FIELDS: String = "selectFields"
  val HBASE_TABLE_ROWKEY_NAME: String = "rowKeyColumn"

  override def sqlContext: SQLContext = context

  override def schema: StructType = userSchema

  /**
   * Full scan of the configured HBase table, restricted to the configured
   * column family and the columns listed in `selectFields`.
   *
   * @return one [[Row]] per HBase row; each field is the cell value decoded as
   *         a UTF-8 string (null when the cell is absent, since
   *         `Bytes.toString(null)` yields null)
   */
  override def buildScan(): RDD[Row] = {
    val fields: Array[String] = params(HBASE_TABLE_SELECT_FIELDS).split(SPERATOR)

    val conf: Configuration = new Configuration()
    // 1.a ZooKeeper quorum HBase depends on
    conf.set(HBASE_ZK_QUORUM_KEY, params(HBASE_ZK_QUORUM_VALUE))
    conf.set(HBASE_ZK_PORT_KEY, params(HBASE_ZK_PORT_VALUE))
    // 1.b name of the table to read
    conf.set(TableInputFormat.INPUT_TABLE, params(HBASE_TABLE))

    // Restrict the scan server-side to the requested family and columns.
    val scan: Scan = new Scan()
    val familyBytes: Array[Byte] = Bytes.toBytes(params(HBASE_TABLE_FAMILY))
    scan.addFamily(familyBytes)
    fields.foreach {
      field => scan.addColumn(familyBytes, Bytes.toBytes(field))
    }
    // TableInputFormat expects the Scan serialized as a Base64 protobuf string.
    conf.set(TableInputFormat.SCAN, Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray))

    /**
     * def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
     * conf: Configuration = hadoopConfiguration,
     * fClass: Class[F],
     * kClass: Class[K],
     * vClass: Class[V]): RDD[(K, V)]
     */
    val datasRDD: RDD[(ImmutableBytesWritable, Result)] = context.sparkContext.newAPIHadoopRDD(
      conf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result]
    )

    // Project each HBase Result into a Row, in selectFields order.
    datasRDD.map {
      case (_, result) =>
        val values: Seq[String] = fields.map {
          field => Bytes.toString(result.getValue(familyBytes, Bytes.toBytes(field)))
        }
        Row.fromSeq(values)
    }
  }

  /**
   * Writes the DataFrame to the configured HBase table. Every column of the
   * DataFrame is written as a string cell under the configured family; the row
   * key is taken from the column named by `rowKeyColumn`.
   *
   * NOTE(review): the `overwrite` flag is ignored — HBase puts always upsert.
   * Confirm callers do not rely on truncate-before-insert semantics.
   */
  override def insert(dataFrame: DataFrame, overwrite: Boolean): Unit = {
    val columns: Array[String] = dataFrame.columns
    // Loop-invariant: compute the family bytes once, not once per row.
    val familyBytes: Array[Byte] = Bytes.toBytes(params(HBASE_TABLE_FAMILY))
    val rowKeyColumn: String = params(HBASE_TABLE_ROWKEY_NAME)

    val putsRDD: RDD[(ImmutableBytesWritable, Put)] = dataFrame.rdd.map {
      row =>
        val rowKey: Array[Byte] = Bytes.toBytes(row.getAs[String](rowKeyColumn))
        val put = new Put(rowKey)
        columns.foreach {
          column =>
            val value: String = row.getAs[String](column)
            // Skip null cells: Bytes.toBytes(null) would throw a NullPointerException,
            // and HBase naturally represents absent values by omitting the cell.
            if (value != null) {
              put.addColumn(familyBytes, Bytes.toBytes(column), Bytes.toBytes(value))
            }
        }
        (new ImmutableBytesWritable(put.getRow), put)
    }

    val conf: Configuration = new Configuration()
    // 2.1 ZooKeeper quorum HBase depends on
    conf.set(HBASE_ZK_QUORUM_KEY, params(HBASE_ZK_QUORUM_VALUE))
    conf.set(HBASE_ZK_PORT_KEY, params(HBASE_ZK_PORT_VALUE))
    // 2.2 name of the table to write
    conf.set(TableOutputFormat.OUTPUT_TABLE, params(HBASE_TABLE))

    // Bug fix: the path previously interpolated the constant HBASE_TABLE
    // ("hbaseTable") instead of the actual table name. TableOutputFormat
    // ignores the path, but use the real table name for traceability.
    putsRDD.saveAsNewAPIHadoopFile(
      s"/apps/hbase/${params(HBASE_TABLE)}-" + System.currentTimeMillis(),
      classOf[ImmutableBytesWritable],
      classOf[Put],
      classOf[TableOutputFormat[ImmutableBytesWritable]],
      conf
    )
  }
}
