package cn.itcast.tags.tools


import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.client.{Put, Result, Scan}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.{TableInputFormat, TableOutputFormat}
import org.apache.hadoop.hbase.protobuf.ProtobufUtil
import org.apache.hadoop.hbase.util.{Base64, Bytes}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object HBaseTools {

  /**
   * Persists a DataFrame into an HBase table through `TableOutputFormat`.
   *
   * Every DataFrame column is written as a String-encoded cell under the given
   * column family; the value of `rowKeyColumn` becomes the HBase row key.
   * Null cell values are skipped (HBase has no notion of a null cell, and
   * `Bytes.toBytes(null)` would throw a NullPointerException).
   *
   * @param dataFrame    data to persist; all referenced columns are read as String
   * @param zks          ZooKeeper quorum hosts (hbase.zookeeper.quorum)
   * @param port         ZooKeeper client port (hbase.zookeeper.property.clientPort)
   * @param table        target HBase table name (must already exist)
   * @param family       column family all cells are written into
   * @param rowKeyColumn DataFrame column used as the row key; must be non-null
   *                     for every row
   */
  def write(dataFrame: DataFrame, zks: String, port: String, table: String, family: String, rowKeyColumn: String): Unit = {

    val columns: Array[String] = dataFrame.columns
    // Computed once on the driver instead of per row inside the closure;
    // Array[Byte] is serializable so it ships cleanly to executors.
    val familyBytes: Array[Byte] = Bytes.toBytes(family)

    val putsRDD: RDD[(ImmutableBytesWritable, Put)] = dataFrame.rdd.map {
      row =>
        val rowKey: Array[Byte] = Bytes.toBytes(row.getAs[String](rowKeyColumn))

        val put = new Put(rowKey)
        columns.foreach {
          column =>
            val value: String = row.getAs[String](column)
            // Skip nulls: Bytes.toBytes throws NPE on null input.
            if (value != null) {
              put.addColumn(familyBytes, Bytes.toBytes(column), Bytes.toBytes(value))
            }
        }
        (new ImmutableBytesWritable(put.getRow), put)
    }

    val conf: Configuration = new Configuration()
    conf.set("hbase.zookeeper.quorum", zks)
    conf.set("hbase.zookeeper.property.clientPort", port)
    conf.set(TableOutputFormat.OUTPUT_TABLE, table)

    // TableOutputFormat ignores the output path, but the Hadoop API requires
    // one; a timestamped path keeps concurrent runs from colliding.
    putsRDD.saveAsNewAPIHadoopFile(s"/apps/hbase/$table-" + System.currentTimeMillis(),
      classOf[ImmutableBytesWritable],
      classOf[Put],
      classOf[TableOutputFormat[ImmutableBytesWritable]],
      conf
    )
  }

  /**
   * Reads selected columns of an HBase table into a DataFrame of Strings.
   *
   * Each requested field becomes a nullable StringType column; a cell missing
   * in HBase surfaces as null (`Bytes.toString(null)` returns null).
   *
   * @param spark  active SparkSession used to build the DataFrame
   * @param zks    ZooKeeper quorum hosts (hbase.zookeeper.quorum)
   * @param port   ZooKeeper client port (hbase.zookeeper.property.clientPort)
   * @param table  source HBase table name
   * @param family column family the fields are read from
   * @param fields qualifier names to fetch; also defines the output schema order
   * @return DataFrame with one nullable String column per requested field
   */
  def read(spark: SparkSession, zks: String,
           port: String, table: String,
           family: String, fields: Seq[String]): DataFrame = {

    val conf: Configuration = new Configuration()
    conf.set("hbase.zookeeper.quorum", zks)
    conf.set("hbase.zookeeper.property.clientPort", port)
    conf.set(TableInputFormat.INPUT_TABLE, table)

    // Restrict the scan to exactly the family/qualifiers requested so only
    // the needed cells travel over the wire.
    val familyBytes: Array[Byte] = Bytes.toBytes(family)
    val scan: Scan = new Scan()
    scan.addFamily(familyBytes)
    fields.foreach {
      field => scan.addColumn(familyBytes, Bytes.toBytes(field))
    }
    // TableInputFormat expects the Scan as a Base64-encoded protobuf string.
    conf.set(TableInputFormat.SCAN, Base64.encodeBytes(ProtobufUtil.toScan(scan).toByteArray))

    /**
     * def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
     * conf: Configuration = hadoopConfiguration,
     * fClass: Class[F],
     * kClass: Class[K],
     * vClass: Class[V]): RDD[(K, V)]
     */
    val datasRDD: RDD[(ImmutableBytesWritable, Result)] = spark.sparkContext.newAPIHadoopRDD(
      conf,
      classOf[TableInputFormat],
      classOf[ImmutableBytesWritable],
      classOf[Result]
    )

    // Project each Result onto the requested fields, in order; missing cells
    // become null because Bytes.toString(null) == null.
    val rowsRDD: RDD[Row] = datasRDD.map {
      case (_, result) =>
        val values: Seq[String] = fields.map {
          field => Bytes.toString(result.getValue(familyBytes, Bytes.toBytes(field)))
        }
        Row.fromSeq(values)
    }

    val rowSchema: StructType = StructType(
      fields.map { field => StructField(field, StringType, nullable = true) }
    )
    spark.createDataFrame(rowsRDD, rowSchema)
  }

}
