package com.xx.sparkhbase

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}

import scala.util.control.NonFatal

/**
 * Writes a Hive query result into HBase using saveAsNewAPIHadoopDataset
 * with TableOutputFormat, on the secure ("safe") cluster.
 * @author tzp
 * @since 2022/6/20
 */
object TableWrite {
  /** Columns copied from the source Hive table into HBase; `user_no` doubles as the row key. */
  val cols = Array(
    "user_no",
    "mobile_no",
    "deviceid",
    "id_type",
    "is_ps",
    "is_p18",
    "is_p24",
    "is_p36",
    "is_p18_peipao",
    "is_p24_peipao",
    "is_p36_peipao",
    "is_big",
    "sx_amt",
    "max_duotou",
    "product_code",
    "dt",
    "media",
    "jf_risk_price",
    "jf_risk_price_peipao",
    "sc_risk_price"
  )

  /** The single column family every cell is written under. */
  val cf = Bytes.toBytes("c")

  /**
   * Converts one source [[Row]] into an HBase [[Put]] keyed by `user_no`.
   * Columns whose value is null are skipped, so no cell is stored for them.
   *
   * @param r a row containing at least the fields listed in [[cols]]
   * @return the row key wrapped in an [[ImmutableBytesWritable]], paired with the Put
   * @throws IllegalArgumentException if `user_no` is null or empty — a Put
   *         cannot be built without a row key
   */
  def rowToPuts(r: Row): (ImmutableBytesWritable, Put) = {
    val userNo = r.getAs[String]("user_no")
    // Fail fast with a clear message instead of an opaque NPE inside Bytes.toBytes.
    require(userNo != null && userNo.nonEmpty, "user_no (HBase row key) must not be null or empty")
    val rk = Bytes.toBytes(userNo)
    val put = new Put(rk)
    cols.foreach { col =>
      val fieldValue = r.get(r.fieldIndex(col))
      if (fieldValue != null) {
        put.addColumn(cf, Bytes.toBytes(col),
          Bytes.toBytes(fieldValue.toString))
      }
    }
    (new ImmutableBytesWritable(rk), put)
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .getOrCreate()

    val df = spark.sql(
      """select
        | *
        | from hdp_credit.oot_wj_sx_v2
        |""".stripMargin)

    val prepareHBaseToLoad: RDD[(ImmutableBytesWritable, Put)] =
      df.rdd.map(rowToPuts)

    val tableName = "dataeco-test"

    // TableOutputFormat reads the target table name from the job configuration.
    val conf: Configuration = HBaseConfiguration.create()
    val job: Job = Job.getInstance(conf)
    job.setOutputFormatClass(classOf[TableOutputFormat[ImmutableBytesWritable]])
    job.getConfiguration.set(TableOutputFormat.OUTPUT_TABLE, tableName)

    // Save the data to HBase.
    try {
      prepareHBaseToLoad.saveAsNewAPIHadoopDataset(job.getConfiguration)
    } catch {
      // Known Spark 2.2 commit-protocol bug: the data IS written to HBase, but an
      // IllegalArgumentException ("Can not create a Path from a null string") is
      // still thrown afterwards. Swallow only that exact message.
      // Literal-first equals is null-safe (getMessage can be null, and the original
      // e.getMessage.equals(...) would itself NPE); NonFatal keeps OOM/interrupts
      // propagating; any non-matching exception falls through and is rethrown.
      case NonFatal(e) if "Can not create a Path from a null string".equals(e.getMessage) =>
        println(" saveAsNewAPIHadoopDataset - Exception caused due to a bug in spark 2.2 - Data is saved in HBASE but still excepton is thrown - java.lang.IllegalArgumentException: Can not create a Path from a null string at org.apache.hadoop.fs.Path.checkPathArg ")
    }
  }
}
