package com.xx.sparkhbase

import org.apache.spark.sql.SparkSession

/**
 * Sample job: writes three hard-coded Employee rows to the HBase table
 * "employee" through the hbase-connectors data source
 * ("org.apache.hadoop.hbase.spark"), using an SHC-style JSON catalog to
 * map case-class fields onto HBase columns.
 *
 * @author tzp
 * @since 2022/6/20
 */
object UserFeature {

  /** One sample row; `key` is mapped to the HBase row key by the catalog. */
  final case class Employee(key: String, fName: String, lName: String,
                            mName: String, addressLine: String, city: String,
                            state: String, zipCode: String)

  /** Fixed sample data written by [[main]]. */
  val data: Seq[Employee] = Seq(
    Employee("1", "Abby", "Smith", "K", "3456 main", "Orlando", "FL", "45235"),
    Employee("2", "Amaya", "Williams", "L", "123 Orange", "Newark", "NJ", "27656"),
    Employee("3", "Alchemy", "Davis", "P", "Warners", "Sanjose", "CA", "34789")
  )

  /**
   * SHC-style catalog: row key comes from field "key"; every other field
   * lands in column family "cf" under the listed column qualifier.
   * A constant, so a `val` (the original `def` rebuilt the string on every
   * access); the interpolator is kept to preserve the literal byte-for-byte.
   */
  val catalog: String =
    s"""{
       |"table":{"namespace":"default", "name":"employee"},
       |"rowkey":"key",
       |"columns":{
       |"key":{"cf":"rowkey", "col":"key", "type":"string"},
       |"fName":{"cf":"cf", "col":"firstName", "type":"string"},
       |"lName":{"cf":"cf", "col":"lastName", "type":"string"},
       |"mName":{"cf":"cf", "col":"middleName", "type":"string"},
       |"addressLine":{"cf":"cf", "col":"addressLine", "type":"string"},
       |"city":{"cf":"cf", "col":"city", "type":"string"},
       |"state":{"cf":"cf", "col":"state", "type":"string"},
       |"zipCode":{"cf":"cf", "col":"zipCode", "type":"string"}
       |}
       |}""".stripMargin

  /**
   * Entry point: builds (or reuses) a SparkSession, converts [[data]] to a
   * DataFrame and saves it to HBase. Master/app-name are expected to be
   * supplied externally (e.g. via spark-submit), as in the original.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .getOrCreate()
    import spark.implicits._
    import org.apache.hadoop.hbase.spark.datasources.{HBaseSparkConf, HBaseTableCatalog}

    try {
      // Local Seq -> DataFrame directly; no need for parallelize on the
      // SparkContext (the unused `sc` val is gone).
      val df = data.toDF()

      df.write
        .options(Map(
          HBaseTableCatalog.tableCatalog -> catalog,
          // NOTE(review): "4" presumably is the region count used when the
          // connector creates the table — confirm against connector docs.
          HBaseTableCatalog.newTable -> "4",
          HBaseSparkConf.USE_HBASECONTEXT -> "false"
        ))
        .format("org.apache.hadoop.hbase.spark")
        .save()
    } finally {
      // Release the session even if the HBase write fails.
      spark.stop()
    }
  }
}
