package cn.itcast.xc.analysis.demo

import cn.itcast.xc.analysis.common.EtlEnvironment
import cn.itcast.xc.utils.HbaseUtils
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.elasticsearch.spark.sql.EsSparkSQL

/**
 * <p>
 * Practice exercise: saving data to HBase and Elasticsearch.
 * </p>
 **/
object Save {

  /** Simple demo record written to both sinks. */
  case class Person(id: String, name: String, age: String)

  /**
   * Entry point: builds a tiny [[Person]] dataset and writes it to
   * Elasticsearch and to HBase.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Elasticsearch index/type target.
    val esTableName = "demo/doc"
    val spark = EtlEnvironment.getSparkSession(this.getClass.getSimpleName, esTableName)
    import spark.implicits._

    // Ensure the SparkSession is released even when a save fails.
    try {
      val sc = spark.sparkContext
      // HBase table name
      val hbaseTableName = "demo"
      // HBase column family
      val hbaseColumnName = "demo_col1"

      // Initialize the HBase helper and create the table if it does not exist.
      val hbaseUtils = new HbaseUtils
      hbaseUtils.createTable(hbaseTableName, hbaseColumnName)
      val jobConf = hbaseUtils.getJobConf(hbaseTableName)

      // Sample data. Cached because two separate actions consume it
      // (the ES save and the HBase save); without caching it would be
      // recomputed for the second action.
      val rdd = sc.parallelize(Seq(
        Person("a1", "张三", "30岁"),
        Person("a2", "李四", "29岁"),
        Person("a3", "王五", "21岁")
      )).cache()

      // Save to Elasticsearch.
      EsSparkSQL.saveToEs(spark.createDataset(rdd), esTableName)

      // Save to HBase: one Put per person, row key = person id.
      rdd.map { p =>
        val put = new Put(Bytes.toBytes(p.id))
        put.addColumn(Bytes.toBytes(hbaseColumnName), Bytes.toBytes("name"), Bytes.toBytes(p.name))
        put.addColumn(Bytes.toBytes(hbaseColumnName), Bytes.toBytes("age"), Bytes.toBytes(p.age))
        (new ImmutableBytesWritable(), put)
      }.saveAsHadoopDataset(jobConf)
    } finally {
      // Release Spark resources.
      spark.close()
    }
  }
}
