package cn.itcast.xc.analysis.search

import cn.itcast.xc.analysis.common.EtlEnvironment
import cn.itcast.xc.entity.{LearningCourseOnlineDwm, LearningCourseOnlineEsIndex}
import cn.itcast.xc.utils.HbaseUtils
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.SparkSession
import org.elasticsearch.spark.sql.EsSparkSQL

/**
 * <p>
 * Instant query — step 2: index searchable fields into Elasticsearch and
 * store the full records into HBase.
 * </p>
 **/
object InstantQueryStep2ToHbaseAndES {

  // Elasticsearch "index/type" target for the searchable summary records.
  val esTableName = "learning_online_detail/doc"

  /**
   * SparkSession configured (by the shared ETL environment) for Elasticsearch output.
   */
  val spark: SparkSession = EtlEnvironment.getSparkSession(this.getClass.getSimpleName, esTableName)


  /**
   * Entry point.
   *
   * Reads one date partition of the learning-course DWM table from the warehouse,
   * indexes the query fields (plus row key) into Elasticsearch, and saves the
   * complete records into HBase keyed by `learning_course_online_id`.
   *
   * @param args args(0) is the partition date to process, e.g. "2019-12-12"
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usable message instead of ArrayIndexOutOfBoundsException.
    require(args.nonEmpty, "usage: InstantQueryStep2ToHbaseAndES <date_info, e.g. 2019-12-12>")
    val dateInfo = args(0)

    import spark.implicits._

    // HBase setup: target table and column family; create the table if absent.
    val hbaseUtils = new HbaseUtils
    val hbaseTableName = "LearningOnlineDetail"
    val hbaseColumnFamily = "learning_online_detail"
    hbaseUtils.createTable(hbaseTableName, hbaseColumnFamily)
    val jobConf = hbaseUtils.getJobConf(hbaseTableName)

    // Source table in the warehouse.
    val hiveTableName = "data_course.learning_course_online_dwm"

    try {
      // 1. Load the requested partition. Filter with a Column expression instead
      //    of string-interpolated SQL so a malformed date argument cannot inject
      //    into the query text.
      val learnDS = spark.table(hiveTableName)
        .where($"date_info" === dateInfo)
        .as[LearningCourseOnlineDwm]

      // 2. Index the searchable fields (and the row key) into Elasticsearch.
      val learnES = learnDS.map { obj =>
        LearningCourseOnlineEsIndex(
          obj.learning_course_online_id,
          obj.course_name,
          obj.video_name,
          obj.user_name,
          obj.date_info
        )
      }
      EsSparkSQL.saveToEs(learnES, esTableName)

      // 3. Save the complete records to HBase.
      // Encode the column family once on the driver; the byte array is
      // serialized into the closure instead of being re-encoded per cell.
      val family = Bytes.toBytes(hbaseColumnFamily)
      learnDS.rdd.map { obj =>
        // Row key: the unique learning-record id.
        val put = new Put(Bytes.toBytes(obj.learning_course_online_id))
        put.addColumn(family, Bytes.toBytes("course_id"), Bytes.toBytes(obj.course_id))
        put.addColumn(family, Bytes.toBytes("course_name"), Bytes.toBytes(obj.course_name))
        put.addColumn(family, Bytes.toBytes("video_name"), Bytes.toBytes(obj.video_name))
        put.addColumn(family, Bytes.toBytes("user_id"), Bytes.toBytes(obj.user_id))
        put.addColumn(family, Bytes.toBytes("user_name"), Bytes.toBytes(obj.user_name))
        put.addColumn(family, Bytes.toBytes("learn_time"), Bytes.toBytes(obj.learn_time))
        put.addColumn(family, Bytes.toBytes("learn_count"), Bytes.toBytes(obj.learn_count))
        put.addColumn(family, Bytes.toBytes("date_info"), Bytes.toBytes(obj.date_info))
        (new ImmutableBytesWritable(), put)
      }.saveAsHadoopDataset(jobConf)
    } finally {
      // Release the SparkSession even when an earlier step throws.
      spark.close()
    }
  }


}
