package com.lqfan.bigdata.Project.business

import com.lqfan.bigdata.Project.`trait`.DataProcess
import com.lqfan.bigdata.Project.utils.{KuduUtils, SQLUtils, SchemaUtils}
import org.apache.spark.sql.{DataFrame, SparkSession}

object APPStatProcessor extends DataProcess {

  /**
   * Runs the APP-dimension statistics pipeline:
   * Kudu "ods" table -> STEP1 SQL -> STEP2 SQL -> Kudu "app_stat" table.
   *
   * @param spark the active session used for both reading and SQL execution
   */
  override def process(spark: SparkSession): Unit = {
    // Source: the ODS table stored in Kudu.
    val sourceDF: DataFrame = spark.read
      .format("org.apache.kudu.spark.kudu")
      .option("kudu.table", "ods")
      .option("kudu.master", "hadoop000")
      .load()
    sourceDF.createOrReplaceTempView("ods")

    // STEP1: intermediate aggregation, exposed as a temp view for STEP2.
    val stepOneDF: DataFrame = spark.sql(SQLUtils.APP_SQL_STEP1)
    stepOneDF.createOrReplaceTempView("app_tmp")

    // STEP2: final statistics computed on top of the STEP1 view.
    val finalDF: DataFrame = spark.sql(SQLUtils.APP_SQL_STEP2)

    // Sink: persist the result into Kudu's app_stat table. We only declare
    // the table metadata here; creating/dropping the table is encapsulated
    // inside KuduUtils.sink.
    val masterAddresses = "hadoop000"
    val targetTable     = "app_stat"
    val partitionColumn = "provincename"
    KuduUtils.sink(masterAddresses, targetTable, partitionColumn, SchemaUtils.APPSchema, finalDF)
  }
}
