package com.lqfan.bigdata.Project.business

import com.lqfan.bigdata.Project.`trait`.DataProcess
import com.lqfan.bigdata.Project.utils.{KuduUtils, SQLUtils, SchemaUtils}
import org.apache.spark.sql.{DataFrame, SparkSession}

object ProvinceCityStatProcessor extends DataProcess {

  // Kudu connection/table configuration, declared once so the source read and
  // the result sink cannot drift apart (the master address was previously
  // hard-coded in two separate places).
  private val KuduMaster   = "hadoop000"
  private val SourceTable  = "ods"
  private val SinkTable    = "province_city_stat"
  private val PartitionKey = "provincename"

  /**
   * Reads the ODS table from Kudu, computes the distribution of record counts
   * by province and city (via [[SQLUtils.PROVINCE_CITY_SQL]]), and writes the
   * result back to the `province_city_stat` Kudu table.
   *
   * @param spark active SparkSession used for both the Kudu read and the SQL aggregation
   */
  override def process(spark: SparkSession): Unit = {
    // Source: the "ods" table stored in Kudu.
    val odsDF: DataFrame = spark.read.format("org.apache.kudu.spark.kudu")
      .option("kudu.table", SourceTable)
      .option("kudu.master", KuduMaster)
      .load()

    // Aggregate: province/city count distribution, expressed as SQL over a temp view.
    odsDF.createOrReplaceTempView(SourceTable)
    val result: DataFrame = spark.sql(SQLUtils.PROVINCE_CITY_SQL)

    // Sink: only the table metadata is declared here — table creation/deletion
    // and the actual write are encapsulated in KuduUtils.sink.
    KuduUtils.sink(KuduMaster, SinkTable, PartitionKey, SchemaUtils.ProvinceCitySchema, result)
  }
}
