package my_project.business

import my_project.`trait`.DataProcess
import my_project.util.{DateUtils, KuduUtils, SQLUtils, SchemaUtils}
import org.apache.spark.sql.{DataFrame, SparkSession}

object ProvinceCityStatProcessor extends DataProcess {

  /** Spark conf key that may override the Kudu master address. */
  private val KuduMasterConfKey = "spark.kudu.master"

  /** Default Kudu master address, used when [[KuduMasterConfKey]] is not set. */
  private val DefaultKuduMaster = "hadoop000"

  /**
   * Reads the ODS table for the current batch from Kudu, aggregates it by
   * province and city (via `SQLUtils.PROVINCE_CITY_SQL`), and sinks the result
   * back to a Kudu table partitioned by province.
   *
   * @param spark the active SparkSession; also passed to `DateUtils.getTableName`
   *              to resolve the batch-specific table names
   */
  override def process(spark: SparkSession): Unit = {
    // Allow the Kudu master to be overridden via the Spark conf; fall back to
    // the original hard-coded address so existing deployments keep working.
    val masterAddresses = spark.conf.get(KuduMasterConfKey, DefaultKuduMaster)

    // Source table name for this batch — presumably "ods" plus a date suffix;
    // NOTE(review): exact format depends on DateUtils.getTableName, confirm there.
    val sourceTableName = DateUtils.getTableName("ods", spark)

    val odsDF: DataFrame = spark.read.format("org.apache.kudu.spark.kudu")
      .option("kudu.master", masterAddresses)
      .option("kudu.table", sourceTableName)
      .load()

    // Register the raw data and run the province/city aggregation SQL.
    odsDF.createOrReplaceTempView("ods")
    val result: DataFrame = spark.sql(SQLUtils.PROVINCE_CITY_SQL)

    // Sink: batch-specific stat table, hash/range-partitioned on "provincename"
    // per the schema and partition id handed to KuduUtils.sink.
    val sinkTableName = DateUtils.getTableName("provincename_city_stat", spark)
    val partitionId   = "provincename"
    KuduUtils.sink(result, sinkTableName, masterAddresses, SchemaUtils.ProvinceCitySchema, partitionId)
  }
}
