package com.yanduo.report

import java.util.Properties

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

/**
  * P152.05. Compute the distribution of record counts per province/city,
  * writing the result to MySQL.
  * https://www.bilibili.com/video/BV1F4411i7jK?p=15
  *
  * @author Gerry chan
  * @version 1.0
  */
object ProCityRptV2 {

  /**
    * Entry point. Expects exactly one argument: the parquet input path.
    * Reads the log data, aggregates record counts per province/city, and
    * appends the result to a MySQL table configured via Typesafe Config.
    */
  def main(args: Array[String]): Unit = {
    // 0. Validate arguments.
    // BUGFIX: the original check was `args.length != 3`, but only a single
    // argument is ever extracted below, so a correct 1-argument invocation
    // always printed usage and exited.
    if (args.length != 1) {
      println(
        """
          |com.yanduo.report.ProCityRptV2
          |Usage:
          | logInputPath
          |
        """.stripMargin)
      // Exit non-zero so callers/schedulers observe the failure
      // (the original `sys.exit()` exited with status 0 on bad usage).
      sys.exit(1)
    }

    // 1. Extract program arguments: the parquet input path.
    val Array(logInputPath) = args

    // 2. Build SparkConf --> SparkSession.
    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    // NOTE(review): hard-coded local master; remove or externalize this
    // before submitting to a real cluster.
    sparkConf.setMaster("local[*]")
    // Kryo serialization for RDD spill-to-disk and worker-to-worker transfer.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // 3. Read the parquet input and register it as a temporary view.
    val df: DataFrame = spark.read.parquet(logInputPath)
    df.createOrReplaceTempView("log")

    // 4. Group and count by province/city.
    // ("provicename" is spelled as it appears in the source data's schema;
    // do not "fix" it here or the query will fail.)
    val result: DataFrame = spark.sql("select provicename,cityname,count(*) ct " +
      "from log group by provicename,cityname")

    // 5. Load JDBC settings with Typesafe Config.
    // Lookup order: application.conf --> application.json --> application.properties
    val config: Config = ConfigFactory.load()
    val props: Properties = new Properties()
    props.setProperty("user", config.getString("jdbc.user"))
    props.setProperty("password", config.getString("jdbc.password"))

    // 6. Append the result to MySQL. "jdbc.tableName" may hold a
    // comma-separated list of tables; the first entry is the target.
    result.write.mode(SaveMode.Append).jdbc(config.getString("jdbc.url"),
      config.getString("jdbc.tableName").split(",")(0), props)

    spark.stop()
  }

}
