package com.yanduo.report

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * P142.04. Computes the record-count distribution per province/city,
  * reading from parquet files and writing the result as JSON.
  *
  * (NOTE(review): an earlier comment said the result was stored in MySQL,
  * but the code below writes JSON to the output path — JSON is correct.)
  *
  * Course reference: https://www.bilibili.com/video/BV1F4411i7jK?p=14
  *
  * @author Gerry chan
  * @version 1.0
  */
object ProCityRpt {

  /**
    * Entry point. Expects exactly two arguments:
    * args(0) = logInputPath      parquet input directory
    * args(1) = resultOutputPath  JSON output directory (deleted first if it exists)
    */
  def main(args: Array[String]): Unit = {
    // 0. Validate arguments.
    // BUG FIX: the original checked `args.length != 3`, but exactly TWO values
    // are destructured below — a valid 2-arg run was rejected with the usage
    // message, and a 3-arg run crashed with a scala.MatchError.
    if (args.length != 2) {
      println(
        """
          |com.yanduo.report.ProCityRpt
          |Arguments:
          | logInputPath
          | resultOutputPath
          |
        """.stripMargin)
      // Exit nonzero so shell scripts can detect the usage error.
      sys.exit(1)
    }

    // 1. Bind program arguments (length == 2 guaranteed by the check above).
    val Array(logInputPath, resultOutputPath) = args

    // 2. SparkConf -> SparkSession.
    val sparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      // Kryo serialization for RDD spill-to-disk and worker-to-worker transfer.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val spark: SparkSession = SparkSession.builder().config(sparkConf).getOrCreate()

    // Read the parquet input and expose it to Spark SQL as a temp view.
    val df: DataFrame = spark.read.parquet(logInputPath)
    df.createOrReplaceTempView("log")

    // Group and count per province/city.
    // NOTE(review): "provicename" looks misspelled, but it must match the
    // parquet schema — do not rename without verifying the source data.
    val result: DataFrame = spark.sql(
      "select provicename,cityname,count(*) ct " +
        "from log group by provicename,cityname")

    // Delete the output path if it already exists; Spark's default save mode
    // fails when the target directory is present.
    val configuration: Configuration = spark.sparkContext.hadoopConfiguration
    val fs: FileSystem = FileSystem.get(configuration)
    val resultPath: Path = new Path(resultOutputPath)
    if (fs.exists(resultPath)) {
      fs.delete(resultPath, true)
    }

    // coalesce(1) merges partitions so we don't emit many tiny JSON files.
    result.coalesce(1).write.json(resultOutputPath)

    spark.stop()
  }

}
