package cn.ctGroup

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Batch job: reads pre-aggregated logs from a Parquet dataset, counts log
 * records per (provincename, cityname), and writes the result as a single
 * JSON file.
 *
 * Usage: two positional arguments are required:
 *   logInputPath      - input Parquet path
 *   resultOutputPath  - output path (deleted first if it already exists)
 */
object sql {

  def main(args: Array[String]): Unit = {

    // 0. Validate argument count; exit non-zero so callers (shell scripts,
    //    schedulers) can detect the misuse. (Was sys.exit() == status 0.)
    if (args.length != 2) {
      println(
        """
          |cn.ctGroup.sql
          |Arguments:
          | logInputPath
          | resultOutputPath
        """.stripMargin)
      sys.exit(1)
    }

    // 1. Bind program arguments
    val Array(logInputPath, resultOutputPath) = args

    // 2. Build SparkConf -> SparkContext.
    //    getSimpleName on a Scala object yields "sql$"; strip the trailing '$'.
    val sparkConf = new SparkConf()
    sparkConf.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
    // NOTE(review): hard-coded local master overrides any --master passed to
    // spark-submit; acceptable for local testing, remove for cluster runs.
    sparkConf.setMaster("local[*]")
    // Kryo serialization for RDD shuffle/disk traffic between workers.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)

    val sQLContext = new SQLContext(sc)

    // Read the Parquet input
    val parquet: DataFrame = sQLContext.read.parquet(logInputPath)

    // Register a temporary table for SQL access.
    // (registerTempTable is deprecated from Spark 2.0 in favor of
    // createOrReplaceTempView; kept here for the 1.x API this job targets.)
    parquet.registerTempTable("ct_v1")

    // Aggregate by province/city: count log records per group
    val da: DataFrame = sQLContext.sql("select count(*) ct , provincename , cityname from ct_v1 group by provincename , cityname  order by provincename ")

    // Delete the output path first if it exists, so the write cannot fail
    // with "path already exists" on re-runs.
    val hadoopConfiguration = sc.hadoopConfiguration
    val fs = FileSystem.get(hadoopConfiguration)

    val resultPath = new Path(resultOutputPath)
    if (fs.exists(resultPath)) {
      fs.delete(resultPath, true)
    }

    // Coalesce to one partition so the result lands as a single JSON file
    da.coalesce(1).write.json(resultOutputPath)

    sc.stop()

  }

}
