package com.edu360.select

import com.edu360.utils.ToMysqlUtils
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}

/**
 * Spark batch job: reads parquet log data, counts records grouped by
 * province/city, writes the aggregate as a single JSON file and loads it
 * into MySQL via [[com.edu360.utils.ToMysqlUtils]].
 *
 * Args: logInputPath compressionCode<snappy,gzip,lzo> tableName resultOutputPath
 */
object SelectProCityCountSQL {
  def main(args: Array[String]): Unit = {
    // 0. Validate argument count; exit non-zero so schedulers detect the failure.
    if (args.length != 4) {
      println(
        """
          |com.edu360.select.SelectProCityCountSQL
          |参数：
          | logInputPath
          | compressionCode <snappy, gzip, lzo>
          | tableName
          | resultOutputPath
        """.stripMargin)
      sys.exit(1)
    }

    // 1. Destructure program arguments.
    val Array(logInputPath, compressionCode, tableName, resultOutputPath) = args

    // 2. Build SparkConf -> SparkContext.
    val sparkConf = new SparkConf()
    sparkConf.setAppName(s"${this.getClass.getSimpleName}")
    sparkConf.setMaster("local[*]")
    // Kryo serialization for RDD shuffle/disk traffic between workers.
    // Must be set BEFORE the SparkContext is created, otherwise it is ignored.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(sparkConf)

    val sQLContext = new SQLContext(sc)
    // Parquet output compression codec comes from the CLI argument.
    sQLContext.setConf("spark.sql.parquet.compression.codec", compressionCode)

    // 3. Read the parquet log and register it as a temp table for SQL.
    val parquet: DataFrame = sQLContext.read.parquet(logInputPath)
    parquet.registerTempTable("logs")

    // 4. Count records per province/city, ordered by count ascending.
    val sql: DataFrame = sQLContext.sql("select count(1) ct,provincename,cityname from logs group by provincename,cityname order by ct")

    // 5. Write result as a single JSON file, then load it into MySQL.
    // coalesce(1) forces one output partition so a single file is produced.
    sql.coalesce(1).write.json(resultOutputPath)
    ToMysqlUtils.dfToSql(sql, tableName)

    sc.stop()
  }
}
