package cn.ctGroup

import java.util.Properties
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}

import scala.tools.cmd.Property

object sql_V2 {

  /**
   * Spark batch job: reads a parquet log file, counts log records grouped by
   * (provincename, cityname), and writes the aggregate to a JDBC table.
   *
   * JDBC connection settings (`jdbc.user`, `jdbc.password`, `jdbc.url`,
   * `jdbc.tableName`) are resolved from the classpath via Typesafe Config.
   *
   * @param args exactly one argument: the parquet input path.
   */
  def main(args: Array[String]): Unit = {

    // 0. Validate the argument count. Exit with a non-zero status so that
    // callers (shell scripts, schedulers) can detect the failure — the
    // original `sys.exit()` returned 0, which reads as success.
    if (args.length != 1) {
      println(
        """
          |cn.ctGroup.sql_V2
          |参数：
          | logInputPath
        """.stripMargin)
      sys.exit(1)
    }

    // 1. Extract the program argument.
    val Array(logInputPath) = args

    // 2. Build SparkConf -> SparkContext.
    val sparkConf = new SparkConf()
      .setAppName(this.getClass.getSimpleName)
      .setMaster("local[*]")
      // Kryo serialization for RDD data spilled to disk and shuffled
      // between workers — faster and more compact than Java serialization.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(sparkConf)

    // Ensure the SparkContext is always released, even when the read,
    // the SQL query, or the JDBC write throws (the original leaked it).
    try {
      val sqlContext = new SQLContext(sc)

      // Read the parquet log file.
      val parquet: DataFrame = sqlContext.read.parquet(logInputPath)

      // Register a temporary view so the data can be queried with SQL.
      // NOTE(review): registerTempTable is deprecated since Spark 2.0 —
      // switch to createOrReplaceTempView once the Spark version in use
      // is confirmed.
      parquet.registerTempTable("ct_v1")

      // Aggregate: number of log records per (province, city).
      val da: DataFrame = sqlContext.sql(
        "select count(*) ct , provincename , cityname from ct_v1 group by provincename , cityname")

      // JDBC target and credentials come from application.conf on the classpath.
      val load: Config = ConfigFactory.load()
      val props = new Properties()
      props.setProperty("user", load.getString("jdbc.user"))
      props.setProperty("password", load.getString("jdbc.password"))

      da.write.jdbc(load.getString("jdbc.url"), load.getString("jdbc.tableName"), props)
    } finally {
      sc.stop()
    }

  }

}
