package com.spark.prepareJob.report

import java.util.Properties

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
  * Computes the distribution of ad-log record counts per province/city/date
  * and appends the aggregated result to a MySQL table via JDBC.
  *
  * Reads the standardized ad log from a local path (Spark's default data
  * source format), groups by province, city and request date, and writes
  * rows of (date, province, city, quality) to the `ad_log` table.
  */
object StatisticProvinceCityQuantity {
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so the job's own output stays readable.
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.spark-project").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setAppName("StatisticProvinceCityQuantity")
      .setMaster("local[*]") // local run; override the master via spark-submit in production

    val sparkSession = SparkSession.builder().config(conf).getOrCreate()
    try {
      // Load the standardized ad data and expose it to Spark SQL as a temp view.
      val adDataFrame = sparkSession.read.load("file:///e:/out/standard/ad/")
      adDataFrame.createOrReplaceTempView("ad_log")

      // Count records per (province, city, request date).
      val sql =
        """
          |select
          |reqdate as date,
          |provincename as province,
          |cityname as city,
          |count(*) as quality
          |from ad_log
          |group by provincename,cityname,reqdate
        """.stripMargin

      // NOTE(review): empty host in the URL — Connector/J presumably falls back to
      // localhost:3306; confirm this matches the intended target database.
      val url = "jdbc:mysql:///test"
      val table = "ad_log"
      // SECURITY(review): credentials are hard-coded; move to configuration or
      // environment variables before any non-local deployment.
      val properties = new Properties()
      properties.setProperty("user", "root")
      properties.setProperty("password", "123456")

      val provinceDataFrame = sparkSession.sql(sql)
      provinceDataFrame.show()
      // Append so repeated runs accumulate rows instead of overwriting the table.
      provinceDataFrame.write.mode(SaveMode.Append).jdbc(url, table, properties)
    } finally {
      // Ensure the Spark session is released even when the job fails mid-way.
      sparkSession.stop()
    }
  }
}
