package com.imooc.scala.log

import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

/**
 * TopN statistics job: reads cleaned access logs (Parquet), counts hits per
 * city via Spark SQL, and persists the ranking to MySQL through MySQLUtils.
 *
 * Usage: TopNStatJob [inputPath]
 *   inputPath — optional Parquet directory of cleaned logs (defaults to "clean").
 */
object TopNStatJob {

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      // Fixed: appName previously said "SparkStatCleanJob" (copied from the
      // cleaning job), which mislabels this job in the Spark UI and logs.
      .appName("TopNStatJob")
      // Keep partition column values as strings (e.g. day "20190819")
      // instead of letting Spark infer them as integers.
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", "false")
      .master("local[*]")
      .getOrCreate()

    try {
      // Input path is now configurable; defaults to the original "clean" dir.
      val inputPath = if (args.nonEmpty) args(0) else "clean"
      val accessDF = spark.read.format("parquet").load(inputPath)
      accessDF.printSchema()
      accessDF.createOrReplaceTempView("access_logs")

      // Per-city hit counts, most-visited first.
      // (A day-filtered variant existed previously:
      //  ... where day='20190819' ... — re-add a predicate if needed.)
      val topDF = spark.sql(
        "select city,count(1) as times from access_logs group by city order by times desc")

      // Write each partition's rows to MySQL in one batch to limit
      // connection churn; CityStat/MySQLUtils are project-local helpers.
      topDF.foreachPartition(partitionRecords => {
        val stats = new ListBuffer[CityStat]
        partitionRecords.foreach(row => {
          val city = row.getAs[String]("city")
          val times = row.getAs[Long]("times")
          stats.append(CityStat(city, times))
        })
        MySQLUtils.insert(stats)
      })
    } finally {
      // Fixed: session was never stopped, leaking Spark resources.
      spark.stop()
    }
  }
}
