package com.imooc.scala.log

import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

object AlphaLog {

  /**
   * Entry point of the access-log statistics job.
   *
   * Pipeline: read raw access-log lines from `inputPath`, normalize each line
   * into a tab-separated record (time, url, traffic, ip), convert the records
   * into a DataFrame via [[AccessConvertUtil]], aggregate visit counts per
   * city with Spark SQL, and persist the result to MySQL partition by
   * partition.
   *
   * @param args exactly one argument is expected: the input path of the raw logs
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      // Usage message corrected to match this job's actual name.
      println("Usage: AlphaLog <inputPath>")
      System.exit(1)
    }
    val inputPath = args(0)

    val spark = SparkSession.builder()
      .appName("stat")
      .master("local[*]")
      .getOrCreate()

    try {
      val log = spark.sparkContext.textFile(inputPath)

      // Raw line layout (space separated), e.g.:
      //   ip - - [date time] "METHOD url PROTO" status traffic ...
      // Fields 3+4 form the bracketed timestamp, field 6 the quoted URL,
      // field 9 the traffic. NOTE(review): lines with fewer fields will throw
      // ArrayIndexOutOfBoundsException — assumed pre-validated upstream.
      val accessRDD = log.map { line =>
        val fields = line.split(" ")
        val ip = fields(0)
        val time = fields(3) + " " + fields(4)
        val url = fields(6).replaceAll("\"", "")
        val traffic = fields(9)
        DateUtils.parse(time) + "\t" + url + "\t" + traffic + "\t" + ip
      }

      val accessDF = spark.createDataFrame(
        accessRDD.map(x => AccessConvertUtil.parseLog(x)),
        AccessConvertUtil.struct)
      accessDF.printSchema()
      accessDF.createOrReplaceTempView("access_logs")

      // Visit count per city, most-visited first.
      val topDF = spark.sql(
        "select city,count(1) as times from access_logs group by city order by times desc")

      // Buffer each partition's rows and insert them in a single batch so we
      // open at most one MySQL connection per partition.
      topDF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[CityStat]
        partitionOfRecords.foreach { info =>
          val city = info.getAs[String]("city")
          val times = info.getAs[Long]("times")
          list.append(CityStat(city, times))
        }
        // Skip the DB round-trip entirely for empty partitions.
        if (list.nonEmpty) {
          MySQLUtils.insert(list)
        }
      })
    } finally {
      // Original code leaked the session; always release Spark resources,
      // even when the job fails mid-pipeline.
      spark.stop()
    }
  }
}
