package com.imooc.log

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal

/**
 * TopN统计的Spark作业
 */
object TopNStatJob {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", false)
      .appName("SparkStatFormatJob")
      .master("local[2]")
      .getOrCreate()


    val accessDF = spark
      .read
      .format("parquet")
      .load("file:///D:\\CodingFiles\\IdeaProjects\\ImoocSparkSQLProject\\data\\access_clean.parquet")

    //    accessDF.printSchema()
    //        accessDF.show()

    val day = "20170511"

    StatDAO.deleteData(day)

    //按访问次数，最受欢迎的TopN课程
    videoAccessTopNStat(spark, accessDF, day)

    //按照地市, 进行统计TopN课程
    cityAccessTopNStat(spark, accessDF, day)

    //按照流量进行统计
    videoTrafficsTopNStat(spark, accessDF, day)

    spark.close()
  }


  def videoTrafficsTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {
    import spark.implicits._

    val videoTrafficsTopNDF = accessDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "cmsId")
      .agg(sum("traffic").as("traffics"))
      .orderBy($"traffics".desc)
    //      .show(false)


    /**
     * 将统计结果写入到MySQL中
     */
    try {
      videoTrafficsTopNDF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayVideoTrafficsStat]

        partitionOfRecords.foreach(info => {
          val day = info.getAs[String]("day")
          val cmsId = info.getAs[Long]("cmsId")
          val traffics = info.getAs[Long]("traffics")

          list.append(DayVideoTrafficsStat(day, cmsId, traffics))

        })

        StatDAO.insertDayVideoTrafficsAccessTopN(list)

      })

    } catch {
      case e: Exception => e.printStackTrace()
    }
  }

  def cityAccessTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {
    import spark.implicits._

    val cityAccessTopNDF = accessDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "city", "cmsId")
      .agg(count("cmsId").as("times"))

    //    cityAccessTopNDF.show(10)

    //Window函数在Spark SQL的使用
    //$"colname"等价于DF.col("colname")
    val top3DF = cityAccessTopNDF.select(
      $"day", $"city", $"cmsId", cityAccessTopNDF.col("times"),
      row_number().over(Window.partitionBy(cityAccessTopNDF("city"))
        .orderBy($"times".desc)
      ).as("times_rank")
    ).filter("times_rank <= 3")

    //    top3DF.show(false)

    /**
     * 将统计结果写入
     */
    try {
      top3DF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayCityVideoAccessStat]

        partitionOfRecords.foreach(info => {
          val day = info.getAs[String]("day")
          val cmsId = info.getAs[Long]("cmsId")
          val city = info.getAs[String]("city")
          val times = info.getAs[Long]("times")
          val times_rank = info.getAs[Int]("times_rank")

          list.append(DayCityVideoAccessStat(day, cmsId, city, times, times_rank))
        })

        StatDAO.insertDayCityVideoAccessTopN(list)

      })

    } catch {
      case e: Exception => e.printStackTrace()
    }
  }

  def videoAccessTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {
    /**
     * 使用DataFrame的方式进行统计
     */
    import spark.implicits._

    val videoAccessTopNDF = accessDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "cmsId")
      .agg(count("cmsId").as("times"))
      .orderBy($"times".desc)

//    videoAccessTopNDF.show(10)

    /**
     * 使用SQL的方式进行统计
     */
    //    accessDF.createOrReplaceTempView("access_logs")
    //    //    val sql = "select day, cmsId, count(1) as times " +
    //    //        "from access_logs " +
    //    //        "where day='%s' and cmsType='video' ".format(day) +
    //    //        "group by day, cmsId " +
    //    //        "order by times desc"
    //    val sql = """select day, cmsId, count(1) as times
    //      from access_logs
    //      where day='%s' and cmsType='video'
    //      group by day, cmsId
    //      order by times desc""".format(day)
    //    println(sql)
    //    val videoAccessTopNDF = spark.sql(sql)
    //
    //    videoAccessTopNDF.show(10)

    /**
     * 将统计结果写入到MySQL中
     */
    try {
      videoAccessTopNDF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayVideoAccessStat]
        partitionOfRecords.foreach(info => {
          val day = info.getAs[String]("day")
          val cmsId = info.getAs[Long]("cmsId")
          val times = info.getAs[Long]("times")

          /**
           * 不建议大家在此处进行数据库的数据插入
           */
          list.append(DayVideoAccessStat(day, cmsId, times))
        })
        StatDAO.insertDayVideoAccessTopN(list)
      })

    } catch {
      case e: Exception => e.printStackTrace()
    }
  }

}
