package com.jinghang.logProject.App_local

import com.jinghang.logProject.dao.StatDAO
import com.jinghang.logProject.entity.{DayCityVideoAccessStat, DayVideoAccessStat, DayVideoTrafficsStat}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession, expressions}

import scala.collection.mutable.ListBuffer

/**
  * TopN statistics job over cleaned access-log data (read as JSON from
  * `data/output/SparkStatCleanApp/json`). Expected columns:
  * url, cmsType, cmsId, traffic, ip, city, time, day.
  *
  * Three statistics are implemented (two are currently commented out in main):
  *  - most popular TopN videos by access count
  *  - Top3 videos per city (window function)
  *  - TopN videos by total traffic
  */
object _030_TopNStatApp {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("TopNStatJob")
      // Disable partition-column type inference so the `day` partition stays a String.
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", "false")
      .master("local[2]")
      .getOrCreate()

    spark.sparkContext.setLogLevel("ERROR")

    val sourcePath = "data/output/SparkStatCleanApp/json"

    // Schema: url cmsType cmsId traffic ip city time day
    val accessDF =
      spark
        .read
        .format("json")
        .load(sourcePath)

    accessDF.printSchema()

    accessDF.show(false)

    val day = "20161110"
    //StatDAO.deleteData(day)

    // Most popular TopN videos
    //videoAccessTopNStat(spark, accessDF, day)

    // TopN videos per city
    cityAccessTopNStat(spark, accessDF, day)

    // TopN videos by traffic
    //videoTrafficsTopNStat(spark, accessDF, day)

    spark.stop()
  }

  /**
    * TopN videos ranked by total traffic for the given day.
    *
    * @param spark    active SparkSession (implicits are imported from it)
    * @param accessDF cleaned access-log DataFrame
    * @param day      partition day to analyze, e.g. "20161110"
    */
  def videoTrafficsTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {
    // Implicits needed so that filter($"day" === day && $"cmsType" === "video") compiles.
    import spark.implicits._

    val videoTrafficsTopNDF = accessDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "cmsId")
      .agg(sum("traffic") // aggregate function
        .as("traffics"))
      .orderBy($"traffics".desc)

    videoTrafficsTopNDF.show(false)

    /**
      +--------+-----+--------+
      |day     |cmsId|traffics|
      +--------+-----+--------+
      |20180511|14540|1462179 |
      |20180511|14390|742896  |
      |20180511|4000 |739658  |
      |20180511|4600 |735648  |
      |20180511|4500 |726454  |
      |20180511|14704|719389  |
      |20180511|14623|697955  |
      |20180511|14322|693780  |
      +--------+-----+--------+
      */

    /**
      * Persist the statistics into MySQL.
      */
//    try {
//      videoTrafficsTopNDF.foreachPartition(partitionOfRecords => {
//
//        /**partitionOfRecords
//          * |20180511|14540|1462179 |
//          * |20180511|14390|742896  |
//          * |20180511|4000 |739658  |
//          */
//
//        val list = new ListBuffer[DayVideoTrafficsStat]
//
//        partitionOfRecords.foreach(info => {
//          //info |20180511|14540|1462179 |
//          val day = info.getAs[String]("day")
//          val cmsId = info.getAs[Long]("cmsId")
//          val traffics = info.getAs[Long]("traffics")
//          val entity = DayVideoTrafficsStat(day, cmsId, traffics)
//          list.append(entity)
//        })
//
//        StatDAO.insertDayVideoTrafficsAccessTopN(list)
//      })
//    } catch {
//      case e: Exception => e.printStackTrace()
//    }

  }


  /**
    * Top3 most popular videos per city for the given day,
    * computed with a row_number window partitioned by city.
    *
    * @param spark    active SparkSession (implicits are imported from it)
    * @param accessDF cleaned access-log DataFrame
    * @param day      partition day to analyze, e.g. "20161110"
    */
  def cityAccessTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {
    import spark.implicits._

    val cityAccessTopNDF =
      accessDF
        .filter($"day" === day && $"cmsType" === "video")
        .groupBy("day", "city", "cmsId")
        .agg(
          count("cmsId").as("times")
        )

    cityAccessTopNDF.show(false)

    // Window function usage in Spark SQL:
    // row_number() assigns a per-city rank ordered by access count (desc).
    val top3DF = cityAccessTopNDF.select(
      cityAccessTopNDF("day"),
      cityAccessTopNDF("city"),
      cityAccessTopNDF("cmsId"),
      cityAccessTopNDF("times"),
      row_number()
        .over(Window.partitionBy(cityAccessTopNDF("city"))
          .orderBy(cityAccessTopNDF("times").desc))
        .as("times_rank")
    )
      .filter("times_rank <=3")

    top3DF.show(false) // Top3 per city


    /**
      * Persist the statistics into MySQL.
      */
//    try {
//      top3DF.foreachPartition(partitionOfRecords => {
//        val list = new ListBuffer[DayCityVideoAccessStat]
//
//        partitionOfRecords.foreach(info => {
//          val day = info.getAs[String]("day")
//          val cmsId = info.getAs[Long]("cmsId")
//          val city = info.getAs[String]("city")
//          val times = info.getAs[Long]("times")
//          val timesRank = info.getAs[Int]("times_rank")
//          list.append(DayCityVideoAccessStat(day, cmsId, city, times, timesRank))
//        })
//
//        StatDAO.insertDayCityVideoAccessTopN(list)
//      })
//    } catch {
//      case e: Exception => e.printStackTrace()
//    }

  }


  /**
    * Most popular TopN videos (by access count) for the given day.
    *
    * @param spark    active SparkSession (implicits are imported from it)
    * @param accessDF cleaned access-log DataFrame
    * @param day      partition day to analyze, e.g. "20161110"
    */
  def videoAccessTopNStat(spark: SparkSession, accessDF: DataFrame, day: String): Unit = {

    /**
      * Statistics via the DataFrame API.
      */
    import spark.implicits._

    val videoAccessTopNDF =
      accessDF
        .filter($"day" === day && $"cmsType" === "video")
        .groupBy("day", "cmsId")
        .agg(count("cmsId")
          .as("times"))
        .orderBy($"times".desc)

    videoAccessTopNDF.show(false)

    /**
      * Equivalent statistics via Spark SQL.
      */
    //    accessDF.createOrReplaceTempView("access_logs")
    //    val videoAccessTopNDF = spark.sql("select day,cmsId, count(1) as times from access_logs " +
    //      "where day='20170511' and cmsType='video' " +
    //      "group by day,cmsId order by times desc")
    //
    //    videoAccessTopNDF.show(false)

    /**
      * Persist the statistics into MySQL.
      */
//    try {
//      videoAccessTopNDF.foreachPartition(partitionOfRecords => {
//        val list = new ListBuffer[DayVideoAccessStat]
//
//        partitionOfRecords.foreach(info => {
//          val day = info.getAs[String]("day")
//          val cmsId = info.getAs[Long]("cmsId")
//          val times = info.getAs[Long]("times")
//
//          /**
//            * Doing per-row database inserts here is not recommended.
//            */
//
//          list.append(DayVideoAccessStat(day, cmsId, times))
//        })
//
//        //StatDAO.insertDayVideoAccessTopN(list)
//      })
//    } catch {
//      case e: Exception => e.printStackTrace()
//    }

  }
}
