package com.imooc.log

import com.imooc.log.SparkStatFormatJob.SetLogger
import com.imooc.log.dao.StatDAO
import com.imooc.log.entity.{DayCityVideoAccessStat, DayVideoAccessStat, DayVideoTrafficsStat}

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal

/**
  * Created by zghgchao 2017/12/25 13:23
  * Top-N statistics Spark job: computes daily most-popular-course rankings
  * from the cleaned access logs and persists them to MySQL via StatDAO.
  */
object TopNStatJob {

  def main(args: Array[String]): Unit = {
    SetLogger

    val spark = SparkSession.builder()
      // Keep the "day" partition column as a String instead of letting Spark
      // infer it as an integer — the DAO and the entity classes expect a String.
      .config("spark.sql.sources.partitionColumnTypeInference.enabled", "false")
      .master("local[2]")
      .appName("TopNStatJob") // fixed: was "SparkStatCleanJob", copy-pasted from the clean job
      .getOrCreate()

    // Cleaned access-log parquet produced by the upstream clean job.
    // Schema (with type inference disabled):
    //   url: string, cmsType: string, cmsId: long, traffic: long,
    //   ip: string, city: string, time: string, day: string (partition column)
    val cleanDF = spark.read.format("parquet").load("src/data/clean")

    // Delete any rows already persisted for this day so reruns stay idempotent.
    val day = "20170511"
    StatDAO.delete(day)

    // Requirement 1: site-wide Top N most popular video courses
    videoAccessTopNStat(spark, cleanDF, day)

    // Requirement 2: Top N most popular video courses per city
    cityAccessTopSata(spark, cleanDF, day)

    // Requirement 3: Top N video courses ranked by traffic
    videoTraffsTopStat(spark, cleanDF, day)

    spark.stop()
  }

  /**
    * Requirement 1: site-wide Top N most popular video courses, counted as
    * the number of accesses per cmsId for the given day (SQL API).
    *
    * Results are written to MySQL table day_video_access_topn_stat:
    *   create table day_video_access_topn_stat (
    *       day varchar(8) not null,
    *       cms_id bigint(10) not null,
    *       times bigint(10) not null,
    *       primary key (day, cms_id)
    *   );
    *
    * @param spark   active session (needed for the SQL API)
    * @param cleanDF cleaned access-log DataFrame
    * @param day     partition day to process, e.g. "20170511"
    */
  def videoAccessTopNStat(spark: SparkSession, cleanDF: DataFrame, day: String): Unit = {
    // Register a temp view so the aggregation can be expressed in SQL.
    cleanDF.createOrReplaceTempView("access_logs")

    // Quote the day value so the String partition column is compared against a
    // string literal (the previous unquoted concatenation relied on an implicit
    // cast to int). `day` is a hard-coded constant here; validate/parameterize
    // it if it ever comes from external input.
    val videoAccessTopNDF = spark.sql(
      s"""select day, cmsId, count(1) as times
         |from access_logs
         |where day = '$day' and cmsType = 'video'
         |group by day, cmsId
         |order by times desc""".stripMargin)

    videoAccessTopNDF.show(10, false)

    // Persist per partition: build the partition's rows into entities locally,
    // then hand the whole batch to the DAO — one DB round-trip per partition
    // instead of one insert per row.
    try {
      videoAccessTopNDF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayVideoAccessStat]

        partitionOfRecords.foreach { info =>
          list.append(DayVideoAccessStat(
            info.getAs[String]("day"),
            info.getAs[Long]("cmsId"),
            info.getAs[Long]("times")))
        }
        StatDAO.insertDayVideoAccessTopN(list)
      })
    } catch {
      // NonFatal: log recoverable failures, let OutOfMemoryError etc. propagate.
      case NonFatal(e) => e.printStackTrace()
    }
  }

  /**
    * Requirement 2: Top 3 most popular video courses per city for the given
    * day. Ranking uses row_number() over a per-city window ordered by access
    * count, so ties are broken arbitrarily but deterministically per run.
    *
    * Results are written to MySQL table day_video_city_access_topn_stat:
    *   create table day_video_city_access_topn_stat (
    *       day varchar(8) not null,
    *       cms_id bigint(10) not null,
    *       city varchar(20) not null,
    *       times bigint(10) not null,
    *       times_rank int not null,
    *       primary key (day, cms_id, city)
    *   );
    *
    * Note: the method name keeps its original (misspelled) form "...Sata" so
    * existing callers remain source-compatible.
    *
    * @param spark   active session (needed for implicits)
    * @param cleanDF cleaned access-log DataFrame
    * @param day     partition day to process, e.g. "20170511"
    */
  def cityAccessTopSata(spark: SparkSession, cleanDF: DataFrame, day: String): Unit = {
    import spark.implicits._

    // Access count per (day, city, cmsId) for video pages only. The global
    // orderBy the original applied here was dropped: the ranking window below
    // re-sorts within each city anyway, so the extra shuffle bought nothing.
    val cityAccessTopNDF = cleanDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "city", "cmsId")
      .agg(count("cmsId").as("times"))

    // Rank courses within each city by access count and keep the top 3.
    val rankWindow = Window.partitionBy($"city").orderBy($"times".desc)
    val cityTop3DF = cityAccessTopNDF
      .withColumn("times_rank", row_number().over(rankWindow))
      .filter($"times_rank" <= 3)
      .orderBy($"city".desc, $"times_rank".asc)

    cityTop3DF.show(false) // top 3 per city

    // Persist per partition — same batching pattern as videoAccessTopNStat.
    try {
      cityTop3DF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayCityVideoAccessStat]

        partitionOfRecords.foreach { info =>
          list.append(DayCityVideoAccessStat(
            info.getAs[String]("day"),
            info.getAs[Long]("cmsId"),
            info.getAs[String]("city"),
            info.getAs[Long]("times"),
            info.getAs[Int]("times_rank")))
        }
        StatDAO.insertDayCityVideoAccessTopN(list)
      })
    } catch {
      case NonFatal(e) => e.printStackTrace()
    }
  }

  /**
    * Requirement 3: Top N video courses ranked by total traffic for the
    * given day (DataFrame API).
    *
    * Results are written to MySQL table day_video_traffics_topn_stat:
    *   create table day_video_traffics_topn_stat (
    *       day varchar(8) not null,
    *       cms_id bigint(10) not null,
    *       traffics bigint(20) not null,
    *       primary key (day, cms_id)
    *   );
    *
    * Note: the method name keeps its original (misspelled) form "...Traffs..."
    * so existing callers remain source-compatible.
    *
    * @param spark   active session (needed for implicits)
    * @param cleanDF cleaned access-log DataFrame
    * @param day     partition day to process, e.g. "20170511"
    */
  def videoTraffsTopStat(spark: SparkSession, cleanDF: DataFrame, day: String): Unit = {
    import spark.implicits._

    // Total traffic per (day, cmsId) for video pages, highest first.
    val trafficsTopNDF = cleanDF
      .filter($"day" === day && $"cmsType" === "video")
      .groupBy("day", "cmsId")
      .agg(sum("traffic").as("traffics"))
      .orderBy($"traffics".desc)

    trafficsTopNDF.show()

    // Persist per partition — same batching pattern as videoAccessTopNStat.
    try {
      trafficsTopNDF.foreachPartition(partitionOfRecords => {
        val list = new ListBuffer[DayVideoTrafficsStat]

        partitionOfRecords.foreach { info =>
          list.append(DayVideoTrafficsStat(
            info.getAs[String]("day"),
            info.getAs[Long]("cmsId"),
            info.getAs[Long]("traffics")))
        }
        StatDAO.insertDayVideoTrafficsTopN(list)
      })
    } catch {
      case NonFatal(e) => e.printStackTrace()
    }
  }

}