import java.util.Date

import commons.conf.ConfigurationManager
import commons.constant.Constants
import commons.utils.DateUtils
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Duration, Minutes, Seconds, StreamingContext}

import scala.collection.mutable.ArrayBuffer
/**
  * Log line format:
  * timestamp province city userid adid
  * (event time, province, city, user id, ad id)
  */
object AdverStat { // consumes the stream produced by MockRealTimeData and filters out blacklisted users

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("adver")
    val sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

    // One micro-batch every 5 seconds.
    val ssc = new StreamingContext(sparkSession.sparkContext, Seconds(5))

    // Kafka connection settings come from the shared configuration in the commons module.
    val kafka_brokers = ConfigurationManager.config.getString(Constants.KAFKA_BROKERS)
    val kafka_topics = ConfigurationManager.config.getString(Constants.KAFKA_TOPICS)

    val kafkaParam = Map(
      "bootstrap.servers" -> kafka_brokers,                     // Kafka broker list
      "key.deserializer" -> classOf[StringDeserializer],        // key deserializer
      "value.deserializer" -> classOf[StringDeserializer],      // value deserializer
      "group.id" -> "group1",                                   // consumer group
      // auto.offset.reset:
      //   latest:   use the committed offset if one exists, otherwise start from the newest records
      //   earliest: use the committed offset if one exists, otherwise start from the beginning
      //   none:     use the committed offset if one exists, otherwise fail
      "auto.offset.reset" -> "latest",
      // Auto-commit is disabled so offsets are not committed behind our back.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Direct stream of Kafka records. Each record value is one log line:
    //   "timestamp province city userid adid"
    val adRealTimeDStream = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(kafka_topics), kafkaParam)
    )

    // Keep only the record values (the raw log lines).
    val adRealTimeValueDStream = adRealTimeDStream.map(_.value())

    // Drop every log line whose userid is currently blacklisted. The blacklist is
    // re-read from MySQL for every batch so newly banned users take effect immediately.
    val adRealTimeFilterDStream = adRealTimeValueDStream.transform { logRDD =>
      val blacklistUserIds = AdBlacklistDAO.findAll().map(_.userid)
      logRDD.filter { log =>
        val userId = log.split(" ")(3).toLong
        !blacklistUserIds.contains(userId)
      }
    }

    // Uncomment to inspect the filtered stream:
    // adRealTimeFilterDStream.foreachRDD(rdd => rdd.foreach(println))

    // updateStateByKey (used in provinceCityClickStat) requires a checkpoint directory.
    ssc.checkpoint("./spark-streaming")
    // Checkpoint every 10 seconds — must be a multiple of the 5-second batch interval.
    adRealTimeFilterDStream.checkpoint(Duration(10000))

    /**
      * Requirement 1: maintain the blacklist in real time.
      * Accumulate per-user click counts in MySQL; once a user clicks one ad more
      * than 100 times in a day, add the user to the blacklist.
      */
    generateBlackList(adRealTimeFilterDStream)

    // Requirement 2: cumulative daily ad-click counts per province/city.
    val key2ProvinceCityCountDStream = provinceCityClickStat(adRealTimeFilterDStream)

    // Requirement 3: top-3 hottest ads per province.
    proveinceTope3Adver(sparkSession, key2ProvinceCityCountDStream)

    // Requirement 4: per-minute click counts over the last hour, via a sliding window
    // (batches arrive every 5 seconds; the window aggregates them per minute).
    getRecentHourClickCount(adRealTimeFilterDStream)

    ssc.start()
    ssc.awaitTermination()
  } // main

  /**
    * Requirement 1: maintain the user blacklist in real time.
    *
    * For every batch this counts clicks per (date, user, ad), persists the increments
    * into the MySQL click-count table, and inserts every user whose accumulated count
    * for one ad on one day exceeds 100 into the blacklist table.
    *
    * @param adRealTimeFilterDStream stream of log lines "timestamp province city userid adid"
    */
  def generateBlackList(adRealTimeFilterDStream: DStream[String]) = {
    // (date_userId_adId, 1L) for every click event.
    val key2NumDStream = adRealTimeFilterDStream.map { log =>
      val logSplit = log.split(" ")
      val timeStamp = logSplit(0).toLong
      val dateKey = DateUtils.formatDateKey(new Date(timeStamp)) // yy-mm-dd
      val userId = logSplit(3).toLong
      val adid = logSplit(4).toLong
      (dateKey + "_" + userId + "_" + adid, 1L)
    }

    // Per-batch click count for each (date, user, ad) key.
    val key2CountDStream = key2NumDStream.reduceByKey(_ + _)

    // Persist each batch's increments; the DAO accumulates them in MySQL.
    key2CountDStream.foreachRDD { rdd =>
      rdd.foreachPartition { items =>
        val clickCountArray = new ArrayBuffer[AdUserClickCount]()
        for ((key, count) <- items) {
          val keySplit = key.split("_")
          val date = keySplit(0)
          val userId = keySplit(1).toLong
          val adid = keySplit(2).toLong
          clickCountArray += AdUserClickCount(date, userId, adid, count)
        }
        AdUserClickCountDAO.updateBatch(clickCountArray.toArray)
      }
    }

    // Keep only keys whose accumulated MySQL count has crossed the 100-click threshold.
    val key2BlackListDStream = key2CountDStream.filter { case (key, count) =>
      val keySplit = key.split("_")
      val date = keySplit(0)
      val userId = keySplit(1).toLong
      val adid = keySplit(2).toLong
      // Total clicks so far today for this (user, ad) pair.
      AdUserClickCountDAO.findClickCountByMultiKey(date, userId, adid) > 100
    }

    // Extract the user ids and de-duplicate (the same user may appear under many keys)
    // before inserting into the MySQL blacklist table.
    val userIdDStream = key2BlackListDStream
      .map { case (key, _) => key.split("_")(1).toLong }
      .transform(rdd => rdd.distinct())

    userIdDStream.foreachRDD { rdd =>
      rdd.foreachPartition { items =>
        val userIdArray = new ArrayBuffer[AdBlacklist]()
        for (userId <- items) {
          userIdArray += AdBlacklist(userId)
        }
        AdBlacklistDAO.insertBatch(userIdArray.toArray)
      }
    }
  } // generateBlackList

  /**
    * Requirement 2: cumulative daily ad-click counts per province and city.
    *
    * Maintains a running total per (date, province, city, ad) key via updateStateByKey
    * (checkpointing must be enabled by the caller) and writes each snapshot to MySQL.
    *
    * @param adRealTimeFilterDStream stream of log lines "timestamp province city userid adid"
    * @return DStream of (date_province_city_adid, cumulativeCount), reused by the top-3 job
    */
  def provinceCityClickStat(adRealTimeFilterDStream: DStream[String]) = {
    // (date_province_city_adid, 1L) for every click event.
    val key2ProvinceCityDStream = adRealTimeFilterDStream.map { log =>
      val logSplit = log.split(" ")
      val timeStamp = logSplit(0).toLong
      val dateKey = DateUtils.formatDateKey(new Date(timeStamp)) // yy-mm-dd
      val province = logSplit(1)
      val city = logSplit(2)
      val adid = logSplit(4)
      (dateKey + "_" + province + "_" + city + "_" + adid, 1L)
    }

    // Running total per key across all batches; state survives via the checkpoint.
    val key2StateDStream = key2ProvinceCityDStream.updateStateByKey[Long] {
      (values: Seq[Long], state: Option[Long]) =>
        // New total = previous total (0 when the key is first seen) + this batch's clicks.
        Some(state.getOrElse(0L) + values.sum)
    }

    // Write the current totals to MySQL.
    key2StateDStream.foreachRDD { rdd =>
      rdd.foreachPartition { items =>
        val adStatArray = new ArrayBuffer[AdStat]()
        // key: date_province_city_adid
        for ((key, count) <- items) {
          val keySplit = key.split("_")
          val date = keySplit(0)
          val province = keySplit(1)
          val city = keySplit(2)
          val adid = keySplit(3).toLong
          adStatArray += AdStat(date, province, city, adid, count)
        }
        AdStatDAO.updateBatch(adStatArray.toArray)
      }
    }
    key2StateDStream // returned for reuse by proveinceTope3Adver
  } // provinceCityClickStat

  /**
    * Requirement 3: top-3 hottest ads per province per day.
    *
    * Re-keys the province/city totals to province granularity, re-aggregates, then uses
    * Spark SQL row_number() to keep the top 3 ads per (date, province) and writes them
    * to MySQL. (Method name typo is kept for caller compatibility.)
    *
    * @param sparkSession                  session used to run the ranking SQL
    * @param key2ProvinceCityCountDStream  (date_province_city_adid, cumulativeCount) stream
    */
  def proveinceTope3Adver(sparkSession: SparkSession, key2ProvinceCityCountDStream: DStream[(String, Long)]) = {
    // Drop the city component: (date_province_adid, count).
    val key2ProvinceCountDStream = key2ProvinceCityCountDStream.map { case (key, count) =>
      val keySplit = key.split("_")
      val date = keySplit(0)
      val province = keySplit(1)
      val adid = keySplit(3)
      (date + "_" + province + "_" + adid, count)
    }

    // Coarsening the key produced duplicates (one per city), so aggregate again.
    val key2ProvinceAggrCountDStream = key2ProvinceCountDStream.reduceByKey(_ + _)

    val top3DStream = key2ProvinceAggrCountDStream.transform { rdd =>
      // rdd: RDD[(date_province_adid, count)] -> rows of (date, province, adid, count)
      val basicDateRDD = rdd.map { case (key, count) =>
        val keySplit = key.split("_")
        val date = keySplit(0)
        val province = keySplit(1)
        val adid = keySplit(2).toLong
        (date, province, adid, count)
      }

      import sparkSession.implicits._
      basicDateRDD.toDF("date", "province", "adid", "count").createOrReplaceTempView("tmp_basic_info")

      // Rank ads within each (date, province) by descending count and keep the top 3.
      val sql = "select date, province, adid, count from(" +
        "select date, province, adid, count, " +
        "row_number() over(partition by date,province order by count desc) rank from tmp_basic_info) t " +
        "where rank <= 3"
      sparkSession.sql(sql).rdd
    }

    // Write the top-3 rows to MySQL.
    top3DStream.foreachRDD { rdd =>
      // rdd: RDD[Row] with columns date, province, adid, count
      rdd.foreachPartition { items =>
        val top3Array = new ArrayBuffer[AdProvinceTop3]()
        for (item <- items) {
          val date = item.getAs[String]("date")
          val province = item.getAs[String]("province")
          val adid = item.getAs[Long]("adid")
          val count = item.getAs[Long]("count")
          top3Array += AdProvinceTop3(date, province, adid, count)
        }
        AdProvinceTop3DAO.updateBatch(top3Array.toArray)
      }
    }
  } // proveinceTope3Adver

  /**
    * Requirement 4: per-minute click counts per ad over the last hour.
    *
    * Uses a 60-minute window sliding every minute over (yyyyMMddHHmm_adid, 1L) pairs
    * and writes the windowed totals to MySQL.
    *
    * @param adRealTimeFilterDStream stream of log lines "timestamp province city userid adid"
    */
  def getRecentHourClickCount(adRealTimeFilterDStream: DStream[String]) = {
    // (minuteOfDay_adid, 1L) for every click event.
    val key2TimeMinuteDStream = adRealTimeFilterDStream.map { log =>
      val logSplit = log.split(" ")
      val timeStamp = logSplit(0).toLong
      val timeMinute = DateUtils.formatTimeMinute(new Date(timeStamp)) // yyyyMMddHHmm
      val adid = logSplit(4).toLong
      (timeMinute + "_" + adid, 1L)
    }

    // Window of 60 minutes, sliding every 1 minute.
    val key2WindowDStream =
      key2TimeMinuteDStream.reduceByKeyAndWindow((a: Long, b: Long) => a + b, Minutes(60), Minutes(1))

    // Write the windowed counts to MySQL.
    key2WindowDStream.foreachRDD { rdd =>
      rdd.foreachPartition { items =>
        val trendArray = new ArrayBuffer[AdClickTrend]()
        for ((key, count) <- items) {
          val keySplit = key.split("_")
          val timeMinute = keySplit(0) // yyyyMMddHHmm
          val date = timeMinute.substring(0, 8)
          val hour = timeMinute.substring(8, 10)
          val minute = timeMinute.substring(10)
          val adid = keySplit(1).toLong
          trendArray += AdClickTrend(date, hour, minute, adid, count)
        }
        AdClickTrendDAO.updateBatch(trendArray.toArray)
      }
    }
  } // getRecentHourClickCount

} // object AdverStat
