import java.text.DateFormat
import java.{io, lang}
import java.util.Date

import commons.conf.ConfigurationManager
import commons.constant.Constants
import commons.utils.DateUtils
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Duration, Minutes, Seconds, StreamingContext}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer

/**
 * 集群启动步骤：
 * 1、启动zk
 *  zkServer.sh start   启动
 *  zkServer.sh stop    停止
 *  zkServer.sh status 查看状态
 * 2、启动kafka
 * nohup kafka-server-start.sh /opt/module/kafka_2.11-1.1.0/config/server.properties 1>/opt/module/kafka_2.11-1.1.0/logs/kafka_std.log 2>/opt/module/kafka_2.11-1.1.0/logs/kafka_err.log &
 * 3、创建topic
 * kafka-topics.sh \
 * --create \
 * --zookeeper hadoop101:2181,hadoop102:2181,hadoop103:2181 \
 * --replication-factor 3 \
 * --partitions 10 \
 * --topic AdRealTimeLog1
 * 4、开启消费者模拟消费数据：
 *
 * kafka-console-consumer.sh \
 * --zookeeper hadoop101:2181,hadoop102:2181,hadoop103:2181 \
 * --from-beginning \
 * --topic AdRealTimeLog1
 * 5、运行MockRealTimeData 生成实时模拟数据
 *
 */
object AdverStat {




  def main(args: Array[String]): Unit = {

    // 1. Create the SparkConf and the StreamingContext (5-second batch interval).
    val sparkConf = new SparkConf().setAppName("adver").setMaster("local[*]")
    val sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()
    // val streamingContext = StreamingContext.getActiveOrCreate(checkpointDir, func)
    val streamingContext = new StreamingContext(sparkSession.sparkContext, Seconds(5))

    // 2. Read the Kafka broker list and topic name from the project configuration.
    val kafka_brokers = ConfigurationManager.config.getString(Constants.KAFKA_BROKERS)
    val kafka_topics = ConfigurationManager.config.getString(Constants.KAFKA_TOPICS)

    // 3. Assemble the Kafka consumer parameters.
    val kafkaParam = Map(
      "bootstrap.servers" -> kafka_brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "group1", // consumer group id
      // auto.offset.reset
      // latest: use the committed offset if one exists, otherwise start from the newest data;
      // earliest: use the committed offset if one exists, otherwise start from the oldest data;
      // none: use the committed offset if one exists, otherwise fail.
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // 4. Create the direct Kafka input stream.
    // adRealTimeDStream: DStream[RDD RDD RDD ...]  RDD[message]  message: (key value)=>(null,value)
    val adRealTimeDStream = KafkaUtils.createDirectStream[String, String](streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(kafka_topics), kafkaParam)
    )

    // Keep only the value of each Kafka record.
    // adReadTimeValueDStream: DStream[RDD RDD RDD ...]  RDD[String]
    // String: "timestamp province city userid adid"
    val adReadTimeValueDStream = adRealTimeDStream.map(item => item.value())

    // Filter out every record whose userid appears in the MySQL ad blacklist.
    val adRealTimeFilterDStream = adReadTimeValueDStream.transform {
      logRDD =>
        // blackListArray: Array[AdBlacklist]     AdBlacklist: userId
        // Load all black-listed users from MySQL (re-queried once per batch).
        val blackListArray: Array[AdBlacklist] = AdBlacklistDAO.findAll()

        // userIdArray: Array[Long]  [userId1, userId2, ...]
        // Extract just the userid of each blacklist entry.
        val userIdArray: Array[Long] = blackListArray.map(item => item.userid)

        // Keep only records whose userid is NOT in the blacklist.
        logRDD.filter {
          // log : timestamp province city userid adid
          case log =>
            val logSplit = log.split(" ")
            val userId = logSplit(3).toLong
            !userIdArray.contains(userId)
        }
    }

    // updateStateByKey (used downstream) requires a checkpoint directory.
    streamingContext.checkpoint("./spark-streaming")
    // Checkpoint the filtered stream every 10 seconds.
    adRealTimeFilterDStream.checkpoint(Duration(10000))


    // TODO: requirement 7 — real-time ad-click blacklist generation
    generateBlackList(adRealTimeFilterDStream)

    // TODO: requirement 8 — cumulative daily ad clicks per province/city (currently disabled)
//    val key2ProvinceCityCountDStream = provinceCityClickStat(adRealTimeFilterDStream)

    // TODO: requirement 9 — daily top-3 ads per province (currently disabled)
//    proveinceTope3Adver(sparkSession, key2ProvinceCityCountDStream)

    // TODO: requirement 10 — ad clicks during the last hour (currently disabled)
//    getRecentHourClickCount(adRealTimeFilterDStream)



    streamingContext.start()
    streamingContext.awaitTermination()
  }

  /**
   * TODO:需求十：最近一个小时广告点击量统计
   * 通过Spark Streaming的窗口操作(reduceByKeyAndWindow)实现统计一个小时内每个广告每分钟的点击量。
   *
   * @param adRealTimeFilterDStream
   * @return
   */
  def getRecentHourClickCount(adRealTimeFilterDStream: DStream[String]) = {

    // Re-key every click record as "yyyyMMddHHmm_adid" -> 1.
    val minuteAdidToOne: DStream[(String, Long)] = adRealTimeFilterDStream.map { log =>
      // log: "timestamp province city userId adid"
      val fields = log.split(" ")
      val clickMinute = DateUtils.formatTimeMinute(new Date(fields(0).toLong))
      (clickMinute + "_" + fields(4).toLong, 1L)
    }

    /*
     * Sliding-window aggregation over the keyed stream:
     * window length Minutes(60), slide interval Minutes(1) —
     * i.e. once per minute, the per-minute click count of each ad
     * over the last hour.
     */
    val windowedCounts: DStream[(String, Long)] =
      minuteAdidToOne.reduceByKeyAndWindow((x: Long, y: Long) => x + y, Minutes(60), Minutes(1))

    // Persist each partition's window results as AdClickTrend rows.
    windowedCounts.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        val trends = partition.map { case (windowKey, clickCount) =>
          val Array(minutePart, adidPart) = windowKey.split("_")
          // minutePart is yyyyMMddHHmm: date (8) + hour (2) + minute (2)
          AdClickTrend(
            minutePart.substring(0, 8),
            minutePart.substring(8, 10),
            minutePart.substring(10),
            adidPart.toLong,
            clickCount)
        }.toArray
        AdClickTrendDAO.updateBatch(trends)
      }
    }
  }

  /**
   * TODO:需求九：每天每个省份Top3热门广告
   * @param sparkSession
   * @param key2ProvinceCityCountDStream
   * @return
   */
  def proveinceTope3Adver(sparkSession: SparkSession,
                          key2ProvinceCityCountDStream: DStream[(String, Long)]) = {
    // Input keys: date_province_city_adid. Drop the city component so clicks
    // aggregate per (date, province, adid).
    val provinceKeyed: DStream[(String, Long)] = key2ProvinceCityCountDStream.map {
      case (key, count) =>
        val parts = key.split("_")
        (parts(0) + "_" + parts(1) + "_" + parts(3), count)
    }

    // Total clicks per date_province_adid.
    val provinceAggregated: DStream[(String, Long)] = provinceKeyed.reduceByKey(_ + _)

    // For every batch, register the counts as a temp view and pick the
    // top 3 ads per (date, province) with a row_number() window function.
    val top3DStream: DStream[Row] = provinceAggregated.transform { rdd =>
      val basicRows = rdd.map { case (key, count) =>
        val parts = key.split("_")
        (parts(0), parts(1), parts(2).toLong, count)
      }
      import sparkSession.implicits._
      basicRows.toDF("date", "province", "adid", "count").createOrReplaceTempView("tmp_basic_info")

      // row_number() over(partition by date,province order by count desc)
      // numbers the ads of each (date, province) group from 1 by descending
      // click count; keeping rank <= 3 yields the top-3 per group.
      val sql = "select date, province, adid, count from(select date, province, adid, count, row_number() over(partition by date,province order by count desc) rank from tmp_basic_info) t where rank <= 3"

      sparkSession.sql(sql).rdd
    }

    // Persist each partition's top-3 rows as AdProvinceTop3 records.
    top3DStream.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        val top3 = partition.map { row =>
          AdProvinceTop3(
            row.getAs[String]("date"),
            row.getAs[String]("province"),
            row.getAs[Long]("adid"),
            row.getAs[Long]("count"))
        }.toArray
        AdProvinceTop3DAO.updateBatch(top3)
      }
    }
  }
  /**
   * TODO:需求八：各省各城市一天中的广告点击量（累积统计）
   *
   * @param adRealTimeFilterDStream
   */
  def provinceCityClickStat(adRealTimeFilterDStream: DStream[String]) = {
    // Each log line: "timestamp province city userid adid".
    // Key every click as date_province_city_adid -> 1.
    // NOTE(review): province/city are parsed with toLong, i.e. the feed is
    // assumed to carry numeric codes in those fields — confirm with producer.
    val dailyClickKeyed: DStream[(String, Long)] = adRealTimeFilterDStream.map { log =>
      val fields: Array[String] = log.split(" ")
      val dateKey: String = DateUtils.formatDateKey(new Date(fields(0).toLong))
      val key = dateKey + "_" + fields(1).toLong + "_" + fields(2).toLong + "_" + fields(4).toLong
      (key, 1L)
    }

    /*
     * updateStateByKey maintains a running Long total per key across batches;
     * the state is serialized into the checkpoint configured in main and
     * restored on the next batch, so the result is a global cumulative count.
     */
    val runningTotals: DStream[(String, Long)] = dailyClickKeyed.updateStateByKey[Long] {
      // batchValues: all counts for the key in this batch;
      // previous: the accumulated total restored from state (if any).
      (batchValues: Seq[Long], previous: Option[Long]) =>
        Some(previous.getOrElse(0L) + batchValues.sum)
    }

    // Persist the cumulative totals as AdStat rows, one batch of rows per partition.
    runningTotals.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        // key: date_province_city_adid
        val stats = partition.map { case (key, count) =>
          val parts = key.split("_")
          AdStat(parts(0), parts(1), parts(2), parts(3).toLong, count)
        }.toArray
        AdStatDAO.updateBatch(stats)
      }
    }
    runningTotals
  }


  /**
   * TODO: 需求七：广告点击黑名单实时统计
   *
   * @param adRealTimeFilterDStream 黑名单表中包含的userid的数据
   * @return
   */
  def generateBlackList(adRealTimeFilterDStream: DStream[String]) = {
    // Each log line: "timestamp province city userid adid".
    // Key every click as date_userid_adid -> 1.
    val key2NumDStream: DStream[(String, Long)] = adRealTimeFilterDStream.map { log =>
      // Fix: split each line once instead of three times per record.
      val logSplit: Array[String] = log.split(" ")
      val timeStamp: Long = logSplit(0).toLong
      val date: String = DateUtils.formatDateKey(new Date(timeStamp))
      val userid: String = logSplit(3)
      val adid: String = logSplit(4)

      (date + "_" + userid + "_" + adid, 1L)
    }

    // Per-batch click count per (date, user, ad).
    val key2CountDStream: DStream[(String, Long)] = key2NumDStream.reduceByKey(_ + _)

    // Accumulate the per-batch counts into the ad_user_click_count table.
    key2CountDStream.foreachRDD(
      rdd => rdd.foreachPartition {
        items =>
          val clickCountArray: ArrayBuffer[AdUserClickCount] = ArrayBuffer[AdUserClickCount]()
          for ((key, count) <- items) {
            val keyArray: Array[String] = key.split("_")
            val date: String = keyArray(0)
            val userid: Long = keyArray(1).toLong
            val adid: Long = keyArray(2).toLong
            clickCountArray += AdUserClickCount(date, userid, adid, count)
          }
          AdUserClickCountDAO.updateBatch(clickCountArray.toArray)
      })

    // Keep only the keys whose accumulated total (read back from MySQL,
    // which the block above just updated) exceeds 100 clicks.
    // NOTE(review): this issues one MySQL lookup per distinct key per batch;
    // consider a batched lookup if the keyspace grows.
    val key2BlackListDStream: DStream[(String, Long)] = key2CountDStream.filter {
      case (key, count) =>
        val keySplit = key.split("_")
        val date: String = keySplit(0)
        val userId: Long = keySplit(1).toLong
        val adid: Long = keySplit(2).toLong
        AdUserClickCountDAO.findClickCountByMultiKey(date, userId, adid) > 100
    }

    // Extract the offending user ids and de-duplicate them within the batch.
    // userIdDStream: DStream[UserId]
    val userIdDStream: DStream[Long] = key2BlackListDStream.map {
      case (key, count) => key.split("_")(1).toLong
    }.transform(rdd => rdd.distinct())

    // Insert the black-listed user ids into MySQL.
    userIdDStream.foreachRDD {
      rdd =>
        rdd.foreachPartition {
          items =>
            val userIdArray: ArrayBuffer[AdBlacklist] = new ArrayBuffer[AdBlacklist]()

            for (userid <- items) {
              userIdArray += AdBlacklist(userid)
            }

            AdBlacklistDAO.insertBatch(userIdArray.toArray)
        }
    }

  }

}
