package com.niit.adverStat

import com.niit.commons.conf.ConfigurationManager
import com.niit.commons.constant.Constants
import com.niit.commons.utils.DateUtils
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Duration, Minutes, Seconds, StreamingContext}
import spire.compat.fractional

import java.lang
import java.util.Date
import scala.collection.mutable.ArrayBuffer
import scala.math.Fractional.Implicits.infixFractionalOps

/**
 * @author Liweijian.
 * @Description
 * ①实时维护黑名单
 * ②各省各城市一天中的广告点击量（累积统计）
 * ③统计各省Top3热门广告
 * ④新增：根据点击量划分黑名单等级
 * @date 2025/07/11 14:52
 */

// 确保 AdBlacklist 定义在此处或已被正确导入，且 blacklistLevel 为 Int 类型
// case class AdBlacklist(userid: Long, blacklistLevel: Int) // 请确认这里的类型是 Int

object AdverStat {
  def main(args: Array[String]): Unit = {

    // Local-mode Spark session with Hive support; WARN keeps the console
    // readable for the println-based debug output used throughout this job.
    val sparkconf = new SparkConf().setAppName("adverStat").setMaster("local[*]")
    val sparkSession = SparkSession.builder().config(sparkconf).enableHiveSupport().getOrCreate()
    sparkSession.sparkContext.setLogLevel("WARN")

    // 5-second micro-batches.
    val streamingContext = new StreamingContext(sparkSession.sparkContext, Seconds(5))

    // NOTE(review): the broker list is hard-coded while the topic is read from
    // configuration — consider moving the brokers into the config as well.
    val kafka_brokers = "192.168.136.128:9092"
    val kafka_topics = ConfigurationManager.config.getString(Constants.KAFKA_TOPICS)

    val kafkaParam = Map(
      "bootstrap.servers" -> kafka_brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "AdRealTimeLog",
      "auto.offset.reset" -> "latest",
      // Auto-commit disabled: offsets are not committed automatically.
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    val adRealTimeDStream = KafkaUtils.createDirectStream(streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(kafka_topics), kafkaParam)
    )

    // Debug: sample the raw Kafka records of each batch.
    adRealTimeDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("--- 接收到来自Kafka的原始消息 ---")
        rdd.map(record => s"Key: ${record.key()}, Value: ${record.value()}")
          .take(5).foreach(println)
        println(s"本批次接收到的原始消息总数: ${rdd.count()}")
      }
    }

    val adRealTimeValueDStream: DStream[String] = adRealTimeDStream.map(item => item.value())

    // Drop every log line produced by a currently blacklisted user. The
    // blacklist is re-read from the DB once per batch (inside transform), so
    // newly blacklisted users take effect from the next batch onward.
    val filterBlackListRDD: DStream[String] = adRealTimeValueDStream.transform {
      logRDD =>
        val blacklistsWithLevel: Array[AdBlacklist] = AdBlacklistDAO.findAll()
        val blackListUserIDSet: Set[Long] = blacklistsWithLevel.map(_.userid).toSet // Set for O(1) membership checks
        println(s"--- 当前黑名单用户ID列表: ${blackListUserIDSet.mkString(", ")} ---")

        val filterUserIdLogRDD: RDD[String] = logRDD.filter { log =>
          val parts = log.split(" ")
          // Expected format: timestamp province city userid adid (5 fields).
          // BUGFIX: the original check was `parts.length >= 4`, which let
          // 4-field lines through even though every downstream stage reads
          // parts(4) (adid) — those lines crashed the later map stages.
          if (parts.length >= 5) {
            try {
              // Validate all numeric fields consumed downstream so malformed
              // lines are dropped here instead of throwing later.
              parts(0).toLong // timestamp
              parts(4).toLong // adid
              val userid = parts(3).toLong
              !blackListUserIDSet.contains(userid) // keep only non-blacklisted users
            } catch {
              case _: NumberFormatException =>
                println(s"  跳过格式不正确的日志 (数值字段解析失败): $log")
                false
            }
          } else {
            println(s"  跳过格式不正确的日志 (部分不足): $log")
            false
          }
        }
        println(s"--- 本批次过滤黑名单后剩余的日志数量: ${filterUserIdLogRDD.count()} ---")
        filterUserIdLogRDD
    }
    // Checkpointing is required by updateStateByKey in provinceCityClickStat.
    streamingContext.checkpoint("./spark-streaming")
    filterBlackListRDD.checkpoint(Duration(10000))

    println("\n--- 启动黑名单生成流程 (包含等级划分) ---")
    generateBlackListWithLevels(filterBlackListRDD)

    println("\n--- 启动各省市广告点击量统计流程 ---")
    val ClickCountAggDStream: DStream[(String, Long)] = provinceCityClickStat(filterBlackListRDD)

    println("\n--- 启动各省Top3热门广告统计流程 ---")
    proveinceTope3Adver(sparkSession, ClickCountAggDStream)

    println("\n--- 启动最近一小时广告点击量统计流程 ---")
    getRecentHourClickCount(filterBlackListRDD)

    streamingContext.start()
    streamingContext.awaitTermination()
  }

  /**
   * Maps a cumulative click count to a blacklist severity level.
   *
   * @param clickCount total number of ad clicks by the user
   * @return 3 (level A), 2 (level B), 1 (level C), or 0 (below every
   *         threshold — the user is not blacklisted)
   */
  def getBlacklistLevel(clickCount: Long): Int =
    clickCount match {
      case c if c >= 100 => 3 // level A
      case c if c >= 50  => 2 // level B
      case c if c >= 10  => 1 // level C
      case _             => 0 // below the lowest threshold
    }

  /**
   * Requirement 1: maintain the ad-click blacklist in real time, with
   * severity levels derived from cumulative click counts.
   *
   * Pipeline: key each click by date_userId_adId, reduce the batch counts,
   * persist them via AdUserClickCountDAO, then look up the cumulative count
   * per key and blacklist every user whose level (see getBlacklistLevel) is
   * above 0.
   *
   * @param filterBlackListRDD blacklist-filtered log lines in the format
   *                           "timestamp province city userid adid"
   */
  def generateBlackListWithLevels(filterBlackListRDD: DStream[String]) = {
    // (date_userId_adId, 1L) — one pair per click event.
    val LogAndOneDStream: DStream[(String, Long)] = filterBlackListRDD.map {
      item =>
        val splitLog = item.split(" ")
        val timestamp: Long = splitLog(0).toLong
        val date = DateUtils.formatDateKey(new Date(timestamp))
        val userId = splitLog(3).toLong
        val adid = splitLog(4).toLong
        (date + "_" + userId + "_" + adid, 1L)
    }

    LogAndOneDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  LogAndOneDStream (日期_用户ID_广告ID, 1) 示例:")
        rdd.take(5).foreach(println)
      }
    }

    // Batch-local click totals per (date, user, ad).
    val ReducedLogData: DStream[(String, Long)] = LogAndOneDStream.reduceByKey(_ + _)

    ReducedLogData.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  ReducedLogData (日期_用户ID_广告ID, 总点击数) 示例:")
        rdd.take(5).foreach(println)
      }
    }

    // Persist the batch counts so the DAO accumulates cross-batch totals.
    // Registered BEFORE the blacklist output ops below, so each batch's
    // counts are in the DB before the cumulative lookup runs.
    ReducedLogData.foreachRDD { rdd =>
      rdd.foreachPartition { itemIterator =>
        val arrayAdUser = new ArrayBuffer[AdUserClickCount]()
        itemIterator.foreach { case (key, count) =>
          val splitLogData = key.split("_")
          arrayAdUser += AdUserClickCount(splitLogData(0), splitLogData(1).toLong, splitLogData(2).toLong, count)
        }
        if (arrayAdUser.nonEmpty) {
          println(s"  正在更新数据库中 ${arrayAdUser.size} 条 AdUserClickCount 记录。")
          AdUserClickCountDAO.updateBatch(arrayAdUser.toArray)
        }
      }
    }

    // Single pass replacing the original filter + map pair: the old code
    // queried the cumulative count from the DAO twice per key (once in the
    // filter, once again in the map). This flatMap performs one lookup per
    // key and emits the AdBlacklist record directly; output is unchanged.
    val usersToBlacklist: DStream[AdBlacklist] = ReducedLogData.flatMap { case (key, _) =>
      val splitLogData = key.split("_")
      val date = splitLogData(0)
      val userId = splitLogData(1).toLong
      val adid = splitLogData(2).toLong
      // Cumulative (cross-batch) count from the DB, not the batch-local one.
      val currentCount = AdUserClickCountDAO.findClickCountByMultiKey(date, userId, adid)
      val level = getBlacklistLevel(currentCount)
      if (level > 0) { // only level > 0 enters the blacklist
        println(s"  用户 $userId, 广告 $adid 在 $date 的点击数: $currentCount. 达到黑名单级别: $level。")
        Some(AdBlacklist(userId, level))
      } else {
        None
      }
    }

    // Debug: show who is about to be blacklisted this batch.
    usersToBlacklist.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  --- 潜在的黑名单用户 (用户ID, 等级) ---")
        rdd.take(5).foreach(println)
        println(s"  本批次潜在黑名单用户总数: ${rdd.count()}")
      } else {
        println("  本批次没有用户达到黑名单标准。")
      }
    }

    // Hand the candidates to the DAO, which inserts new rows or upgrades the
    // level of existing ones.
    usersToBlacklist.foreachRDD { rdd =>
      rdd.foreachPartition { itemIterator =>
        val blacklists = new ArrayBuffer[AdBlacklist]()
        itemIterator.foreach(blacklists += _)
        if (blacklists.nonEmpty) {
          println(s"  正在批量插入/更新 ${blacklists.size} 条黑名单记录 (含等级)。")
          AdBlacklistDAO.insertBatch(blacklists.toArray) // DAO handles the level upsert
          println("  AdBlacklist 记录已插入/更新。")
        }
      }
    }
  }


  /**
   * Requirement 2: cumulative per-day ad click counts by province and city.
   *
   * Keys each click as "date_province_city_adId", keeps a running total per
   * key with updateStateByKey (which relies on the streaming checkpoint set
   * up in main), persists the totals via AdStatDAO, and returns the
   * aggregated stream for the Top-3 computation.
   *
   * @param filterBlackListRDD blacklist-filtered log lines in the format
   *                           "timestamp province city userid adid"
   * @return DStream of ("date_province_city_adId", cumulative click count)
   */
  def provinceCityClickStat(filterBlackListRDD: DStream[String]) = {
    // (date_province_city_adId, 1L) — one pair per click event.
    val LogStringValueRDD: DStream[(String, Long)] = filterBlackListRDD.map {
      item =>
        val splitLog = item.split(" ")
        val timeLong = splitLog(0).toLong
        val date = DateUtils.formatDateKey(new Date(timeLong))
        val province = splitLog(1)
        val city = splitLog(2)
        val adId = splitLog(4).toLong
        (date + "_" + province + "_" + city + "_" + adId, 1L)
    }

    // Debug: sample the raw keyed pairs.
    LogStringValueRDD.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  LogStringValueRDD (日期_省份_城市_广告ID, 1) 示例:")
        rdd.take(5).foreach(println)
      }
    }

    // Running total per key across batches. Idiomatic sum replaces the
    // original mutable-var accumulation loop; the result is identical.
    val key2StateDStream: DStream[(String, Long)] = LogStringValueRDD.updateStateByKey[Long] {
      (values: Seq[Long], state: Option[Long]) =>
        Some(state.getOrElse(0L) + values.sum)
    }

    // Print the cumulative totals and persist them, batched per partition.
    key2StateDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  key2StateDStream (日期_省份_城市_广告ID, 累计点击数) 示例:")
        rdd.take(5).foreach(println)
        println(s"  本批次唯一省份_城市_广告ID组合总数: ${rdd.count()}")
      } else {
        println("  本批次没有省份_城市_广告ID数据用于累计统计。")
      }

      rdd.foreachPartition { items =>
        val adStatArray = new ArrayBuffer[AdStat]()
        // key layout: date_province_city_adId
        items.foreach { case (key, count) =>
          val keySplit = key.split("_")
          adStatArray += AdStat(keySplit(0), keySplit(1), keySplit(2), keySplit(3).toLong, count)
        }
        if (adStatArray.nonEmpty) {
          println(s"  正在更新数据库中 ${adStatArray.size} 条 AdStat 记录。")
          AdStatDAO.updateBatch(adStatArray.toArray)
          println("  AdStat 记录已更新。")
        }
      }
    }
    key2StateDStream
  }

  /**
   * Requirement 3: Top-3 most-clicked ads per province.
   *
   * Re-keys the cumulative (date_province_city_adId, count) stream to
   * (date_province_adId, count), re-aggregates, then uses a Spark SQL
   * row_number() window to pick the top 3 ads per (date, province) and
   * persists the result via AdProvinceTop3DAO.
   */
  def proveinceTope3Adver(sparkSession: SparkSession, ClickCountAggDStream: DStream[(String, Long)]) ={
    val newKeyClickCountDStream: DStream[(String, Long)] = ClickCountAggDStream.map {
      item =>
        // item._1 is "date_province_city_adId", item._2 is the click count
        val SplitValue: Array[String] = item._1.split("_")
        val timeStamp = SplitValue(0)
        val province = SplitValue(1)
        val adId = SplitValue(3).toLong // note: the city field is intentionally dropped for the per-province Top3
        val str = timeStamp + "_" + province + "_" + adId
        (str, item._2)
    }
    // Debug: print the (date_province_adId, count) pairs
    newKeyClickCountDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  newKeyClickCountDStream (日期_省份_广告ID, 点击数) 示例:")
        rdd.take(5).foreach(println)
      }
    }

    // key: date province adId, value: click count
    // Re-aggregate: ClickCountAggDStream is keyed by (date_province_city_adId);
    // dropping the city merges per-city counts, so we must reduce again.
    val reducedDStream = newKeyClickCountDStream.reduceByKey(_+_)

    // Debug: print the counts aggregated by province and ad id
    reducedDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  reducedDStream (日期_省份_广告ID, 总点击数) 示例:")
        rdd.take(5).foreach(println)
        println(s"  本批次唯一省份_广告ID组合总数: ${rdd.count()}")
      } else {
        println("  本批次没有省份_广告ID数据用于Top3计算。")
      }
    }

    val Top3RDD: DStream[Row] = reducedDStream.transform {
      rdd =>
        if (rdd.isEmpty()) {
          println("  RDD为空，跳过Top3计算。")
          sparkSession.emptyDataFrame.rdd // return an empty RDD[Row] so downstream stages see no rows
        } else {
          // Split each key into columns so the RDD can become a DataFrame.
          val LogRDD: RDD[(String, String, Long, Long)] = rdd.map {
            case (item, count) =>
              val keySplit = item.split("_")
              val date = keySplit(0)
              val province = keySplit(1)
              val adid = keySplit(2).toLong
              (date, province, adid, count)
          }

          // The temp view must be registered before sparkSession.sql below.
          import sparkSession.implicits._
          val df = LogRDD.toDF("date", "province", "adid", "count")
          df.createOrReplaceTempView("tmp_basic_info")

          // row_number() over (date, province) ranks ads by descending click
          // count; rank<=3 keeps the top three per province per day.
          val sql = "select date,province,adid,count " +
            "from (select date,province,adid,count,row_number() over(partition by date,province order by count desc) rank from tmp_basic_info ) t " +
            "where rank<=3"

          val top3DF = sparkSession.sql(sql)

          if (top3DF.isEmpty) {
            println("  Top3 SQL 查询返回空结果。")
          }
          top3DF.rdd
        }
    }
    Top3RDD.foreachRDD{
      RDD=>
        if (!RDD.isEmpty()) {
          println("  --- 最终将插入 AdProvinceTop3 表的Top3 RDD ---")
          RDD.collect().foreach(println)
        } else {
          println("  本批次没有Top3数据需要插入 AdProvinceTop3 表。")
        }
        RDD.foreachPartition{
          rows=>
            val adProvinceTops = new ArrayBuffer[AdProvinceTop3]()
            rows.foreach { row =>
              // Column names and types match the toDF(...) call above.
              val date = row.getAs[String]("date")
              val province=row.getAs[String]("province")
              val adid=row.getAs[Long]("adid")
              val count=row.getAs[Long]("count")
              adProvinceTops+= AdProvinceTop3(date,province,adid,count)
            }
            if (adProvinceTops.nonEmpty) {
              println(s"  正在更新数据库中 ${adProvinceTops.size} 条 AdProvinceTop3 记录。")
              AdProvinceTop3DAO.updateBatch(adProvinceTops.toArray)
              println("  AdProvinceTop3 记录已更新。")
            } else {
              // no AdProvinceTop3 rows to update in this partition
            }
        }
    }
  }

  /**
   * Requirement 4: per-minute ad click counts over the most recent hour.
   *
   * Re-keys each log line as "yyyyMMddHHmm_adId", aggregates over a sliding
   * 60-minute window that advances every minute, and persists the per-minute
   * trend rows via AdClickTrendDAO.
   *
   * @param filterBlackListRDD blacklist-filtered log lines in the format
   *                           "timestamp province city userid adid"
   */
  def getRecentHourClickCount(filterBlackListRDD: DStream[String]) = {
    // One (yyyyMMddHHmm_adId, 1L) pair per click event.
    val minutePairDStream = filterBlackListRDD.map { logLine =>
      val fields = logLine.split(" ")
      val clickTime = new Date(fields(0).toLong)
      val minuteKey = DateUtils.formatTimeMinute(clickTime) // yyyyMMddHHmm
      (minuteKey + "_" + fields(4).toLong, 1L)
    }

    // Debug: sample the raw per-minute pairs.
    minutePairDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  newKeyAndLongDStream (YYYYMMDDHHMM_广告ID, 1) 示例:")
        rdd.take(5).foreach(println)
      }
    }

    // Sliding window: look back 60 minutes, recompute once per minute.
    val windowedCounts: DStream[(String, Long)] = minutePairDStream.reduceByKeyAndWindow(
      (a: Long, b: Long) => a + b, // sum counts within the window
      Minutes(60),                 // window length (look back one hour)
      Minutes(1)                   // slide interval (recompute every minute)
    )

    // Print the windowed totals and persist them, batched per partition.
    windowedCounts.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        println("  ReduceByWindow (YYYYMMDDHHMM_广告ID, 点击数) 示例:")
        rdd.take(5).foreach(println)
        println(s"  本批次唯一趋势数据总数: ${rdd.count()}")
      } else {
        println("  本批次没有趋势数据。")
      }

      rdd.foreachPartition { partition =>
        val trendRows = new ArrayBuffer[AdClickTrend]()
        partition.foreach { case (key, count) =>
          val keyParts = key.split("_")
          val minute = keyParts(0) // yyyyMMddHHmm — slice into date/hour/minute
          trendRows += AdClickTrend(
            minute.substring(0, 8),
            minute.substring(8, 10),
            minute.substring(10, 12),
            keyParts(1).toLong,
            count
          )
        }
        if (trendRows.nonEmpty) {
          println(s"  正在更新数据库中 ${trendRows.size} 条 AdClickTrend 记录。")
          AdClickTrendDAO.updateBatch(trendRows.toArray)
          println("  AdClickTrend 记录已更新。")
        }
      }
    }
  }

}