import java.util.Date

import com.atguigu.commons.conf.ConfigurationManager
import com.atguigu.commons.constant.Constants
import com.atguigu.commons.utils.DateUtils
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Duration, Minutes, Seconds, StreamingContext}

import scala.collection.mutable.ArrayBuffer


/**
 * Real-time advertising click analytics over a Kafka direct stream.
 *
 * Input records are space-separated log lines of the form:
 *   "timestamp province city userid adid"
 *
 * Four requirements are computed per 5-second micro-batch:
 *   1. maintain a user blacklist (>100 daily clicks on one ad),
 *   2. cumulative per-day/province/city/ad click totals,
 *   3. top-3 ads per province per day,
 *   4. per-minute click counts over a sliding one-hour window.
 */
object AdverStat {

	def main(args: Array[String]): Unit = {

		val sparkConf = new SparkConf().setAppName("adver").setMaster("local[*]")
		val sparkSession = SparkSession.builder().config(sparkConf).enableHiveSupport().getOrCreate()

		// Micro-batch interval: 5 seconds.
		val streamingContext = new StreamingContext(sparkSession.sparkContext, Seconds(5))

		val kafka_brokers = ConfigurationManager.config.getString(Constants.KAFKA_BROKERS)
		val kafka_topics = ConfigurationManager.config.getString(Constants.KAFKA_TOPICS)

		val kafkaParam = Map(
			"bootstrap.servers" -> kafka_brokers,
			"key.deserializer" -> classOf[StringDeserializer],
			"value.deserializer" -> classOf[StringDeserializer],
			"group.id" -> "group1",
			// auto.offset.reset:
			//   latest  : use the committed offset if one exists, otherwise consume from the newest data
			//   earliest: use the committed offset if one exists, otherwise consume from the oldest data
			//   none    : use the committed offset if one exists, otherwise fail
			"auto.offset.reset" -> "latest",
			// Offsets are not auto-committed to Kafka; recovery relies on checkpointing.
			"enable.auto.commit" -> (false: java.lang.Boolean)
		)

		val adRealTimeDStream = KafkaUtils.createDirectStream(streamingContext,
			LocationStrategies.PreferConsistent,
			ConsumerStrategies.Subscribe[String, String](Array(kafka_topics), kafkaParam))

		// Keep only each record's value: one space-separated log line
		//   "timestamp province city userid adid"
		val adReadTimeValueDStream: DStream[String] = adRealTimeDStream.map(_.value())

		// Drop clicks from blacklisted users. transform runs its closure on the
		// driver once per batch, so the blacklist is re-read from the DAO every
		// batch and entries added by generateBlackList take effect on later batches.
		val adRealTimeFilterDStream: DStream[String] = adReadTimeValueDStream.transform { logRDD =>
			val blacklistedUserIds = AdBlacklistDAO.findAll().map(_.userid)
			logRDD.filter(log => !blacklistedUserIds.contains(log.split(" ")(3).toLong))
		}

		streamingContext.checkpoint("./spark-streaming")

		// A DStream's checkpoint interval should be a multiple of the batch
		// interval; here 10 s = 2 x 5 s batches.
		adRealTimeFilterDStream.checkpoint(Duration(10000))

		// Requirement 1: maintain the user blacklist in real time.
		generateBlackList(adRealTimeFilterDStream)

		// Requirement 2: per-day, per-province, per-city ad-click totals.
		val key2ProvinceCityCountDStream: DStream[(String, Long)] = provinceCityClickStat(adRealTimeFilterDStream)

		// Requirement 3: top-3 ads per province.
		provinceTop3Adver(sparkSession, key2ProvinceCityCountDStream)

		// Requirement 4: per-minute click counts over the last hour.
		getRecentHourClickCount(adRealTimeFilterDStream)

		streamingContext.start()
		streamingContext.awaitTermination()

	}


	/**
	 * Requirement 4: per-minute click counts for every ad over a sliding
	 * one-hour window (slide interval: one minute), persisted via
	 * AdClickTrendDAO.
	 *
	 * @param adRealTimeFilterDStream blacklist-filtered log lines
	 *                                ("timestamp province city userid adid")
	 */
	def getRecentHourClickCount(adRealTimeFilterDStream: DStream[String]) = {
		// Map every click to (yyyyMMddHHmm_adid, 1).
		val key2TimeMinuteDStream = adRealTimeFilterDStream.map { log =>
			val logSplit: Array[String] = log.split(" ")
			val timeStamp = logSplit(0).toLong
			// Minute-resolution bucket, formatted yyyyMMddHHmm by DateUtils.
			val timeMinute = DateUtils.formatTimeMinute(new Date(timeStamp))
			val adid = logSplit(4).toLong

			(timeMinute + "_" + adid, 1L)
		}

		// One-hour window, recomputed every minute.
		val key2WindowDStream = key2TimeMinuteDStream.reduceByKeyAndWindow((a: Long, b: Long) => a + b, Minutes(60),
			Minutes(1))

		key2WindowDStream.foreachRDD { rdd =>
			rdd.foreachPartition { items =>
				val trendArray = new ArrayBuffer[AdClickTrend]()
				for ((key, count) <- items) {
					val keySplit = key.split("_")
					// timeMinute layout: yyyyMMdd | HH | mm
					val timeMinute = keySplit(0)
					val date = timeMinute.substring(0, 8)
					val hour = timeMinute.substring(8, 10)
					val minute = timeMinute.substring(10)
					val adid = keySplit(1).toLong

					trendArray += AdClickTrend(date, hour, minute, adid, count)
				}

				AdClickTrendDAO.updateBatch(trendArray.toArray)
			}
		}
	}


	/**
	 * Requirement 3: for every province, the three ads with the highest daily
	 * click count, ranked with a SQL window function and persisted via
	 * AdProvinceTop3DAO.
	 *
	 * @param sparkSession                 session used to run the ranking SQL
	 * @param key2ProvinceCityCountDStream ("date_province_city_adid", count)
	 *                                     pairs from provinceCityClickStat
	 */
	def provinceTop3Adver(sparkSession: SparkSession,
	                      key2ProvinceCityCountDStream: DStream[(String, Long)]) = {
		// Re-key from city granularity to province granularity:
		// date_province_city_adid -> date_province_adid
		val key2ProvinceCountDStream: DStream[(String, Long)] = key2ProvinceCityCountDStream.map {
			case (key, count) =>
				val keySplit = key.split("_")
				val date = keySplit(0)
				val province = keySplit(1)
				val adid = keySplit(3)

				(date + "_" + province + "_" + adid, count)
		}

		// Sum the city-level counts for each (date, province, ad).
		val key2ProvinceAggrCountDStream: DStream[(String, Long)] = key2ProvinceCountDStream.reduceByKey(_ + _)

		val top3DStream = key2ProvinceAggrCountDStream.transform { rdd =>
			// rdd: RDD[(date_province_adid, count)]
			val basicDateRDD: RDD[(String, String, Long, Long)] = rdd.map {
				case (key, count) =>
					val keySplit = key.split("_")
					val date = keySplit(0)
					val province = keySplit(1)
					val adid = keySplit(2).toLong

					(date, province, adid, count)
			}

			import sparkSession.implicits._
			basicDateRDD.toDF("date", "province", "adid", "count").createOrReplaceTempView("tmp_basic_info")

			// Rank ads within each province by click count and keep the top 3.
			val sql = "SELECT " +
					"date," +
					"province," +
					"adid," +
					"count " +
					"FROM ( " +
					"SELECT " +
					"date," +
					"province," +
					"adid," +
					"count," +
					"ROW_NUMBER() OVER(PARTITION BY province ORDER BY count DESC) rank " +
					"FROM tmp_basic_info " +
					") t " +
					"WHERE rank <=3"

			sparkSession.sql(sql).rdd
		}

		top3DStream.foreachRDD { rdd =>
			// rdd elements are Rows with columns (date, province, adid, count).
			rdd.foreachPartition { items =>
				val top3Array = new ArrayBuffer[AdProvinceTop3]()
				for (item <- items) {
					val date = item.getAs[String]("date")
					val province = item.getAs[String]("province")
					val adid = item.getAs[Long]("adid")
					val count = item.getAs[Long]("count")

					top3Array += AdProvinceTop3(date, province, adid, count)
				}

				AdProvinceTop3DAO.updateBatch(top3Array.toArray)
			}
		}
	}


	/**
	 * Requirement 2: running ad-click totals per day, province, city and ad,
	 * accumulated across batches with updateStateByKey (checkpointing is
	 * enabled in main) and persisted via AdStatDAO.
	 *
	 * @param adRealTimeFilterDStream blacklist-filtered log lines
	 * @return ("date_province_city_adid", cumulativeCount) pairs, also consumed
	 *         by provinceTop3Adver
	 */
	def provinceCityClickStat(adRealTimeFilterDStream: DStream[String]) = {

		// Map every click to (date_province_city_adid, 1).
		val key2ProvinceCityDStream: DStream[(String, Long)] = adRealTimeFilterDStream.map { log =>
			val logSplit = log.split(" ")
			val timeStamp = logSplit(0).toLong
			// Day-resolution key produced by DateUtils.formatDateKey.
			val dateKey = DateUtils.formatDateKey(new Date(timeStamp))
			val province = logSplit(1)
			val city = logSplit(2)
			val adid = logSplit(4)

			(dateKey + "_" + province + "_" + city + "_" + adid, 1L)
		}

		// Fold this batch's counts into the persistent per-key state.
		val key2StateDStream: DStream[(String, Long)] = key2ProvinceCityDStream.updateStateByKey[Long] {
			(values: Seq[Long], state: Option[Long]) =>
				Some(state.getOrElse(0L) + values.sum)
		}

		// Persist the cumulative count of clicks on one ad, in one city of one
		// province, on one day.
		key2StateDStream.foreachRDD { rdd =>
			rdd.foreachPartition { items =>
				val adStatArray = new ArrayBuffer[AdStat]()

				for ((key, count) <- items) {
					val keySplit = key.split("_")
					val date = keySplit(0)
					val province = keySplit(1)
					val city = keySplit(2)
					val adid = keySplit(3).toLong

					adStatArray += AdStat(date, province, city, adid, count)
				}

				AdStatDAO.updateBatch(adStatArray.toArray)
			}
		}

		key2StateDStream
	}


	/**
	 * Requirement 1: accumulate per-user, per-ad daily click counts and add any
	 * user whose daily total on a single ad exceeds 100 clicks to the blacklist
	 * via AdBlacklistDAO.
	 *
	 * @param adRealTimeFilterDStream blacklist-filtered log lines
	 *                                ("timestamp province city userid adid")
	 */
	def generateBlackList(adRealTimeFilterDStream: DStream[String]) = {
		// Map every click to (date_userId_adid, 1).
		val key2NumDStream: DStream[(String, Long)] = adRealTimeFilterDStream.map { log =>
			val logSplit: Array[String] = log.split(" ")
			val timeStamp = logSplit(0).toLong
			// Day-resolution key produced by DateUtils.formatDateKey.
			val dateKey = DateUtils.formatDateKey(new Date(timeStamp))
			val userId = logSplit(3).toLong
			val adid = logSplit(4).toLong

			(dateKey + "_" + userId + "_" + adid, 1L)
		}

		val key2CountDStream: DStream[(String, Long)] = key2NumDStream.reduceByKey(_ + _)

		// Persist this batch's click counts first, so the threshold check below
		// sees running daily totals that include the current batch.
		key2CountDStream.foreachRDD { rdd =>
			rdd.foreachPartition { items =>
				val clickCountArray = new ArrayBuffer[AdUserClickCount]()

				for ((key, count) <- items) {
					val keySplit: Array[String] = key.split("_")

					val date: String = keySplit(0)
					val userId = keySplit(1).toLong
					val adid = keySplit(2).toLong

					clickCountArray += AdUserClickCount(date, userId, adid, count)
				}

				AdUserClickCountDAO.updateBatch(clickCountArray.toArray)
			}
		}

		// Keep only keys whose accumulated daily total exceeds 100 clicks.
		// NOTE(review): this performs one DAO lookup per key per batch.
		val key2BlackListDStream: DStream[(String, Long)] = key2CountDStream.filter {
			case (key, count) =>
				val keySplit = key.split("_")
				val date = keySplit(0)
				val userId = keySplit(1).toLong
				val adid = keySplit(2).toLong

				AdUserClickCountDAO.findClickCountByMultiKey(date, userId, adid) > 100
		}

		// Distinct offending user ids for this batch.
		val userIdDStream: DStream[Long] = key2BlackListDStream.map {
			case (key, count) => key.split("_")(1).toLong
		}.transform(_.distinct())

		userIdDStream.foreachRDD { rdd =>
			rdd.foreachPartition { items =>
				val userIdArray = new ArrayBuffer[AdBlacklist]()

				for (userId <- items) {
					userIdArray += AdBlacklist(userId)
				}

				AdBlacklistDAO.insertBatch(userIdArray.toArray)
			}
		}
	}


}
