package com.atguigu.handler

import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.Date

import com.atguigu.bean.StartUpLog
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import redis.clients.jedis.Jedis

object DauHandler {

  /**
    * Intra-batch deduplication: within a single micro-batch, keep only the
    * earliest record (smallest ts) for each (logDate, mid) pair.
    *
    * @param filterByRedisDStream stream already deduplicated across batches
    * @return a stream with at most one StartUpLog per (logDate, mid) per batch
    */
  def filterByGroup(filterByRedisDStream: DStream[StartUpLog]): DStream[StartUpLog] = {
    filterByRedisDStream
      // 1. Key by (logDate, mid) so records of the same device on the same day
      //    land in the same group.
      .map(log => ((log.logDate, log.mid), log))
      // 2. Gather all records sharing a key.
      .groupByKey()
      // 3. Keep only the record with the smallest timestamp per key
      //    (equivalent to sorting ascending by ts and taking the first).
      .mapValues(_.minBy(_.ts))
      // 4. Drop the key, yielding the deduplicated records.
      .map(_._2)
  }

  /**
    * Cross-batch deduplication: drop records whose mid has already been seen
    * on a previous batch (i.e. is already present in Redis).
    *
    * Scheme 1 (rejected): open one Jedis connection per record inside
    * `filter` — far too many connections.
    * Scheme 2 (rejected): one connection per partition via `mapPartitions` —
    * fewer connections, but still one SISMEMBER round-trip per record.
    * Scheme 3 (used below): one connection per batch on the driver; fetch the
    * whole mid set once with SMEMBERS and broadcast it to the executors.
    *
    * @param startUpLogDStream raw start-up log stream
    * @param sc                SparkContext used to create the per-batch broadcast
    * @return stream with already-seen mids filtered out
    */
  def filterByRedis(startUpLogDStream: DStream[StartUpLog], sc: SparkContext): DStream[StartUpLog] = {
    // Local to the method: SimpleDateFormat is not thread-safe, but this one is
    // only ever used on the driver inside `transform`.
    val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")

    startUpLogDStream.transform(rdd => {
      // `transform`'s body runs on the driver once per batch.
      val jedis: Jedis = new Jedis("hadoop102", 6379)
      try {
        // Key of today's DAU set.
        // NOTE(review): saveMidToRedis builds its key from startUpLog.logDate;
        // this assumes logDate is also formatted "yyyy-MM-dd" for the current
        // day — around midnight the two keys can disagree. Confirm upstream.
        val redisKey: String = "DauApp" + sdf.format(new Date())
        val mids: util.Set[String] = jedis.smembers(redisKey)

        // Broadcast the already-seen mids so each executor holds one copy
        // instead of querying Redis per record.
        val midsBC: Broadcast[util.Set[String]] = sc.broadcast(mids)

        // Keep only records whose mid has not been seen in an earlier batch.
        rdd.filter(startUpLog => !midsBC.value.contains(startUpLog.mid))
      } finally {
        // SMEMBERS already materialized the set above, so closing here is safe
        // even though the filter itself executes later on the executors.
        // try/finally guarantees the connection is released on failure too.
        jedis.close()
      }
    })
  }

  /**
    * Persist every mid of the (fully deduplicated) stream into Redis, one set
    * per log date, so subsequent batches can filter against it.
    *
    * @param startUpLogDStream deduplicated start-up log stream
    */
  def saveMidToRedis(startUpLogDStream: DStream[StartUpLog]): Unit = {
    startUpLogDStream.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        // One connection per partition keeps the total connection count low.
        val jedis: Jedis = new Jedis("hadoop102", 6379)
        try {
          partition.foreach(startUpLog => {
            // SADD is idempotent, so re-adding an existing mid is harmless.
            val redisKey: String = "DauApp" + startUpLog.logDate
            jedis.sadd(redisKey, startUpLog.mid)
          })
        } finally {
          // Guarantee the connection is released even if sadd throws.
          jedis.close()
        }
      })
    })
  }

}
