package com.atguigu.handle

import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.Date

import com.atguigu.bean.StartUpLog
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import redis.clients.jedis.Jedis

/**
  *
  * @author Lec
  * @date 2022/7/17 13:35
  */
/**
  * Daily Active User (DAU) de-duplication handlers.
  *
  * Pipeline order (wired up by the caller): `filterBYRedis` drops mids already
  * recorded in Redis (cross-batch dedup), `filterByGroup` keeps one log per
  * (date, mid) inside the current batch (intra-batch dedup), and
  * `saveMidToRedis` persists the surviving mids back to Redis.
  */
object DauHandle {

  // Redis endpoint; hard-coded for the tutorial cluster — consider externalizing
  // to configuration in production code.
  private val RedisHost: String = "hadoop102"
  private val RedisPort: Int = 6379

  /**
    * Intra-batch de-duplication: within one batch, keep only the earliest log
    * (smallest `ts`) for each (logDate, mid) pair.
    *
    * @param filterByRedisDStream stream already filtered against Redis
    * @return stream with at most one log per (logDate, mid) per batch
    */
  def filterByGroup(filterByRedisDStream: DStream[StartUpLog]): DStream[StartUpLog] = {
    // 1. Key each record by (logDate, mid) so all logs of the same device on
    //    the same day land in one group.
    val keyedDStream: DStream[((String, String), StartUpLog)] =
      filterByRedisDStream.map(log => ((log.logDate, log.mid), log))

    // 2. Group per key, then 3. keep only the record with the smallest
    //    timestamp. minBy is O(n) — the original sorted the whole group
    //    (O(n log n)) just to take the first element; a stable ascending sort
    //    plus take(1) selects the same first-minimum record, so behavior is
    //    unchanged. groupByKey never emits empty groups, so minBy is safe.
    // 4. Drop the key, flattening back to a plain stream of logs.
    keyedDStream
      .groupByKey()
      .mapValues(_.minBy(_.ts))
      .map(_._2)
  }

  /**
    * Cross-batch de-duplication against Redis: drop any log whose mid is
    * already in today's Redis set.
    *
    * Two earlier schemes were discarded: (1) one Jedis connection per record
    * (`filter` opening/closing a connection per log — far too many
    * connections) and (2) one connection per partition via `mapPartitions`
    * (better, but still one round trip per record through `sismember`).
    * Scheme 3 below opens ONE connection per batch on the driver, pulls the
    * whole set once, and broadcasts it to the executors.
    *
    * @param startUpLogDStream raw start-up log stream
    * @param sc                SparkContext used to create the broadcast variable
    * @return stream with already-seen mids removed
    */
  def filterBYRedis(startUpLogDStream: DStream[StartUpLog], sc: SparkContext): DStream[StartUpLog] = {
    // Used only on the driver inside transform, so the non-thread-safe
    // SimpleDateFormat is fine here.
    val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")

    // transform's closure runs once per batch on the driver.
    val filteredDStream: DStream[StartUpLog] = startUpLogDStream.transform(rdd => {
      // 1. One Redis connection per batch, opened on the driver.
      val jedis = new Jedis(RedisHost, RedisPort)

      // 2. Fetch today's already-seen mids on the driver.
      //    NOTE(review): this key uses the driver's CURRENT date while
      //    saveMidToRedis keys by log.logDate — assumes both use the
      //    "yyyy-MM-dd" format, and a batch spanning midnight may be checked
      //    against the wrong day's set. Confirm against StartUpLog.logDate.
      val redisKey: String = "Dau:" + sdf.format(new Date(System.currentTimeMillis()))
      val mids: util.Set[String] = jedis.smembers(redisKey)

      // 3. Broadcast the set so each executor holds one shared copy per batch.
      //    (The broadcast is never explicitly unpersisted; Spark GCs it
      //    eventually — acceptable for per-batch broadcasts of a small set.)
      val midBc: Broadcast[util.Set[String]] = sc.broadcast(mids)

      // 4. Keep only logs whose mid has not been seen yet.
      val filterRdd: RDD[StartUpLog] = rdd.filter(log => !midBc.value.contains(log.mid))

      // smembers already materialized the set on the driver, so the connection
      // can be closed before the RDD is lazily evaluated on the executors.
      jedis.close()
      filterRdd
    })
    filteredDStream
  }

  /**
    * Persist each surviving log's mid into the per-day Redis set
    * ("Dau:" + logDate), so later batches can de-duplicate against it.
    *
    * @param startUpLogDStream fully de-duplicated stream
    */
  def saveMidToRedis(startUpLogDStream: DStream[StartUpLog]): Unit = {
    startUpLogDStream.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        // One connection per partition to limit connection churn.
        val jedis = new Jedis(RedisHost, RedisPort)
        try {
          partition.foreach(log => {
            val redisKey: String = "Dau:" + log.logDate
            jedis.sadd(redisKey, log.mid)
          })
        } finally {
          // Fix: the original closed the connection only on the happy path,
          // leaking it if any sadd threw. Always release it.
          jedis.close()
        }
      })
    })
  }

}
