package com.atguigu.handle

import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.Date

import com.atguigu.bean.StartUpLog
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import redis.clients.jedis.Jedis

object DauHandle {

  // Redis connection parameters shared by every handler below.
  // NOTE(review): consider moving these to external configuration.
  private val RedisHost = "hadoop102"
  private val RedisPort = 6379

  /**
    * Intra-batch deduplication: within one micro-batch, keep only the earliest
    * StartUpLog (smallest `ts`) for each (logDate, mid) pair.
    *
    * @param filerByRedisDStream stream already filtered against Redis
    * @return stream containing at most one log per device per day within the batch
    */
  def filterByGroup(filerByRedisDStream: DStream[StartUpLog]) = {
    //1. Key each record by (logDate, mid) so logs of the same device/day group together.
    val logDateWithMidToLogDStream: DStream[((String, String), StartUpLog)] = filerByRedisDStream.map(startUpLog => {
      ((startUpLog.logDate, startUpLog.mid), startUpLog)
    })

    //2. Gather all records sharing the same key into one group.
    val logDateWithMidToIterLogDStream: DStream[((String, String), Iterable[StartUpLog])] = logDateWithMidToLogDStream.groupByKey()

    //3. Keep only the earliest record per key. minBy is O(n), whereas the
    //   previous sortWith(...).take(1) sorted the whole group (O(n log n))
    //   and materialized it as a List just to read the first element.
    val firstLogDStream: DStream[((String, String), StartUpLog)] = logDateWithMidToIterLogDStream.mapValues(iter => {
      iter.minBy(_.ts)
    })

    //4. Drop the key and emit the surviving log.
    firstLogDStream.map(_._2)
  }

  /**
    * Cross-batch deduplication: drop any log whose mid is already recorded in
    * today's Redis set ("Dau:yyyy-MM-dd").
    *
    * Earlier iterations opened one Jedis connection per record (filter) or per
    * partition (mapPartitions); this version opens a single connection per
    * batch on the driver and broadcasts the already-seen mid set to executors.
    *
    * @param startUpLogDStream raw start-up log stream
    * @param sc                SparkContext used to broadcast the mid set
    * @return stream with previously-seen mids removed
    */
  def filterByRedis(startUpLogDStream: DStream[StartUpLog], sc: SparkContext) = {
    val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
    val value: DStream[StartUpLog] = startUpLogDStream.transform(rdd => {
      //1. One connection per batch, opened on the driver.
      val jedis: Jedis = new Jedis(RedisHost, RedisPort)

      //2. Read today's already-seen mids on the driver.
      //   NOTE(review): the key is derived from the driver's current clock,
      //   not from log.logDate — logs processed just after midnight may be
      //   checked against the wrong day's set. Confirm whether this matters
      //   for the DAU requirement.
      val mids: util.Set[String] =
        try {
          val redisKey: String = "Dau:" + sdf.format(new Date(System.currentTimeMillis()))
          jedis.smembers(redisKey)
        } finally {
          // Close even when smembers throws, so driver connections never leak.
          jedis.close()
        }

      //3. Broadcast the set so each executor receives one copy per batch.
      val midsBc: Broadcast[util.Set[String]] = sc.broadcast(mids)

      //4. Keep only the logs whose mid has not been seen yet today.
      val filterRdd: RDD[StartUpLog] = rdd.filter(log => {
        !midsBc.value.contains(log.mid)
      })
      filterRdd
    })
    value
  }

  /**
    * Record each log's mid into the per-day Redis set ("Dau:" + logDate) so
    * subsequent batches can deduplicate against it via [[filterByRedis]].
    *
    * @param startUpLogDStream the deduplicated stream whose mids should be persisted
    */
  def saveMidToRedis(startUpLogDStream: DStream[StartUpLog]): Unit = {
    startUpLogDStream.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        // One connection per partition keeps the total connection count low.
        val jedis: Jedis = new Jedis(RedisHost, RedisPort)
        try {
          partition.foreach(log => {
            val redisKey: String = "Dau:" + log.logDate
            jedis.sadd(redisKey, log.mid)
          })
        } finally {
          // Always release the connection, even if a sadd call throws.
          jedis.close()
        }
      })
    })
  }

}
