package com.atguigu.handle

import java.{lang, util}
import java.text.SimpleDateFormat
import java.util.Date

import com.atguigu.bean.StartUpLog
import org.apache.spark.SparkContext
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import redis.clients.jedis.Jedis

object DauHandle {

  /**
    * Intra-batch deduplication: within one micro-batch, keep only the earliest
    * record (smallest `ts`) for each (logDate, mid) pair.
    *
    * @param filterByRedisDStream stream that has already passed the cross-batch
    *                             Redis filter (see [[filterByRedis]])
    * @return stream with at most one StartUpLog per (logDate, mid) per batch
    */
  def filterByGroup(filterByRedisDStream: DStream[StartUpLog]): DStream[StartUpLog] = {
    // 1. Key each record by (logDate, mid) so duplicates land in one group.
    val keyedLogs: DStream[((String, String), StartUpLog)] = filterByRedisDStream.map(startUpLog => {
      ((startUpLog.logDate, startUpLog.mid), startUpLog)
    })

    // 2. Gather all records sharing the same key into one group.
    val groupedLogs: DStream[((String, String), Iterable[StartUpLog])] = keyedLogs.groupByKey()

    // 3. Keep only the record with the smallest timestamp in each group.
    //    minBy is O(n) and returns the first minimum, which matches the
    //    previous stable sortWith(_.ts < _.ts).take(1) behavior exactly
    //    (groups produced by groupByKey are never empty, so minBy is safe).
    val earliestLogs: DStream[((String, String), StartUpLog)] = groupedLogs.mapValues(iter => {
      iter.minBy(_.ts)
    })

    // 4. Drop the keys and emit the surviving records.
    earliestLogs.map(_._2)
  }

  /**
    * Cross-batch deduplication: drop records whose mid is already stored in the
    * Redis set `Dau:<yyyy-MM-dd>` for the current day.
    *
    * Design note — earlier schemes, kept for reference:
    *  - Scheme 1: one Jedis connection per record (`filter` on the stream) —
    *    far too many connections.
    *  - Scheme 2: one connection per partition (`mapPartitions`) — better, but
    *    still one connection per partition per batch.
    *  - Scheme 3 (used below): one connection per batch, opened on the driver
    *    inside `transform`; the seen-mid set is fetched once and broadcast to
    *    the executors, so executors never touch Redis.
    *
    * @param startUpLogDStream raw start-up log stream
    * @param sc                SparkContext used to broadcast the seen-mid set
    * @return stream containing only records whose mid is not yet in Redis
    */
  def filterByRedis(startUpLogDStream: DStream[StartUpLog], sc: SparkContext): DStream[StartUpLog] = {
    // NOTE(review): SimpleDateFormat is not thread-safe; it is only used on the
    // driver inside transform, which should be safe as long as batch jobs are
    // generated sequentially — confirm if the scheduler is ever made concurrent.
    val sdf: SimpleDateFormat = new SimpleDateFormat("yyyy-MM-dd")
    startUpLogDStream.transform(rdd => {
      // 1. Open a Redis connection on the driver, once per batch.
      val jedis: Jedis = new Jedis("hadoop102", 6379)
      try {
        // 2. Fetch the snapshot of mids already seen today. The key is built
        //    from the driver's current date, not each record's logDate.
        val today: String = sdf.format(new Date(System.currentTimeMillis()))
        val redisKey: String = "Dau:" + today
        val mids: util.Set[String] = jedis.smembers(redisKey)

        // 3. Broadcast the snapshot so executors can filter locally.
        val midsBC: Broadcast[util.Set[String]] = sc.broadcast(mids)

        // 4. Keep only records whose mid is absent from the broadcast set.
        rdd.filter(startUpLog => !midsBC.value.contains(startUpLog.mid))
      } finally {
        // Close the connection even if the Redis calls above throw
        // (smembers already ran eagerly, so the filter does not need jedis).
        jedis.close()
      }
    })
  }

  /**
    * Persist the mids of fully deduplicated records to Redis so that later
    * batches can filter them out (see [[filterByRedis]]).
    *
    * @param startUpLogDStream deduplicated start-up log stream
    */
  def saveMidToRedis(startUpLogDStream: DStream[StartUpLog]): Unit = {
    startUpLogDStream.foreachRDD(rdd => {
      rdd.foreachPartition(partition => {
        // 1. One Redis connection per partition, opened on the executor.
        val jedis: Jedis = new Jedis("hadoop102", 6379)
        try {
          partition.foreach(startUpLog => {
            // 2. Add the mid to the day's set; sadd is idempotent, so
            //    re-processed records are harmless.
            jedis.sadd("Dau:" + startUpLog.logDate, startUpLog.mid)
          })
        } finally {
          // Close the connection even if a write throws.
          jedis.close()
        }
      })
    })
  }

}
