package com.atguigu.realtime.streaming.apps

import java.time.{Instant, LocalDateTime, ZoneId}
import java.time.format.DateTimeFormatter

import com.alibaba.fastjson.JSON
import com.atguigu.realtime.constants.{PrefixConstant, TopicConstant}
import com.atguigu.realtime.streaming.apps.LogDiversionApp.{appName, batchDuration, context, groupId, parseRecord, runApp, topic}
import com.atguigu.realtime.streaming.beans.StartLog
import com.atguigu.realtime.streaming.utils.{MyKafkaUtil, RedisUtil}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}
import redis.clients.jedis.Jedis

/**
 * Created by Smexy on 2022/5/24
 */
/**
 * DAU (daily active user) startup-log pipeline.
 *
 * Consumes startup-log records from Kafka, deduplicates them per (date, mid)
 * — first within the batch, then against history kept in Redis — writes the
 * surviving records to HBase via Phoenix, records the seen mids back into
 * Redis, and finally commits the Kafka offsets (at-least-once semantics).
 */
object StartLogApp extends BaseApp {
  override var appName: String = "StartLogApp"
  override var batchDuration: Int = 10
  override var groupId: String = "realtime1227"
  override var topic: String = TopicConstant.STARTUP_LOG

  /**
   * Parses raw Kafka records into [[StartLog]] beans and fills in the three
   * derived fields (start_date, start_time, id) from the epoch-millis `ts`
   * string carried in the JSON payload.
   */
  def parseBean(rdd: RDD[ConsumerRecord[String, String]]): RDD[StartLog] = {
    rdd.mapPartitions(partition => {
      // Formatters are created once per partition, not once per record.
      val dateFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd")
      val timeFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")

      partition.map(record => {
        // Deserialize the JSON value into the bean's declared fields.
        val log: StartLog = JSON.parseObject(record.value(), classOf[StartLog])

        // ts is an epoch-millis string; convert once and derive both the
        // date and the full timestamp from the same instant.
        val dateTime: LocalDateTime =
          LocalDateTime.ofInstant(Instant.ofEpochMilli(log.ts.toLong), ZoneId.of("Asia/Shanghai"))

        log.start_date = dateTime.format(dateFormatter)
        log.start_time = dateTime.format(timeFormatter)
        log.id = log.start_time + "_" + log.mid
        log
      })
    })
  }

  /**
   * In-batch dedup: groups by (date, mid) and keeps the record with the
   * smallest timestamp. reduceByKey is used rather than groupByKey so the
   * reduction also runs map-side before the shuffle.
   *
   * FIX: timestamps are compared numerically via toLong. `ts` is a String, so
   * the previous plain `<` compared lexicographically and gives wrong answers
   * whenever the two timestamps have different digit counts.
   */
  def removeDuplicateBeanInCommonBatch(rdd: RDD[StartLog]): RDD[StartLog] = {
    rdd
      .map(log => ((log.start_date, log.mid), log))
      .reduceByKey((log1, log2) => if (log1.ts.toLong < log2.ts.toLong) log1 else log2)
      .values
  }

  /**
   * Cross-batch dedup against Redis: drops any log whose mid is already in
   * today's set, i.e. a startup log was already recorded for that device today.
   *
   * Redis layout: key = PrefixConstant.dau_redis_Preffix + start_date,
   * value = Set of mids seen that day.
   *
   * FIX: the filtered iterator is materialized BEFORE the Jedis connection is
   * closed. Iterator.filter is lazy; in the original code `jedis.close()` ran
   * before Spark consumed the iterator, so every `sismember` call would have
   * executed against a closed connection. The connection is also now closed in
   * a finally block so it cannot leak on error.
   */
  def removeDuplicateBeanFromHistoryBatch(rdd: RDD[StartLog]): RDD[StartLog] = {
    rdd.mapPartitions(partition => {
      val jedis: Jedis = RedisUtil.getJedisClient()
      try {
        // toList forces evaluation while the connection is still open.
        partition
          .filter(log => !jedis.sismember(PrefixConstant.dau_redis_Preffix + log.start_date, log.mid))
          .toList
          .iterator
      } finally {
        jedis.close()
      }
    })
  }

  /**
   * Records the mids written in this batch into today's Redis set so that
   * later batches skip them (see removeDuplicateBeanFromHistoryBatch).
   * The connection is closed in a finally block so it cannot leak on error.
   */
  def saveMidToRedis(rdd3: RDD[StartLog]): Unit = {
    rdd3.foreachPartition(partition => {
      val jedis: Jedis = RedisUtil.getJedisClient()
      try {
        partition.foreach(log => jedis.sadd(PrefixConstant.dau_redis_Preffix + log.start_date, log.mid))
      } finally {
        jedis.close()
      }
    })
  }

  def main(args: Array[String]): Unit = {
    // Build the StreamingContext with the batch interval declared above.
    context = new StreamingContext("local[*]", appName, Seconds(batchDuration))

    runApp {
      val ds: InputDStream[ConsumerRecord[String, String]] =
        MyKafkaUtil.getKafkaStream(Array(topic), context, groupId)

      ds.foreachRDD(rdd => {
        if (!rdd.isEmpty()) {
          // Capture the offsets first; they are committed only after the
          // whole batch has been processed (at-least-once).
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // Parse -> in-batch dedup -> cross-batch (Redis) dedup.
          val rdd1: RDD[StartLog] = parseBean(rdd)
          val rdd2: RDD[StartLog] = removeDuplicateBeanInCommonBatch(rdd1)
          val rdd3: RDD[StartLog] = removeDuplicateBeanFromHistoryBatch(rdd2)

          // Phoenix implicits providing RDD.saveToPhoenix.
          import org.apache.phoenix.spark._

          // rdd3 is consumed three times (count, saveToPhoenix, saveMidToRedis);
          // caching avoids re-running the lineage, including the Redis filtering.
          rdd3.cache()

          println("当前批次要写入:"+rdd3.count())

          /*
            saveToPhoenix(tableName, cols, conf, zkUrl):
            cols lists the target columns in the order of the bean's fields.
           */
          rdd3.saveToPhoenix(
            "REALTIME2022_STARTLOG",
            Seq("ID","OPEN_AD_MS","OS","CH","IS_NEW","MID","OPEN_AD_ID","VC","AR",
              "UID","ENTRY","OPEN_AD_SKIP_MS","MD","LOADING_TIME","BA","TS","START_DATE","START_TIME"),
            HBaseConfiguration.create,
            Some("hadoop103:2181")
          )

          // Mark these mids as recorded so subsequent batches drop them.
          saveMidToRedis(rdd3)

          // FIX: release the cached RDD once the batch is fully processed;
          // otherwise cached blocks accumulate across batches.
          rdd3.unpersist()

          // Commit offsets last, after all sinks succeeded.
          ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
        }
      })
    }
  }
}
