package com.yiguo.realtime.app

import com.alibaba.fastjson.JSON
import com.yiguo.gmall.Constansts
import com.yiguo.realtime.beans.StartUpLog
import com.yiguo.realtime.utils.{MyKafkaUtil, RedisUtil}
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.spark.streaming.dstream.DStream
import redis.clients.jedis.Jedis

import java.lang
import java.time.{Instant, LocalDateTime, ZoneId}
import java.time.format.DateTimeFormatter
import org.apache.phoenix.spark._

object DAUApp extends BaseApp {
  override var appName: String = "DAUApp"
  override var duration: Int = 10

  // Single key prefix shared by the Redis writer and both Redis readers.
  // BUG FIX: the write side previously used "DAU" + date while the read
  // paths used "DAU:" + date, so the cross-batch de-dup never matched.
  private val DauKeyPrefix = "DAU:"

  def main(args: Array[String]): Unit = {
    run {
      val kafkaStream = MyKafkaUtil.getKafkaStream(
                           Constansts.GMALL_STARTUP_LOG, streamingContext)

      // Parse each Kafka record's JSON value into a StartUpLog bean and
      // derive logDate / logHour from the epoch-millis timestamp (ts),
      // interpreted in the Asia/Shanghai time zone.
      val startLogs: DStream[StartUpLog] = kafkaStream.map(record => {
        val startUpLog: StartUpLog = JSON.parseObject(record.value(), classOf[StartUpLog])
        val formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd")
        val localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(startUpLog.ts), ZoneId.of("Asia/Shanghai"))
        startUpLog.logDate = localDateTime.format(formatter)
        startUpLog.logHour = localDateTime.getHour.toString
        startUpLog
      })
      startLogs.count().print()

      // In-batch de-dup: keep only the earliest start log per (mid, logDate).
      val firstPerMid: DStream[StartUpLog] = distincDatatBach(startLogs)
      firstPerMid.count().print()

      // Cross-batch de-dup: drop mids already recorded in Redis for today.
      val newMids: DStream[StartUpLog] = newfilterfromredisdata(firstPerMid)
      newMids.cache() // reused by two foreachRDD sinks below
      newMids.count().print()

      // Record today's mids in Redis, one connection per partition.
      newMids.foreachRDD(rdd => {
        rdd.foreachPartition(partition => {
          val jedis: Jedis = RedisUtil.getJedisClient()
          try {
            partition.foreach(log => jedis.sadd(DauKeyPrefix + log.logDate, log.mid))
          } finally {
            // close in finally so an exception mid-partition cannot leak the connection
            jedis.close()
          }
        })
      })

      // Persist the full detail rows to HBase via Phoenix.
      newMids.foreachRDD(rdd => {
        /**
         * cols: which table columns the RDD fields map to.
         * conf: hadoop Configuration — must come from HBaseConfiguration.create(),
         *       never `new Configuration()`.
         */
        rdd.saveToPhoenix(
          "GMALL2020_DAU",
          Seq("MID", "UID", "APPID", "AREA", "OS", "CH", "TYPE", "VS", "LOGDATE", "LOGHOUR", "TS"),
          HBaseConfiguration.create(),
          Some("hadoop102,hadoop103,hadoop104:2181")
        )
      })
    }
  }

  /**
   * In-batch de-duplication: group by (mid, logDate) and keep the record
   * with the smallest ts (the earliest start log) for each key.
   */
  def distincDatatBach(ds: DStream[StartUpLog]): DStream[StartUpLog] = {
    ds.map(log => ((log.mid, log.logDate), log))
      .groupByKey()
      // groups produced by groupByKey are never empty, so minBy is safe
      .map { case (_, logs) => logs.minBy(_.ts) }
  }

  /**
   * Per-record Redis filter (NOT recommended): opens and closes one Jedis
   * connection for every single record — for 1000 records that is 1000
   * connect / sismember / close round trips, wasting time and resources.
   * Kept only for comparison with [[newfilterfromredisdata]].
   *
   * @param minOfts batch-deduplicated start logs
   * @return logs whose mid is not yet in today's Redis DAU set
   */
  def filterfromredisdata(minOfts: DStream[StartUpLog]): DStream[StartUpLog] = {
    minOfts.filter(log => {
      val redis: Jedis = RedisUtil.getJedisClient()
      // membership test against today's DAU set
      val alreadySeen: lang.Boolean = redis.sismember(DauKeyPrefix + log.logDate, log.mid)
      redis.close()
      !alreadySeen
    })
  }

  /**
   * Cross-batch de-duplication using one Redis connection per partition
   * (the recommended pattern for external reads/writes in Spark).
   *
   * BUG FIX: the original closed the Jedis connection before the lazy
   * iterator returned by `filter` was consumed, so `sismember` ran against
   * a closed connection when Spark actually pulled the data. The filtered
   * partition is now materialized with `toList` BEFORE the connection is
   * closed, and the close happens in a `finally` so it cannot leak.
   *
   * @param minOfts batch-deduplicated start logs
   * @return logs whose mid is not yet in today's Redis DAU set
   */
  def newfilterfromredisdata(minOfts: DStream[StartUpLog]): DStream[StartUpLog] = {
    minOfts.mapPartitions(partition => {
      val jedis: Jedis = RedisUtil.getJedisClient()
      try {
        partition
          .filter(log => !jedis.sismember(DauKeyPrefix + log.logDate, log.mid))
          .toList // force evaluation while the connection is still open
          .iterator
      } finally {
        jedis.close()
      }
    })
  }
}
