package com.atguigu.realtime.app

import java.time.LocalDate

import com.atguigu.realtime.bean.StartupLog
import com.atguigu.realtime.util.{MyEsUtil, MyKafkaUtil, MyRedisUtil, OffsetManager}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.json4s.JValue
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods
import redis.clients.jedis.Jedis

import scala.collection.mutable.ListBuffer

/**
 * Author atguigu
 * Date 2020/11/10 10:24
 */
object DauApp_1 {
    
    val groupId = "DauApp_1"
    val topic = "gmall_startup_topic"
    
    /**
     * Entry point: consumes startup logs from Kafka, de-duplicates devices (mid)
     * per day via a Redis set, bulk-writes the daily-active records to
     * ElasticSearch, and only then persists the consumed offsets back to Redis
     * (write-then-commit gives at-least-once semantics).
     */
    def main(args: Array[String]): Unit = {
        val conf: SparkConf = new SparkConf().setMaster("local[2]").setAppName("DauApp_1")
        val ssc = new StreamingContext(conf, Seconds(3))
        
        // 1. Read the offsets saved by the previous run from Redis (once, at startup).
        val offsets = OffsetManager.readOffsets(groupId, topic)
        
        // Driver-side buffer holding the offset ranges of the CURRENT batch.
        // `transform` executes on the driver once per batch, before the
        // foreachRDD of the same batch reads this buffer.
        val offsetRanges = ListBuffer.empty[OffsetRange]
        val sourceStream = MyKafkaUtil
            .getKafkaStream(ssc, groupId, topic, offsets)
            .transform(rdd => {
                // Only an RDD obtained directly from the Kafka source stream
                // carries offset metadata, hence the cast to HasOffsetRanges.
                val newOffsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                offsetRanges.clear() // drop the previous batch's ranges first
                offsetRanges ++= newOffsetRanges
                rdd
            })
            .map(_.value())
        
        val startupLogStream: DStream[StartupLog] = parseToStartupLog(sourceStream)
        val result: DStream[StartupLog] = distinct_2(startupLogStream)
        
        // 2. Per batch: write the data FIRST, then save the offsets. A crash
        //    between the two steps causes the batch to be replayed, never lost.
        result.foreachRDD((rdd: RDD[StartupLog]) => {
            // Bulk-insert this batch into today's ES index, keyed by mid.
            rdd.foreachPartition(it => {
                val today: String = LocalDate.now().toString
                MyEsUtil.insertBulk(s"gmall_dau_info_$today", it.map(log => (log.mid, log)))
            })
            // Persist the offsets captured in `transform` for this batch.
            OffsetManager.saveOffsets(offsetRanges, groupId, topic)
        })
        
        ssc.start()
        ssc.awaitTermination()
        
    }
    
    /**
     * Parses each raw JSON record into a StartupLog by merging the "common"
     * sub-object with the top-level "ts" field before extraction.
     */
    def parseToStartupLog(sourceStream: DStream[String]): DStream[StartupLog] = {
        sourceStream.map(jsonString => {
            val value: JValue = JsonMethods.parse(jsonString)
            val jCommon: JValue = value \ "common"
            val jTs: JValue = value \ "ts"
            implicit val f = org.json4s.DefaultFormats
            jCommon.merge(JObject("ts" -> jTs)).extract[StartupLog]
        })
    }
    
    /**
     * Keeps only the first occurrence of each mid per day. De-duplication is
     * global across executors because it relies on a shared Redis set
     * ("mids:&lt;logDate&gt;"): SADD returns 1 only for the first insertion of a member.
     *
     * BUG FIX: the original returned the lazy `it.filter(...)` iterator and
     * closed the Jedis client immediately afterwards, so every `sadd` call ran
     * on a closed connection when Spark later consumed the iterator. The
     * filtered records are now materialized while the connection is still open,
     * and the client is closed in a `finally` block so it cannot leak.
     */
    def distinct_2(startupLogStream: DStream[StartupLog]): DStream[StartupLog] = {
        startupLogStream.mapPartitions((it: Iterator[StartupLog]) => {
            val client: Jedis = MyRedisUtil.getClient
            try {
                // .toList forces evaluation before the connection is closed.
                it.filter(log => {
                    val key = "mids:" + log.logDate
                    client.sadd(key, log.mid) == 1
                }).toList.iterator
            } finally {
                client.close()
            }
        })
    }
}

/*
json4s — the JSON parsing library commonly used in Scala.

The three implicit mechanisms in Scala (json4s relies on the third):

1. Implicit conversion functions

2. Implicit classes

3. Implicit parameters and implicit values
   (e.g. `implicit val f = org.json4s.DefaultFormats` supplies the
   `Formats` implicit parameter that `extract[T]` requires)

 */