package com.atguigu.realtime.app


import java.time.LocalDate

import com.atguigu.realtime.bean.StartupLog
import com.atguigu.realtime.util.{MyEsUtil, MyKafkaUtil, MyRedisUtil}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.json4s.JValue
import org.json4s.JsonAST.JObject
import org.json4s.jackson.JsonMethods
import redis.clients.jedis.Jedis

/**
 * Author atguigu
 * Date 2020/11/10 10:24
 */
object DauApp {
    /**
     * Parse raw JSON strings into [[StartupLog]] instances.
     *
     * The incoming JSON has the shape {"common": {...}, "ts": ...}; the
     * top-level "ts" field is merged into the "common" object so that the
     * combined JSON matches the fields of the StartupLog case class.
     *
     * @param sourceStream stream of raw JSON strings from Kafka
     * @return stream of parsed StartupLog records
     */
    def parseToStartupLog(sourceStream: DStream[String]): DStream[StartupLog] = {
        sourceStream.map(jsonString => {
            val value: JValue = JsonMethods.parse(jsonString)
            val jCommon: JValue = value \ "common"
            val jTs: JValue = value \ "ts"
            // json4s needs implicit Formats in scope for extract[...]
            implicit val f = org.json4s.DefaultFormats
            jCommon.merge(JObject("ts" -> jTs)).extract[StartupLog]
        })
    }
    
    /**
     * De-duplicate the stream so that only the first startup of each device
     * per day survives.
     *
     * Idea: sadd the device id (mid) into a per-day Redis set. sadd returns 1
     * when the member was newly inserted (first startup today) and 0 when it
     * was already present (not the first startup — filter it out).
     *
     * NOTE: this variant opens and closes a Redis connection for EVERY single
     * record. Prefer [[distinct_2]], which uses one connection per partition.
     *
     * @param startupLogStream parsed startup-log stream
     * @return stream containing only first-startup-of-the-day records
     */
    def distinct(startupLogStream: DStream[StartupLog]): DStream[StartupLog] = {
        startupLogStream.filter(log => {
            val client: Jedis = MyRedisUtil.getClient
            val key = "mids:" + log.logDate
            val r = client.sadd(key, log.mid)
            client.close()
            r == 1 // 1 means this mid was stored in Redis for the first time today
        })
    }
    
    /**
     * De-duplicate the stream (one Redis connection per partition per batch).
     *
     * @param startupLogStream parsed startup-log stream
     * @return stream containing only first-startup-of-the-day records
     */
    def distinct_2(startupLogStream: DStream[StartupLog]): DStream[StartupLog] = {
        // Code here (outside transform) runs once, on the driver.
        startupLogStream.transform(rdd => {
            // Code here runs on the driver, once per batch.
            rdd.mapPartitions(it => {
                // Code here runs on an executor, once per partition per batch.
                val client: Jedis = MyRedisUtil.getClient
                // Iterator.filter is LAZY: the predicate (and its Redis calls)
                // would only run when Spark consumes the returned iterator,
                // i.e. AFTER client.close(). Force evaluation with toList
                // before closing so sadd never runs on a closed connection.
                val firstSeen = it.filter(log => {
                    val key = "mids:" + log.logDate
                    client.sadd(key, log.mid) == 1
                }).toList
                client.close()
                firstSeen.iterator
            })
        })
    }
    
    def main(args: Array[String]): Unit = {
        // 1. Create the StreamingContext with 3-second micro-batches.
        val conf: SparkConf = new SparkConf().setMaster("local[2]").setAppName("DauApp")
        val ssc = new StreamingContext(conf, Seconds(3))
        // 2. Obtain a stream from Kafka.
        val sourceStream: DStream[String] = MyKafkaUtil.getKafkaStream(ssc, "DauApp", "gmall_startup_topic")
        // 3. Parse the string payloads into the StartupLog case class.
        val startupLogStream: DStream[StartupLog] = parseToStartupLog(sourceStream)
        // 4. De-duplicate: keep only the first startup per device per day.
        val result: DStream[StartupLog] = distinct_2(startupLogStream)
        // 5. Output: write each batch to Elasticsearch, one index per day.
        result.foreachRDD(rdd => {
            import MyEsUtil._
            // Evaluated on the driver once per batch, so each batch is routed
            // to the index named after the day it is processed.
            val today: String = LocalDate.now().toString
            rdd.saveToES(s"gmall_dau_info_$today")
        })
        
        // 6. Start the streaming computation.
        ssc.start()
        // 7. Block the main thread so the JVM does not exit while streaming.
        ssc.awaitTermination()
        
    }
}

/*
json4s is a library dedicated to parsing JSON in Scala.

Scala implicits recap:

1. Implicit conversion functions

2. Implicit classes

3. Implicit parameters and implicit values

 */