package com.atguigu.sparkstreaming.apps

import java.util

import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import com.atguigu.realtime.constants.TopicConstant
import com.atguigu.realtime.utils.KafkaProducerUtil
import com.atguigu.sparkstreaming.utils.DStreamUtil
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Created by Smexy on 2022/7/18
 *
 *    Reads records from the Kafka topic base_log, picks out start_log entries and
 *    actions_log entries (exploding the actions array into individual rows), and
 *    writes the results back to Kafka.
 *
 *    Offline-warehouse analogy: ods_log_inc input ---- HQL ---- select page_log ---- write to dwd_traffic_page_view_inc
 *
 *    The offline warehouse's HDFS corresponds to this real-time project's Kafka.
 *
 *    ------------------------------------------
 *    Delivery semantics: at least once + idempotent output
 *    (the Kafka producer is configured with idempotence enabled).
 *
 */
object LogDiversionApp extends BaseApp {
  override var groupId: String = "realtime220309"
  override var topic: String = TopicConstant.ORIGINAL_LOG
  override var appName: String = "LogDiversionApp"
  override var batchDuration: Int = 10

  /**
   * Explodes the "actions" array of a single log record: each individual action
   * is merged with the record's common and page sections and sent to the
   * ACTION_LOG topic as one flattened JSON string.
   *
   * Merge-order note: the maps may share key names, and a later `putAll` wins.
   * Here common/page fields deliberately overwrite same-named action-local keys.
   *
   * @param commonMap  the "common" section of the log (already enriched with "ts" by the caller)
   * @param pageMap    the "page" section of the log
   * @param actionsStr JSON array string holding the individual actions
   * @param gson       per-partition Gson instance used for serialization
   */
  def parseActions(commonMap: util.Map[String, AnyRef], pageMap: util.Map[String, AnyRef], actionsStr: String, gson: Gson): Unit = {

    val jSONArray: JSONArray = JSON.parseArray(actionsStr)

    for (i <- 0 until jSONArray.size()) {

      val actionStr: String = jSONArray.getString(i)

      val actionMap: util.Map[String, AnyRef] = JSON.parseObject(actionStr).getInnerMap

      // Merge the three maps; later putAll overwrites duplicate keys,
      // so common/page fields take precedence over action-local ones.
      actionMap.putAll(commonMap)
      actionMap.putAll(pageMap)

      KafkaProducerUtil.sendData(gson.toJson(actionMap), TopicConstant.ACTION_LOG)

    }

  }

  /**
   * Routes the records of one micro-batch:
   *   - start logs (have "start", no "err")   -> STARTUP_LOG topic
   *   - action logs (have "actions", no "err") -> exploded and sent to ACTION_LOG topic
   * Records carrying an "err" field — and records matching neither shape — are
   * silently dropped.
   *
   * Works partition-by-partition, the usual pattern for per-partition resources
   * (here: one Gson instance per partition instead of one per record).
   *
   * @param rdd the batch's RDD of raw Kafka records; the record value is a JSON log line
   */
  def handleLog(rdd: RDD[ConsumerRecord[String, String]]): Unit = {

    rdd.foreachPartition(partition => {

      // One Gson per partition: Gson is created on the executor, avoiding
      // serialization of the instance from the driver.
      val gson = new Gson()

      partition.foreach(record => {

        // Parse the raw Kafka value into a JSON object.
        val jsonobject: JSONObject = JSON.parseObject(record.value())

        if (jsonobject.containsKey("start") && !jsonobject.containsKey("err")) {

          val commonMap: util.Map[String, AnyRef] = JSON.parseObject(jsonobject.getString("common")).getInnerMap

          // Carry the top-level event timestamp into the flattened record.
          commonMap.put("ts", jsonobject.getLong("ts"))

          val startMap: util.Map[String, AnyRef] = JSON.parseObject(jsonobject.getString("start")).getInnerMap

          startMap.putAll(commonMap)

          // This is a startup log.
          KafkaProducerUtil.sendData(gson.toJson(startMap), TopicConstant.STARTUP_LOG)

        } else if (jsonobject.containsKey("actions") && !jsonobject.containsKey("err")) {

          // This log contains an "actions" array: explode it so every action,
          // joined with this record's common and page sections, becomes its own
          // JSON string written to Kafka.
          val commonMap: util.Map[String, AnyRef] = JSON.parseObject(jsonobject.getString("common")).getInnerMap

          // NOTE(review): assumes any record with "actions" also has a "page"
          // section — a missing "page" would make parseObject(null) NPE here.
          // TODO confirm against the log producer's schema.
          val pageMap: util.Map[String, AnyRef] = JSON.parseObject(jsonobject.getString("page")).getInnerMap

          val actionsStr: String = jsonobject.getString("actions")

          parseActions(commonMap, pageMap, actionsStr, gson)

        }

      })

      // Flush the producer's buffer eagerly so this partition's output is
      // on the broker before the batch's offsets get committed.
      KafkaProducerUtil.flush()

    })

  }

  def main(args: Array[String]): Unit = {

    // Build the StreamingContext ourselves (local mode, batch = batchDuration seconds).
    context = new StreamingContext("local[*]", appName, Seconds(batchDuration))

    runApp {

      val ds: InputDStream[ConsumerRecord[String, String]] = DStreamUtil.createDStream(groupId, context, topic)

      ds.foreachRDD(rdd => {

        if (!rdd.isEmpty()) {

          // Capture the offset ranges consumed in this batch.
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // Process first, commit after: at-least-once delivery — a crash between
          // the two steps replays the batch, which the idempotent producer absorbs.
          handleLog(rdd)

          // Commit the offsets only after the batch was handled.
          ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)

        }

      })

    }

  }
}
