package com.atguigu.realtime.apps

import java.util

import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import com.atguigu.realtime.apps.TestDemo.{appName, batchDuration, context, groupId, runSparkStreamingApp, topic}
import com.atguigu.realtime.constants.TopicConstant
import com.atguigu.realtime.utils.{DStreamUtil, KafkaProducerUtil}
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, OffsetRange}

/**
 * Created by Smexy on 2022/8/26
 *
 * Consumes raw log records from Kafka, keeps only the startup logs and the
 * action logs, flattens them (and explodes the actions array to one message
 * per action), then writes the results back to dedicated Kafka topics.
 *
 * Downstream jobs only need the startup and action logs.
 */
object LogDiversionApp extends BaseApp {
  override var batchDuration: Int = 5
  override var appName: String = "LogDiversionApp"
  override var groupId: String = "220409realtime"
  override var topic: String = TopicConstant.ORIGINAL_LOG

  /**
   * Parses every record of the RDD partition-by-partition and routes it:
   *   - startup logs -> TopicConstant.STARTUP_LOG (flattened common + start)
   *   - action logs  -> TopicConstant.ACTION_LOG  (one message per action,
   *                     flattened common + page + action)
   * Error logs and any other log types are dropped.
   *
   * @param rdd the batch of raw Kafka records to divert
   */
  def writeToKafka(rdd: RDD[ConsumerRecord[String, String]]): Unit = {

    rdd.foreachPartition(partition => {

      // One Gson per partition: constructed on the executor (Gson is not
      // serializable-friendly across the driver boundary) and reused for
      // every record of the partition.
      val gson = new Gson()

      partition.foreach(record => {

        val logStr: String = record.value()
        // Parse the raw JSON string into a JSONObject once.
        val log: JSONObject = JSON.parseObject(logStr)

        // Route on the actual top-level JSON keys instead of scanning the raw
        // text: logStr.contains("start") would also match a payload whose
        // page_id (or any other field) merely contains the substring "start"
        // and would misroute that record.
        val isErr: Boolean = log.containsKey("err")

        if (log.containsKey("start") && !isErr) {

          // Startup log: flatten common + start into a single message.
          val dataMap: util.Map[String, AnyRef] = parseCommon(log)
          val startMap: util.Map[String, AnyRef] =
            JSON.parseObject(log.getString("start")).getInnerMap
          dataMap.putAll(startMap)

          KafkaProducerUtil.sendData(gson.toJson(dataMap), TopicConstant.STARTUP_LOG)

        } else if (log.containsKey("actions") && !isErr) {

          // Action log: common + page fields are shared by every action of
          // this record.
          val baseMap: util.Map[String, AnyRef] = parseCommon(log)
          baseMap.putAll(JSON.parseObject(log.getString("page")).getInnerMap)

          // Explode: one outgoing message per element of the actions array.
          val actions: JSONArray = JSON.parseArray(log.getString("actions"))

          for (i <- 0 until actions.size()) {

            // Copy the shared fields for each action so a key written by a
            // previous action cannot leak into this message when the current
            // action does not redefine it (the original reused one mutable
            // map across iterations).
            val dataMap = new util.HashMap[String, AnyRef](baseMap)
            dataMap.putAll(JSON.parseObject(actions.getString(i)).getInnerMap)

            KafkaProducerUtil.sendData(gson.toJson(dataMap), TopicConstant.ACTION_LOG)
          }
        }
      })
      // Flush once per partition so buffered messages are delivered before
      // the task finishes (and before the caller commits offsets).
      KafkaProducerUtil.flush()
    })
  }

  /**
   * Flattens the "common" section of a log record into a mutable Map and
   * copies the record's "ts" field into it.
   *
   * @param map the full log record parsed as JSON
   * @return the inner map of the "common" object, with "ts" added
   */
  def parseCommon(map: JSONObject): util.Map[String, AnyRef] = {

    val commonStr: String = map.getString("common")
    val commonMap: JSONObject = JSON.parseObject(commonStr)
    // getInnerMap exposes the JSONObject's backing map directly.
    val dataMap: util.Map[String, AnyRef] = commonMap.getInnerMap
    // Keep the event timestamp alongside the flattened common fields.
    dataMap.put("ts", map.getString("ts"))
    dataMap
  }

  def main(args: Array[String]): Unit = {

    // Override the context declared in the parent (BaseApp).
    // NOTE(review): "local[*]" is hard-coded — for cluster deployment the
    // master should come from spark-submit; confirm against BaseApp's usage.
    // NOTE(review): the import of TestDemo members at the top of the file is
    // unused here (inherited BaseApp members take precedence) and should be
    // removed in a follow-up.
    context = new StreamingContext("local[*]", appName, Seconds(batchDuration))

    runSparkStreamingApp {

      val ds: InputDStream[ConsumerRecord[String, String]] =
        DStreamUtil.getDStream(context, groupId, topic)

      // Business logic: divert each non-empty batch, then commit offsets.
      ds.foreachRDD(rdd => {

        if (!rdd.isEmpty()) {

          // Grab the offset ranges BEFORE any transformation: only the
          // direct KafkaRDD implements HasOffsetRanges.
          val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          // Parse and write the batch back to Kafka.
          writeToKafka(rdd)

          // Commit offsets only after the batch has been written
          // (at-least-once semantics).
          ds.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
        }
      })
    }
  }
}
