package com.atguigu.gmall.realtime.app

import java.lang

import com.alibaba.fastjson.serializer.SerializeConfig
import com.alibaba.fastjson.{JSON, JSONObject}
import com.atguigu.gmall.realtime.bean.PageLog
import com.atguigu.gmall.realtime.util.{MyKafakUtil, MykafkaSender, OffsetManager}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * ODS base-log splitter.
 *
 * Pipeline:
 *  1. Consume raw log records from the Kafka topic ODS_BASE_LOG, resuming
 *     from offsets saved by [[OffsetManager]] when available.
 *  2. Parse each record's value into a fastjson JSONObject.
 *  3. For records carrying a "page" section, extract the common + page fields
 *     into a [[PageLog]] and forward it to the DWD_PAGE_LOG topic.
 *  4. After each micro-batch, flush the Kafka producer (on executors, once
 *     per partition) and save the consumed offset ranges (on the driver,
 *     once per batch) — this ordering gives at-least-once delivery.
 */
object OdsBaseLogApp {

  def main(args: Array[String]): Unit = {
    val topic = "ODS_BASE_LOG"
    val groupId = "ods_base_log_app"
    val pageTopic = "DWD_PAGE_LOG"

    val sparkConf: SparkConf = new SparkConf().setAppName("ods_base_log_app").setMaster("local[4]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Previously committed offsets for this (topic, group), if any.
    val offsetMap: Map[TopicPartition, Long] = OffsetManager.getOffset(topic, groupId)

    // Resume from saved offsets when present; otherwise start from the
    // consumer's default position. `if` is an expression, so no var/null needed.
    // (null check kept: OffsetManager may come from Java interop — TODO confirm.)
    val inputDstream: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap == null || offsetMap.isEmpty) {
        MyKafakUtil.getKafkaStream(topic, ssc, groupId)
      } else {
        MyKafakUtil.getKafkaStream(topic, ssc, offsetMap, groupId)
      }

    // Capture the offset ranges of each batch. `transform` runs its body on
    // the driver once per batch, so assigning the driver-side var here is safe;
    // the RDD itself is passed through unchanged.
    var offsetRanges: Array[OffsetRange] = null // driver-side, refreshed every batch
    val inputDstreamWithOffset: DStream[ConsumerRecord[String, String]] = inputDstream.transform { rdd =>
      println("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // Parse each record value into a structured JSON object (runs on executors).
    val jsonObjDstream: DStream[JSONObject] = inputDstreamWithOffset.map { record =>
      JSON.parseObject(record.value())
    }

    // Inspect each record, extract the page-log fields, and route them to the
    // appropriate DWD topic.
    jsonObjDstream.foreachRDD { rdd =>
      rdd.foreachPartition { jsonObjItr => // executor side: once per batch per partition

        for (jsonObj <- jsonObjItr) { // per record
          val ts: lang.Long = jsonObj.getLong("ts")

          // Fields shared by all log types.
          val commonObj: JSONObject = jsonObj.getJSONObject("common")
          val mid: String = commonObj.getString("mid")
          val uid: String = commonObj.getString("uid")
          val ar: String = commonObj.getString("ar")
          val ch: String = commonObj.getString("ch")
          val os: String = commonObj.getString("os")
          val md: String = commonObj.getString("md")
          val vc: String = commonObj.getString("vc")
          val isNew: String = commonObj.getString("is_new")

          // Route 1: page-visit data -> DWD_PAGE_LOG
          val pageJsonObj: JSONObject = jsonObj.getJSONObject("page")
          if (pageJsonObj != null && pageJsonObj.size() > 0) {
            val pageId = pageJsonObj.getString("page_id")
            val pageItem = pageJsonObj.getString("item")
            val pageItemType = pageJsonObj.getString("item_type")
            val lastPageId = pageJsonObj.getString("last_page_id")
            val duringTime = pageJsonObj.getLong("during_time")
            val pageLog = PageLog(mid, uid, ar, ch, isNew, md, os, vc, pageId, lastPageId, pageItem, pageItemType, duringTime, ts)
            // SerializeConfig(true) = fieldBased, so fastjson serializes the
            // case-class fields without requiring JavaBean getters.
            val pageJsonString: String = JSON.toJSONString(pageLog, new SerializeConfig(true))
            MykafkaSender.send(pageTopic, pageJsonString)
          }
        }
        // Flush the producer once per partition per batch, AFTER all sends for
        // this partition — not per record (too slow) and not on the driver
        // (the producer lives on the executors).
        MykafkaSender.flush()
      }

      // Commit offsets on the driver, once per batch, only after the batch's
      // sends have been flushed above — at-least-once semantics. Committing
      // inside foreachPartition (executor) or outside foreachRDD (runs once
      // at startup) would be wrong.
      OffsetManager.saveOffset(topic, groupId, offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
