package com.bigdata.spark.testapp

import com.alibaba.fastjson.JSONObject
import com.alibaba.fastjson.JSON

import java.lang
import java.text.SimpleDateFormat
import java.util.Date


import com.bigdata.spark.util.MyKafkaUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Consumes records from Kafka and turns each value into a JSON object.

/**
 * Spark Streaming job that reads start-up log lines from Kafka, parses each
 * line into a fastjson `JSONObject`, and enriches it with "dt" (date) and
 * "hr" (hour) fields derived from the record's "ts" epoch-millisecond field.
 * The enriched stream is printed for inspection.
 */
object streamingapp_json {
  def main(args: Array[String]): Unit = {
    // Local 4-thread master is for development only; externalize for production.
    val conf: SparkConf = new SparkConf().setMaster("local[4]").setAppName("app1")
    // 5-second micro-batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))

    // Kafka topic carrying start-up logs, and the consumer group for this job.
    // (Never reassigned, so declared as vals.)
    val topic: String = "gmall_start_0523"
    val groupId = "gmall_dau_0523"

    // Discretized stream of Kafka ConsumerRecord[String, String] key/value pairs.
    val recordDstream: InputDStream[ConsumerRecord[String, String]] =
      MyKafkaUtil.getKafkaStream(topic, ssc, groupId)

    // Parse each record value into JSON and attach date/hour fields.
    val jsonObjDStream: DStream[JSONObject] = recordDstream.map { record =>
      // Raw start-up log line (the Kafka record value).
      val jsonStr: String = record.value()
      // Parse the log line into a JSON object.
      val jsonObj: JSONObject = JSON.parseObject(jsonStr)

      // Event timestamp in epoch milliseconds.
      // NOTE(review): getLong returns null when "ts" is missing, which would
      // NPE when unboxed by `new Date(ts)` below — assumes every record
      // carries a "ts" field; confirm against the producer.
      val ts: lang.Long = jsonObj.getLong("ts")

      // SimpleDateFormat is not thread-safe; creating it per record avoids
      // sharing one instance across executor threads.
      val dateHourString: String = new SimpleDateFormat("yyyy-MM-dd HH").format(new Date(ts))

      // Split "yyyy-MM-dd HH" into date and hour and store them on the
      // JSON object for downstream processing.
      val dateHour: Array[String] = dateHourString.split(" ")
      jsonObj.put("dt", dateHour(0))
      jsonObj.put("hr", dateHour(1))
      println(jsonObj) // debug output on executors; remove for production
      jsonObj
    }

    jsonObjDStream.print()

    // Start the streaming context and block until termination.
    ssc.start()
    ssc.awaitTermination()

  }
}
