package com.bigdata.spark.mallapp_realtime.test

import java.lang

import com.alibaba.fastjson.{JSON, JSONArray, JSONObject}
import com.bigdata.spark.util.{MyKafkaUtil, OffsetManagerUtil}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.ListBuffer

object mall_log_test_app {

  /**
   * Test driver: consumes event-log JSON lines from Kafka, extracts the
   * `(action_id, item)` pairs from each line's "actions" array, and prints
   * sliding-window counts of those pairs.
   *
   * Side effects: starts a local Spark Streaming job, prints per-record
   * action pairs and per-window counts to stdout, and blocks until the
   * streaming context terminates.
   */
  def main(args: Array[String]): Unit = {
    // Local-mode Spark context with a 3-second micro-batch interval.
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("logtest2")
    val ssc = new StreamingContext(sparkConf, Seconds(3))

    // Topic / consumer group for the offset-managed stream below.
    val topic: String = "gmall_event_0523"
    val groupId = "test2"

    // Resume from offsets saved in Redis when available; otherwise start from
    // the consumer group's default position.
    // NOTE(review): this offset-managed stream is created but never consumed —
    // the job actually reads through `kafkaDS` below, so the Redis offsets have
    // no effect on what gets processed. Kept for parity with the original test;
    // confirm which stream is intended before reusing this code.
    val offsetMap: Map[TopicPartition, Long] = OffsetManagerUtil.getOffset(topic, groupId)
    val recordDStream: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap != null && offsetMap.nonEmpty) {
        MyKafkaUtil.getKafkaStream(topic, ssc, offsetMap, groupId)
      } else {
        MyKafkaUtil.getKafkaStream(topic, ssc, groupId)
      }

    // Plain direct-stream consumer configuration (no managed offsets; note the
    // separate consumer group "atguigu").
    val kafkaParams = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop102:9092,hadoop103:9092,hadoop104:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "atguigu",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
    )

    // The stream the job actually processes. ConsumerRecord itself is not
    // printed directly; we extract plain tuples below.
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Set("gmall_event_0523"), kafkaParams)
      )

    // Flatten each log line into zero or more (action_id, item) pairs taken
    // from its "actions" JSON array. Lines without actions yield nothing.
    val actionPairs: DStream[(String, String)] = kafkaDS.flatMap { record =>
      val jsonLog: JSONObject = JSON.parseObject(record.value())
      val actions: JSONArray = jsonLog.getJSONArray("actions")
      val pairs = new ListBuffer[(String, String)]
      if (actions != null && actions.size() > 0) {
        for (i <- 0 until actions.size()) {
          val action: JSONObject = actions.getJSONObject(i)
          val pair = (action.getString("action_id"), action.getString("item"))
          pairs.append(pair)
          // Debug output of each extracted pair ("动作列表" = "action list").
          println(("动作列表", pair))
        }
      }
      pairs
    }

    // Count each pair over a 120-second window sliding every 12 seconds
    // (both multiples of the 3-second batch interval, as Spark requires).
    val pairCounts: DStream[((String, String), Int)] = actionPairs
      .map((_, 1))
      .window(Seconds(120), Seconds(12))
      .reduceByKey(_ + _)

    pairCounts.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
