package com.niit.spark.sparkStreaming

import java.util

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.Try

import com.alibaba.fastjson.JSON
import com.niit.utils.{MyConfig, MyKafkaUtils}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.json4s.jackson.Json
import org.json4s.jackson.JsonMethods.compact
import org.json4s.{DefaultFormats, JsonAST}

/**
 * @author 杨铭
 *         2022/11/19,14:54
 */


object sparkStreaming4 {

  /** Order record deserialized from the JSON messages on the "orders" topic. */
  case class Order(isCorrect: String, orderCategory: String, orderData: String, orderName: String, orderNum: Int)

  /**
   * Spark Streaming job: consumes JSON order messages from the Kafka "orders"
   * topic, counts orders per `orderCategory` within each 2-second micro-batch,
   * and publishes each count as a JSON object ({"category": ..., "num": ...})
   * to the Kafka "task4" topic. Runs until externally terminated.
   */
  def main(args: Array[String]): Unit = {

//    System.setProperty("hadoop.home.dir", "D:\\ScalaProject\\hadoop-2.7.3")
//    System.setProperty("HADOOP_USER_NAME", "root")

    val groupId = "niit02"
    val topicm = "orders"
    val conf = new SparkConf().setAppName("Spark4").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(2))

    val streamRdd = MyKafkaUtils.getKafkaDStream(ssc, topicm, groupId)

    // Parse each message exactly once and drop records that are not valid
    // Order JSON. Try catches only NonFatal throwables, so fatal errors still
    // propagate. (The original filter+map parsed every message twice and
    // caught all Exceptions.)
    val orderDS = streamRdd.map(_.value()).flatMap { line =>
      Try(JSON.parseObject(line, classOf[Order])).toOption
    }

    orderDS.map(order => (order.orderCategory, 1))
      .foreachRDD { rdd =>
        // Aggregate per category on the executors; only the small per-batch
        // summary is collected to the driver before being sent to Kafka.
        rdd.reduceByKey(_ + _).collect().foreach { case (category, num) =>
          val payload = Json(DefaultFormats).write(Map(
            "category" -> category,
            "num" -> num
          ))
          MyKafkaUtils.send("task4", payload)
        }
      }

    ssc.start()
    ssc.awaitTermination()
  }
}

