package org.niit.spark

import java.util.{HashMap, Properties}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.niit.spark.connUil.{kafkaConn, mysqlConn}

object demo {

  // Record shapes for the four reporting pipelines. The DataFrames below are
  // currently built straight from tuples (columns _1/_2/...), so these case
  // classes only document the data flowing through each pipeline; they are
  // kept for source compatibility.
  case class goodss(name: String, number: Int)
  case class updown(status: String, number: Int)
  case class categories(name: String, number: Int)
  case class supermarkets(name: String, status: String, number: Int)

  def main(args: Array[String]): Unit = {

    // Local[*] streaming job consuming supermarket events in 6-second batches.
    val conf = new SparkConf().setMaster("local[*]").setAppName("streamingKafka")
    val streamingCon = new StreamingContext(conf, Seconds(6))

    val sqlCon = new SQLContext(streamingCon.sparkContext)

    val topic = "supermarket"
    val group = "niit112"

    streamingCon.sparkContext.setLogLevel("error")

    // Kafka consumer settings: auto-commit disabled, and a new consumer group
    // starts reading from the earliest available offset.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit202134070927:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Checkpoint directory: required by updateStateByKey (persists the running
    // state and consumed-offset metadata between batches).
    streamingCon.checkpoint("./checkpoint")
    val topics = Array(topic)

    // Direct (receiver-less) Kafka input stream.
    val streamRdd = KafkaUtils.createDirectStream(
      streamingCon,
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    val line = streamRdd.map(_.value())

    // Running (cross-batch) count of "U"/"D" status tokens.
    val counts = line.flatMap(_.split("\\s+")).filter(word => word == "U" || word == "D")
    val result = counts.map(word => (word, 1)).updateStateByKey(updateFunc)
    // FIX: `result` previously had no output operation, so the stateful count
    // was never evaluated. Print it so the accumulated totals materialize.
    result.print()

    line.foreachRDD(
      batch => {

        import sqlCon.implicits._
        val mysqlconn = new mysqlConn

        // Feature 1: per-batch count of on-shelf (U) / off-shelf (D) events. --
        val coun1 = batch.map(row => {
          val split = row.split("\t")
          val status = split(5) // field 5 holds the U/D status flag
          val num1 = if (status == "U" || status == "D") 1 else 0
          (status, num1)
        })
        coun1.foreach(rec => {
          val status = rec._1
          val num1 = rec._2
          // Publish "status,count" to the "updown" Kafka topic. The producer
          // is constructed inside the executor-side closure on purpose: a
          // driver-side instance would be captured by the closure and fail
          // Spark's serialization check.
          // TODO(review): consider foreachPartition with one producer per
          // partition once kafkaConn's lifecycle (whether kafkaconnect closes
          // the producer) is confirmed.
          new kafkaConn().kafkaconnect("updown", status + "," + num1)
        })
        val counDataFrame = coun1.toDF()
        counDataFrame.show()
        mysqlconn.mysqlconn(counDataFrame, "updown_Info")

        // Feature 2: per-batch count of each goods name. ----------------------
        val coun2 = batch.map(row => {
          val split = row.split("\t")
          val goods = split(1) // field 1 holds the goods name
          val num2 = if (goods == "apple" || goods == "banana" || goods == "orange" ||
                         goods == "phone" || goods == "ipad" || goods == "pc") 1 else 0
          (goods, num2)
        })
        coun2.foreach(rec => {
          val goods = rec._1
          val num2 = rec._2
          // Publish "goods,count" to the "goods" Kafka topic.
          new kafkaConn().kafkaconnect("goods", goods + "," + num2)
        })
        val counDataFrame2 = coun2.toDF()
        counDataFrame2.show()
        mysqlconn.mysqlconn(counDataFrame2, "goods_Info")

        // Feature 3: per-batch count of each category. ------------------------
        val coun3 = batch.map(row => {
          val split = row.split("\t")
          val category = split(2) // field 2 holds the category
          val num3 = if (category == "fruit" || category == "electronics") 1 else 0
          (category, num3)
        })
        coun3.foreach(rec => {
          val category = rec._1
          val num3 = rec._2
          // Publish "category,count" to the "category" Kafka topic.
          new kafkaConn().kafkaconnect("category", category + "," + num3)
        })
        val counDataFrame3 = coun3.toDF()
        counDataFrame3.show()
        mysqlconn.mysqlconn(counDataFrame3, "category_Info")

        // Features 4 & 5: per-batch count per (goods, status) pair. -----------
        val coun4 = batch.map(row => {
          val split = row.split("\t")
          val category2 = split(1)
          val status2 = split(5)
          // NOTE(review): `||` counts a row when EITHER the goods name OR the
          // status is recognised; if a row should need both fields to be
          // valid, this was probably meant to be `&&` — confirm the intent.
          val num4 = if ((category2 == "apple" || category2 == "banana" || category2 == "orange" ||
                          category2 == "phone" || category2 == "ipad" || category2 == "pc") ||
                         (status2 == "U" || status2 == "D")) 1 else 0
          (category2, status2, num4)
        })
        coun4.foreach(rec => {
          val category2 = rec._1
          val status2 = rec._2
          val num4 = rec._3
          // Publish "goods,status,count" to the "cs" Kafka topic.
          new kafkaConn().kafkaconnect("cs", category2 + "," + status2 + "," + num4)
        })
        val counDataFrame4 = coun4.toDF()
        counDataFrame4.show()
        mysqlconn.mysqlconn(counDataFrame4, "final_Info")
      }
    )

    streamingCon.start()
    streamingCon.awaitTermination()
  }

  /**
   * State update function for updateStateByKey: merges this batch's
   * occurrences into the running total carried over from previous batches.
   *
   * FIX: the previous version computed the old total into a local but
   * discarded it, returning only the current batch's sum, so the "running"
   * count never actually accumulated across batches.
   *
   * @param newValues counts observed for the key in the current batch
   * @param oldStatus running total from previous batches, if any
   * @return the new running total for the key
   */
  def updateFunc(newValues: Seq[Int], oldStatus: Option[Int]): Option[Int] =
    Some(newValues.sum + oldStatus.getOrElse(0))
}


