package com.shujia.stream

import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.streaming.{DataStreamReader, OutputMode}

/**
  * Structured Streaming demo: reads sale records from the Kafka topic "item",
  * computes the running total transaction amount with Spark SQL, and writes
  * the total back to the Kafka topic "sum_pic".
  *
  * Expected record format (comma separated): id,time,price
  * NOTE(review): parsing assumes every record is well-formed; a malformed line
  * (missing fields or non-numeric time/price) will fail the query — confirm
  * whether bad records should be filtered out instead.
  */
object Demo9Compute {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[5]")
      .appName("item")
      // Checkpointing is required for fault tolerance / exactly-once progress tracking.
      .config("spark.sql.streaming.checkpointLocation", "/spark/data/checkpoint")
      .getOrCreate()

    import spark.implicits._

    // Read the raw record stream from Kafka.
    val kafkaDF: DataFrame = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "master:9092,node1:9092,node2:9092")
      .option("subscribe", "item")
      .load()

    // Kafka values arrive as binary; cast to string to get the CSV line.
    val lineDS: Dataset[String] = kafkaDF.selectExpr("cast(value as string) as line").as[String]

    // Split each CSV line into typed columns.
    // Renamed the third field from the misleading "pic" to "price": it is the
    // per-record amount that the aggregation below sums into a total.
    val itemDF: DataFrame = lineDS.map(line => {
      val split: Array[String] = line.split(",")
      val id: String = split(0)
      val time: Long = split(1).toLong   // event timestamp (epoch) — TODO confirm units with producer
      val price: Int = split(2).toInt    // transaction amount for this record
      (id, time, price)
    }).toDF("id", "time", "price")

    // Register a temp view so the aggregation can be expressed in SQL.
    itemDF.createOrReplaceTempView("item")

    /**
      * Compute the total transaction amount.
      * Data written to Kafka must be a string or binary "value" column,
      * hence the cast of the sum to string.
      */
    val sumDF: DataFrame = spark.sql(
      """
        |select cast(sum(price) as string) as value from item
      """.stripMargin)

    // Write the running total back to Kafka. Update mode emits only rows whose
    // aggregate changed since the last trigger.
    sumDF.writeStream
      .format("kafka")
      .outputMode(OutputMode.Update())
      .option("kafka.bootstrap.servers", "master:9092,node1:9092,node2:9092")
      .option("topic", "sum_pic")
      .start()
      .awaitTermination()
  }
}
