

import com.typesafe.scalalogging.Logger
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types._

/**
  * Structured Streaming demo: consumes JSON messages from a Kafka topic,
  * explodes the embedded `data` map and aggregates its values per key,
  * printing the running totals to the console.
  *
  * @author Abyss
  * @date 2019/9/01
  */
object KafkaToStructuredStreaming2 {

  private val logger = Logger(this.getClass)

  // Defaults preserved from the original hard-coded values; both can be
  // overridden on the command line: args(0) = bootstrap servers, args(1) = topic.
  private val DefaultBootstrapServers = "192.168.0.185:9092"
  private val DefaultTopic = "mq_cj_data_1"

  /**
    * Entry point. Reads a Kafka topic as a stream, parses each record's value
    * as JSON, explodes the `data` map into (key, value) rows and emits the
    * per-key sum of values to the console in Complete output mode.
    *
    * @param args optional overrides: args(0) = Kafka bootstrap servers,
    *             args(1) = topic to subscribe to
    */
  def main(args: Array[String]): Unit = {
    // Act as the "hadoop" user when the job touches HDFS from a dev machine.
    System.setProperty("HADOOP_USER_NAME", "hadoop")
    println("hello world")

    // args.lift avoids ArrayIndexOutOfBounds when an argument is omitted.
    val bootstrapServers = args.lift(0).getOrElse(DefaultBootstrapServers)
    val topic = args.lift(1).getOrElse(DefaultTopic)
    logger.info(s"Subscribing to topic '$topic' at $bootstrapServers")

    val spark = getSparkSession

    import spark.implicits._

    // Kafka source: `value` arrives as binary and is cast to string below.
    // "earliest" replays the topic from the beginning on a fresh checkpoint.
    val df = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", bootstrapServers)
      .option("subscribe", topic)
      .option("startingOffsets", "earliest")
      .load()

    // Expected JSON payload shape; every field is nullable.
    val schema = new StructType()
      .add("dataPointId", IntegerType, true)
      .add("dataSource", IntegerType, true)
      .add("dataType", IntegerType, true)
      .add("dataTime", LongType, true)
      .add("data", MapType(StringType, StringType))

    // Records that fail to parse yield a null struct (from_json semantics).
    val parsed = df.select(from_json(col("value").cast("string"), schema).alias("parsed_value"))

    // explode(map) produces one row per map entry with columns (key, value).
    // NOTE(review): map values are strings; `sum` makes Spark cast them to
    // double, silently dropping non-numeric values as null — confirm intended.
    val data = parsed
      .select($"parsed_value.dataPointId", $"parsed_value.data", explode($"parsed_value.data"))
      .toDF("dataPointId", "data", "key", "value")
      .groupBy("key")
      .agg(("value", "sum"))
    data.printSchema()

    // Complete mode re-emits the full aggregate table on every trigger.
    val query = data.writeStream
      .format("console")
      .outputMode(OutputMode.Complete())
      .start()

    // Block the driver until the streaming query terminates.
    query.awaitTermination()
  }

  /**
    * Builds a local SparkSession for development runs.
    *
    * @return a SparkSession with master "local[*]" (all local cores)
    */
  private def getSparkSession: SparkSession =
    SparkSession.builder().appName(getClass.getName).master("local[*]").getOrCreate()

}

