package com.shujia.stream

import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{DataFrame, SparkSession}

object Demo5Join {

  /**
    * Stream-static join demo.
    *
    * Reads a stream of score records from a socket ("s_id,c_id,sco" per line),
    * joins it with a static students CSV table, and prints each micro-batch
    * to the console.
    */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession.builder()
      .config("spark.sql.shuffle.partitions", 1)
      .master("local[5]")
      .appName("stru")
      .getOrCreate()

    /**
      * Streaming source: read raw lines from a socket.
      */
    val scoDF: DataFrame = spark
      .readStream
      .format("socket")
      .option("host", "master")
      .option("port", 9999)
      .load()

    import spark.implicits._

    // Parse each line into (s_id, c_id, sco). Malformed lines (fewer than 3
    // comma-separated fields) are dropped via flatMap/Option instead of
    // letting an ArrayIndexOutOfBoundsException kill the streaming query.
    val scoreDF: DataFrame = scoDF
      .as[String]
      .flatMap { line =>
        val fields: Array[String] = line.split(",")
        if (fields.length >= 3) Some((fields(0), fields(1), fields(2))) else None
      }
      .toDF("s_id", "c_id", "sco")

    // Static (batch) side of the join, read once when the query starts.
    val studentDF: DataFrame = spark.read
      .option("sep", ",")
      .schema("id STRING , name STRING ,age LONG , gender STRING , clazz STRING") // column names and types, in order
      .csv("spark/data/students.txt")

    import org.apache.spark.sql.functions._

    /**
      * Stream-static join (unified batch/stream API).
      * The small static table is broadcast so each micro-batch avoids a shuffle.
      */
    val joinDF: DataFrame = scoreDF.join(broadcast(studentDF), $"s_id" === $"id")

    joinDF
      .writeStream
      .outputMode(OutputMode.Append()) // append-only: emit each joined row once
      .format("console")               // write results to the console
      .start()                         // start the streaming query
      .awaitTermination()              // block until the query terminates

  }

}
