package com.shujia.spark.streaming

import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
  * Spark Streaming example: convert each micro-batch of a DStream into an RDD,
  * then into a DataFrame, query it with SQL, and append the result to disk.
  */
object Demo4DstreamToRDDToDF {
  def main(args: Array[String]): Unit = {

    // Local mode with two cores: one is consumed by the socket receiver,
    // the other is left free to process the batches.
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("wc")
    sparkConf.set("spark.sql.shuffle.partitions", "1")

    val sparkContext = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sparkContext)

    // Streaming context with a 5-second batch interval: one new RDD is
    // produced (and the foreachRDD body below runs once) per interval.
    val ssc = new StreamingContext(sparkContext, Durations.seconds(5))

    // Text stream read line-by-line from a socket source.
    val lines = ssc.socketTextStream("node1", 8888)

    /**
      * Per batch: DStream --> RDD --> DataFrame, then aggregate with SQL.
      */
    lines.foreachRDD(batchRDD => {
      import sqlContext.implicits._

      // Parse each comma-separated line into an (id, name, age, gender, clazz)
      // tuple; age is the only numeric field.
      val studentRDD = batchRDD.map(row => {
        val fields = row.split(",")
        (fields(0), fields(1), fields(2).toInt, fields(3), fields(4))
      })

      // Turn the RDD into a DataFrame with explicit column names.
      val studentDF = studentRDD.toDF("id", "name", "age", "gender", "clazz")

      // Register as a temporary table so it can be queried via SQL.
      studentDF.registerTempTable("student")

      // Count students per class for this batch only.
      val classCountDF = sqlContext.sql("select clazz,count(1) from student group by clazz")

      classCountDF.show()

      // Append this batch's aggregation result as JSON files.
      classCountDF.write.mode(SaveMode.Append).json("spark/data/dsondf")
    })

    // Start the streaming job and block until it is terminated externally.
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}
