package com.shujia.stream

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo4OnSql {

  /**
    * Demo: combining Spark SQL with Spark Streaming.
    *
    * Reads "id,name,age,gender,clazz" lines from a socket in 5-second
    * batches, counts students per class with SQL, and keeps a running
    * total across batches via updateStateByKey.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setAppName("stream")
      // local[2]: at least two threads — one for the socket receiver,
      // one to actually process the batches.
      .setMaster("local[2]")
      .set("spark.sql.shuffle.partitions", "4")
    val sc = new SparkContext(conf)

    // Spark SQL context shared by every micro-batch below.
    val sqlContext = new SQLContext(sc)

    import sqlContext.implicits._

    // Streaming context with a 5-second batch interval.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Checkpoint dir is required by updateStateByKey: it persists the
    // accumulated per-key state between batches.
    ssc.checkpoint("data/onsql")

    // DStream of raw CSV lines from the socket source.
    val ds = ssc.socketTextStream("192.168.129.101", 7777)

    /**
      * Parses one "id,name,age,gender,clazz" line into a student tuple.
      * Returns None for malformed input (fewer than 5 fields or a
      * non-numeric age) so that a single bad line cannot kill the
      * whole streaming job.
      */
    def parseStudent(line: String): Option[(String, String, Int, String, String)] = {
      val fields = line.split(",")
      if (fields.length < 5) None
      else scala.util.Try(fields(2).toInt).toOption.map { age =>
        (fields(0), fields(1), age, fields(3), fields(4))
      }
    }

    /**
      * foreachRDD hands each batch's RDD to an arbitrary function. It is an
      * output operation, not a transformation, so an action must be invoked
      * inside it — DataFrames are lazy and without one the SQL below would
      * never run.
      *
      * Pipeline: ds ----> rdd ----> DF ----> registered as a temp table.
      */
    ds.foreachRDD(rdd => {

      // One batch of data, parsed and lifted into a DataFrame
      // (column names must be supplied for a tuple RDD).
      val stuDF = rdd
        .flatMap(parseStudent)
        .toDF("id", "name", "age", "gender", "clazz")

      stuDF.registerTempTable("student")

      // show() is the action that actually triggers the query for this batch.
      sqlContext
        .sql("select clazz,count(1) as c from student group by clazz")
        .show()
    })

    /**
      * transform converts each batch's RDD and must return a new RDD.
      * Used here to run SQL per batch and come back out as a
      * (clazz, count) pair RDD that updateStateByKey can consume.
      */
    val countDS = ds.transform(rdd => {
      // Same parse-to-DataFrame step as above, for this batch.
      val stuDF = rdd
        .flatMap(parseStudent)
        .toDF("id", "name", "age", "gender", "clazz")

      stuDF.registerTempTable("student")

      val countDF = sqlContext
        .sql("select clazz,count(1) as c from student group by clazz")

      // Back from DataFrame rows to an RDD keyed by class name;
      // count(1) comes out of Spark SQL as a Long.
      countDF.rdd.map(row => {
        val clazz = row.getAs[String]("clazz")
        val count = row.getAs[Long]("c")
        (clazz, count)
      })
    })

    /**
      * State update function for updateStateByKey: adds this batch's counts
      * (seq) to the running total carried over from all previous batches
      * (opt; None on a key's first appearance, hence the 0L default).
      */
    val updateFun = (seq: Seq[Long], opt: Option[Long]) => {
      val currCount = seq.sum
      val last = opt.getOrElse(0L)
      // Return the new state; returning None would drop the key.
      Some(currCount + last)
    }

    val totalCounts = countDS.updateStateByKey(updateFun)

    // Write each batch's running totals out; the output directory name
    // is suffixed with the batch timestamp by default.
    totalCounts.saveAsTextFiles("data/ds/out")

    ssc.start()
    // Blocks until the streaming job is stopped or fails.
    ssc.awaitTermination()
  }
}
