package sparkstreaming.lesson07

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import sparkstreaming.demo.lesson01.ConnectionPool

/**
  * Created by Administrator on 2018/5/12.
  */
object OutoutTest {

  /**
    * Streaming word count over a socket source ("hadoop1":9999, comma-separated
    * words), keeping a running total per word via updateStateByKey and writing
    * the totals to MySQL (table aura.1711wordcount) once per batch.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("OutoutTest")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(2))
    // Checkpoint directory is mandatory for updateStateByKey (it persists state).
    ssc.checkpoint("hdfs://hadoop1:9000/streamingcheckpoint3")

    /**
      * Input: each socket line is a comma-separated list of words.
      */
    val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop1", 9999)

    // Running total per word: new occurrences in this batch + previous state.
    val wordCountDStream = dstream.flatMap(_.split(","))
      .map((_, 1))
      .updateStateByKey((values: Seq[Int], state: Option[Int]) => {
        val currentCount = values.sum
        val lastCount = state.getOrElse(0)
        Some(currentCount + lastCount)
      })

    /*
      Output: one pooled JDBC connection per partition, batched inserts.
     */
    wordCountDStream.foreachRDD { rdd =>
      rdd.foreachPartition { partition =>
        // Skip empty partitions so we do not borrow a connection for nothing.
        if (partition.nonEmpty) {
          val connection = ConnectionPool.getConnection()
          // Words arrive from an untrusted socket: use a parameterized statement
          // instead of string interpolation to avoid SQL injection and quoting
          // bugs (e.g. words containing a single quote).
          val statement = connection.prepareStatement(
            "insert into aura.1711wordcount values(now(),?,?)")
          try {
            partition.foreach { case (word, count) =>
              statement.setString(1, word)
              statement.setInt(2, count)
              statement.addBatch()
            }
            statement.executeBatch()
          } finally {
            // Always release JDBC resources and return the connection to the
            // pool, even when the batch fails, so the pool is never drained.
            statement.close()
            ConnectionPool.returnConnection(connection)
          }
        }
      }
    }

    ssc.start()
    ssc.awaitTermination()
    // Only reached if the context terminates; explicit cleanup for clarity.
    ssc.stop()
  }

}
