package com.atguigu.bigdata.spark.streaming

import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Duration, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object SprakStreaming13_RDD1 {
  /**
   * Streaming word count: reads lines from a socket (localhost:9999), counts words
   * per 3-second batch, and persists each batch's (word, count) pairs into MySQL.
   *
   * Note: a SparkStreaming job may need a graceful shutdown (business/tech upgrades).
   * Shutdown requires calling stop(), which may need to run from a separate thread;
   * this example does not implement that and simply blocks in awaitTermination().
   */
  def main(args: Array[String]): Unit = {
    // "local[*]" uses all local cores — streaming needs at least 2 (receiver + processing).
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("StreamWordCount")

    // Batch interval of 3000 ms: one micro-batch every 3 seconds.
    val ssc = new StreamingContext(conf, Duration(3000))

    val socketData: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    val words: DStream[String] = socketData.flatMap(_.split(" "))
    val wordToOne: DStream[(String, Int)] = words.map((_, 1))
    val wordToCount: DStream[(String, Int)] = wordToOne.reduceByKey(_ + _)

    // foreachRDD runs on the driver once per batch; foreachPartition runs on the
    // executors, so the JDBC connection is created there (connections are not
    // serializable and must not be created on the driver).
    wordToCount.foreachRDD { rdd =>
      rdd.foreachPartition { iter =>
        // One connection + one prepared statement per partition.
        Class.forName("com.mysql.jdbc.Driver")
        val url = "jdbc:mysql://hadoop104:3306/spark-sql"
        val user = "root"
        val password = "000000"
        val conn: Connection = DriverManager.getConnection(url, user, password)
        try {
          val statement: PreparedStatement =
            conn.prepareStatement("insert  into test(word,count) values (?,?)")
          try {
            // BUG FIX: the statement and connection were previously closed inside
            // this loop, so any partition with more than one record failed on the
            // second insert. Close once per partition instead (see finally blocks).
            iter.foreach { case (word, count) =>
              statement.setString(1, word)
              statement.setInt(2, count)
              statement.executeUpdate()
            }
          } finally {
            statement.close()
          }
        } finally {
          // Always release the connection, even if an insert throws.
          conn.close()
        }
      }
      // Removed: an empty `rdd.foreach { }` block (dead code) and a stray closing
      // brace that made the original fail to compile.
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
