package com.atguigu.day08

import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.util.control.NonFatal

object $07_Output {

  /**
   * Word-count over a socket text stream, upserted into MySQL in batches.
   *
   * Reads lines from hadoop102:9999 every 5 seconds, counts words per batch,
   * and writes each partition's counts to table `wc` using
   * `INSERT ... ON DUPLICATE KEY UPDATE` so repeated words accumulate.
   */
  def main(args: Array[String]): Unit = {

    val ssc = new StreamingContext(new SparkConf().setMaster("local[4]").setAppName("test"), Seconds(5))

    val ds = ssc.socketTextStream("hadoop102", 9999)

    ds.flatMap(_.split(" "))
      .map((_, 1))
      .reduceByKey(_ + _)
      .foreachRDD { rdd =>
        rdd.foreachPartition { it =>
          var connection: Connection = null
          var statement: PreparedStatement = null
          try {
            // One connection per partition: JDBC objects are not serializable,
            // so they must be created on the executor, inside foreachPartition.
            connection = DriverManager.getConnection("jdbc:mysql://hadoop102:3306/test", "root", "root123")
            statement = connection.prepareStatement(
              "insert into wc values(?,?) ON DUPLICATE KEY UPDATE num = num+?")

            var i = 0
            it.foreach { case (word, count) =>
              i += 1
              statement.setString(1, word)
              statement.setInt(2, count)
              statement.setInt(3, count)
              statement.addBatch()
              // Flush every 200 rows to bound the memory held by the batch.
              if (i % 200 == 0) {
                statement.executeBatch()
                statement.clearBatch()
              }
            }

            // Flush the final (possibly partial, possibly empty) batch.
            statement.executeBatch()

          } catch {
            // NonFatal only: let OutOfMemoryError, InterruptedException, etc. propagate.
            case NonFatal(e) => e.printStackTrace()
          } finally {
            // Guard each close() independently: previously a failure closing the
            // statement would skip connection.close() and leak the connection.
            if (statement != null) {
              try statement.close() catch { case NonFatal(_) => () }
            }
            if (connection != null) {
              try connection.close() catch { case NonFatal(_) => () }
            }
          }
        }
      }

    ssc.start()

    ssc.awaitTermination()
  }
}
