package com.jinghang.streaming_base

import java.sql.DriverManager

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object _13_ForeachRDDApp {

  /**
    * Word-count over a socket text stream, persisted to MySQL via `foreachRDD`.
    *
    * Demonstrates the recommended connection pattern: the JDBC connection is
    * created inside `foreachPartition` (on the executor), never on the driver,
    * because `java.sql.Connection` is not serializable.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setAppName("ForeachRDDApp").setMaster("local[4]")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    ssc.sparkContext.setLogLevel("ERROR")

    val ds1 = ssc.socketTextStream("hadoop000", 6789)

    val result = ds1
      .flatMap(_.split(" "))
      .map((_, 1))        // (a,1),(a,1)
      .reduceByKey(_ + _) // (a,2)

    result.print()

    // Write each micro-batch's counts to MySQL.
    result.foreachRDD(rdd => {

      rdd.foreachPartition(partitionOfRecords => {
        // Inside a partition this is plain single-JVM code; no distributed
        // data-set concerns apply here.
        val connection = createConnection()
        try {
          // Parameterized statement instead of string concatenation: avoids SQL
          // injection and malformed SQL when a word contains a quote character.
          val statement = connection.prepareStatement(
            "insert into word_count(word, word_count, create_time, update_time) values (?, ?, ?, ?)")
          try {
            partitionOfRecords.foreach { case (word, count) =>
              // Single timestamp per row so create_time and update_time agree.
              val now = DateUtils.getCurrentTime()
              statement.setString(1, word)
              statement.setInt(2, count)
              statement.setString(3, now)
              statement.setString(4, now)
              statement.addBatch()
            }
            // One round trip per partition instead of one per record.
            statement.executeBatch()
          } finally {
            statement.close() // close the statement even if a batch insert fails
          }
        } finally {
          connection.close() // always release the connection, success or failure
        }
      })
    })

    ssc.start()
    ssc.awaitTermination()
  }

  /**
    * Opens a new MySQL connection to the `sparkStreaming` database.
    *
    * Called once per partition per batch; callers are responsible for closing
    * the returned connection.
    *
    * @return a live [[java.sql.Connection]]
    */
  def createConnection() = {
    Class.forName("com.mysql.jdbc.Driver")
    DriverManager.getConnection("jdbc:mysql://hadoop000:3306/sparkStreaming", "root", "root")
  }

}
