package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

import java.sql.{Connection, DriverManager, PreparedStatement}

object Demo20ToMysql {
  /**
   * Word-count example that writes its result to MySQL over JDBC.
   *
   * Demonstrates why `foreachPartition` is preferred over `foreach` for
   * external-system writes: the (expensive, non-serializable) JDBC
   * connection is created once per partition instead of once per record.
   */
  def main(args: Array[String]): Unit = {
    // 1. Create the Spark execution environment.
    val conf = new SparkConf()
    // Run mode: local, single JVM.
    conf.setMaster("local")
    conf.setAppName("wc")
    val sc = new SparkContext(conf)

    // 2. Read the data.
    // RDD: resilient distributed dataset (conceptually similar to a List).
    val linesRDD: RDD[String] = sc.textFile("data/lines.txt")

    // Split each comma-separated line into words (one line -> many rows).
    val wordsRDD: RDD[String] = linesRDD.flatMap(_.split(","))

    val kvRD: RDD[(String, Int)] = wordsRDD.map(word => (word, 1))

    // Count the occurrences of each word.
    val countRDD: RDD[(String, Int)] = kvRD.reduceByKey((x, y) => x + y)

    val start: Long = System.currentTimeMillis()
    /* Naive version, kept for reference:
       // 1. Create the database connection on the driver.
       // A JDBC connection cannot be serialized and shipped over the network,
       // so this only appears to work in local mode.
       val con: Connection = DriverManager.getConnection("jdbc:mysql://master:3306/bigdata31", "root", "123456")
       val end: Long = System.currentTimeMillis()
       println(end - start)

       // Save to the database using plain JDBC.
       countRDD.foreach {
         case (word, count) =>
           // 2. Build the SQL insert and execute it per record.
           val stat: PreparedStatement = con.prepareStatement("insert into word_count values(?,?)")
           stat.setString(1, word)
           stat.setInt(2, count)
           stat.execute()
       }
       con.close()*/

    // foreachPartition: process one whole partition per function call.
    countRDD.foreachPartition(iter => {
      // 1. Create ONE database connection per partition (paid once, not per record).
      val con: Connection = DriverManager.getConnection("jdbc:mysql://master:3306/bigdata31", "root", "123456")
      val end: Long = System.currentTimeMillis()
      println(end - start)

      try {
        // 2. Prepare the insert statement ONCE and reuse it for every row.
        // (Fix: the original prepared a brand-new statement for each record
        // inside the loop and never closed it — a per-record leak.)
        val stat: PreparedStatement = con.prepareStatement("insert into word_count values(?,?)")
        try {
          // Loop over the records of this partition.
          iter.foreach {
            case (word, count) =>
              stat.setString(1, word)
              stat.setInt(2, count)
              stat.execute()
          }
        } finally {
          stat.close()
        }
      } finally {
        // Always release the connection, even if an insert fails.
        con.close()
      }
    })

    // Release Spark resources before the driver exits.
    sc.stop()
  }
}
