package com.offcn.bigdata.spark.p1.p2

import java.sql.DriverManager

import com.mysql.jdbc.Driver
import com.offcn.bigdata.spark.p2._02CombineByKeyOps._
import com.offcn.bigdata.util.db.ConnectionPool
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
  * foreachPartition is to foreach what mapPartitions is to map: the supplied
  * function runs once per partition rather than once per record, so expensive
  * setup work (such as opening a JDBC connection) is amortized over the whole
  * partition instead of being repeated for every element.
  *
  * Target table (MySQL):
  *   create database if not exists test;
  *   use test;
  *   CREATE TABLE `words` (
  *     `word` varchar(20) DEFAULT NULL,
  *     `count` int(11) DEFAULT NULL
  *   ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
  */
object _05ForeachPartitionOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            .setAppName(s"${_05ForeachPartitionOps.getClass.getSimpleName}")
        val sc = new SparkContext(conf)
        try {
            // Sample records (name, age, province), explicitly spread across
            // 2 partitions so per-partition behavior is observable.
            val infos = sc.parallelize(List(
                Student("郭雪磊", 18, "山东"),
                Student("单 松", 20, "山东"),
                Student("刘宇航", 18, "河北"),
                Student("王健", 18, "河南"),
                Student("许迎港", 18, "河北"),
                Student("元永劫", 18, "黑龙江"),
                Student("林博", 18, "黑龙江"),
                Student("李佳奥", 18, "河南"),
                Student("冯世明", 18, "黑龙江"),
                Student("肖楚轩", 18, "山东"),
                Student("张皓", 18, "河南"),
                Student("冯岩", 18, "黑龙江")
            ), 2)

            // Print each partition's contents, then map every student to a
            // (province, 1) pair for counting.
            val p2Info = infos.mapPartitionsWithIndex((index, it) => {
                val list = it.toList
                println(s"分区编号{${index}}中的数据为： ${list}")
                list.map(stu => (stu.province, 1)).toIterator
            })
            val p2count: RDD[(String, Int)] = p2Info.reduceByKey(_ + _)
            // Persist the aggregated counts to MySQL. Earlier (worse) variants
            // are kept below, commented out, for reference.
//            错误的入库操作(p2count)
//            正确的入库操作(p2count)
//            升级后的入库操作(p2count)
//            升级后的入库操作2(p2count)
            升级后的入库操作3(p2count)
        } finally {
            // Always release the SparkContext, even if the job fails.
            sc.stop()
        }
    }

    /**
      * Variant 5: batched insert using a connection pool, so each partition
      * borrows an existing connection instead of paying for a fresh TCP/auth
      * handshake.
      *
      * Fix over the original: resources are released in `finally` blocks, so
      * a failed batch no longer leaks the statement or — worse — the pooled
      * connection (which would eventually exhaust the pool).
      */
    def 升级后的入库操作3(p2count: RDD[(String, Int)]): Unit = {
        p2count.foreachPartition(partition => {
            val connection = ConnectionPool.getConnection()
            try {
                val sql = "insert into words(word, count) values(?, ?)"
                val ps = connection.prepareStatement(sql)
                try {
                    partition.foreach { case (word, count) =>
                        ps.setString(1, word)
                        ps.setInt(2, count)
                        ps.addBatch()
                    }
                    // One round trip for the whole partition.
                    ps.executeBatch()
                } finally {
                    ps.close()
                }
            } finally {
                // Return the connection to the pool rather than closing it.
                ConnectionPool.release(connection)
            }
        })
    }

    /**
      * Variant 4: every record runs the same INSERT, so the statements are
      * accumulated with `addBatch` and sent to MySQL in a single
      * `executeBatch` round trip per partition.
      *
      * Fix over the original: statement and connection are closed in
      * `finally` blocks so a failed batch does not leak them.
      */
    def 升级后的入库操作2(p2count: RDD[(String, Int)]): Unit = {
        p2count.foreachPartition(partition => {
            // Force-load the MySQL driver class on the executor.
            classOf[Driver]
            val connection = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test",
                "root", "sorry"
            )
            try {
                val sql = "insert into words(word, count) values(?, ?)"
                val ps = connection.prepareStatement(sql)
                try {
                    partition.foreach { case (word, count) =>
                        ps.setString(1, word)
                        ps.setInt(2, count)
                        ps.addBatch()
                    }
                    ps.executeBatch()
                } finally {
                    ps.close()
                }
            } finally {
                connection.close()
            }
        })
    }

    /**
      * Variant 3: `foreach` opens one connection per RECORD, which is far too
      * slow, so `foreachPartition` is used to open one connection per
      * PARTITION. Each record is still executed individually (no batching).
      *
      * Fix over the original: statement and connection are closed in
      * `finally` blocks so a failed insert does not leak them.
      *
      * @param p2count (province, count) pairs to persist
      */
    def 升级后的入库操作(p2count: RDD[(String, Int)]): Unit = {
        p2count.foreachPartition(partition => {
            // Force-load the MySQL driver class on the executor.
            classOf[Driver]
            val connection = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test",
                "root", "sorry"
            )
            try {
                val sql = "insert into words(word, count) values(?, ?)"
                val ps = connection.prepareStatement(sql)
                try {
                    partition.foreach { case (word, count) =>
                        ps.setString(1, word)
                        ps.setInt(2, count)
                        ps.execute()
                    }
                } finally {
                    ps.close()
                }
            } finally {
                connection.close()
            }
        })
    }

    /**
      * Variant 2: functionally correct — the connection is created inside the
      * closure, on the executor — but it opens and tears down a full JDBC
      * connection for EVERY record, which is prohibitively slow.
      *
      * Fix over the original: statement and connection are closed in
      * `finally` blocks so a failed insert does not leak them.
      *
      * @param p2count (province, count) pairs to persist
      */
    def 正确的入库操作(p2count: RDD[(String, Int)]): Unit = {
        p2count.foreach { case (word, count) =>
            // Force-load the MySQL driver class on the executor.
            classOf[Driver]
            val connection = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test",
                "root", "sorry"
            )
            try {
                val sql = "insert into words(word, count) values(?, ?)"
                val ps = connection.prepareStatement(sql)
                try {
                    ps.setString(1, word)
                    ps.setInt(2, count)
                    ps.execute()
                } finally {
                    ps.close()
                }
            } finally {
                connection.close()
            }
        }
    }

    /**
      * Variant 1: DELIBERATELY BROKEN — kept as a teaching example, do not
      * call it. The connection and prepared statement are created on the
      * DRIVER but referenced inside the `foreach` closure, which runs on
      * EXECUTORS. Since java.sql.Connection/PreparedStatement are not
      * serializable, Spark fails with a Task-not-serializable error when it
      * tries to ship the closure.
      *
      * @param p2count (province, count) pairs to persist
      */
    def 错误的入库操作(p2count: RDD[(String, Int)]): Unit = {
        classOf[Driver]
        val connection = DriverManager.getConnection(
            "jdbc:mysql://localhost:3306/test",
            "root", "sorry"
        )
        val sql = "insert into words(word, count) values(?, ?)"
        val ps = connection.prepareStatement(sql)
        // This closure captures `ps` from the driver — serialization fails here.
        p2count.foreach { case (word, count) =>
            ps.setString(1, word)
            ps.setInt(2, count)
            ps.execute()
        }

        ps.close()
        connection.close()
    }
}
