package com.alison.sink

import org.apache.spark.sql.streaming.StreamingQuery
import org.apache.spark.sql.{DataFrame, ForeachWriter, Row, SparkSession}

import java.sql.{Connection, DriverManager, PreparedStatement}
import java.util.Properties

/**
 * Demo: sinking a Structured Streaming word count to MySQL, two ways:
 *  1. [[out_jdbc]]       — row-at-a-time via a custom `ForeachWriter`.
 *  2. [[out_jdbc_batch]] — whole micro-batch via `foreachBatch` + the DataFrame JDBC writer.
 *
 * Both read lines from a socket source, split into words, and maintain a running count.
 */
object E4_sink_jdbc {

  def main(args: Array[String]): Unit = {
    // Pick which sink variant to run; the batch variant is active.
    // out_jdbc()
    out_jdbc_batch()
  }

  /** Writes the streaming word count to MySQL one row at a time using a `ForeachWriter`. */
  def out_jdbc(): Unit = {
    // Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("Hello")
      .master("local[*]")
      .getOrCreate()

    // Import implicit conversions (needed for .as[String]).
    import spark.implicits._

    // Create a streaming DataFrame reading lines from a socket.
    val lines = spark.readStream
      .format("socket")
      .option("host", "192.168.56.104")
      .option("port", 9999)
      .load()

    // Word count: split on non-word characters, group by word.
    val wordCount: DataFrame = lines.as[String]
      .flatMap(_.split("\\W+"))
      .groupBy("value")
      .count()

    // Start the query, upserting results into MySQL.
    val query: StreamingQuery = wordCount.writeStream
      .outputMode("update")
      // `foreach` takes a ForeachWriter instance with three methods to implement.
      // A fresh ForeachWriter is used per partition per epoch.
      .foreach(new ForeachWriter[Row] {
        var conn: Connection = _
        var ps: PreparedStatement = _
        var batchCount = 0

        // Open the connection; returning false skips this partition's data.
        override def open(partitionId: Long, epochId: Long): Boolean = {
//          println("open ..." + partitionId + "  " + epochId)
          Class.forName("com.mysql.cj.jdbc.Driver")
          conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/spark", "root", "root")
          // Insert, and on a duplicate key just refresh the count.
          // (No need to reassign the key column `word`; VALUES(count) reuses
          // the count from the INSERT clause, so only two parameters are needed.)
          val sql = "insert into word_count values(?, ?) " +
            "on duplicate key update count = values(count)"
          ps = conn.prepareStatement(sql)

          conn != null && !conn.isClosed && ps != null
        }

        // Write one row through the open connection.
        override def process(value: Row): Unit = {
          println("process ...." + value)
          val word: String = value.getString(0)
          val count: Long = value.getLong(1)
          ps.setString(1, word)
          ps.setLong(2, count)
          ps.execute()
        }

        // Release resources. `close` is called even when `open` failed or
        // returned false, so `ps`/`conn` may be null here — guard against NPE,
        // and close the connection even if closing the statement throws.
        override def close(errorOrNull: Throwable): Unit = {
          println("close...")
          try {
            if (ps != null) ps.close()
          } finally {
            if (conn != null) conn.close()
          }
        }
      })
      .start()

    // Block until the streaming query terminates.
    query.awaitTermination()

    // Shut down Spark.
    spark.stop()
  }

  /** Writes the streaming word count to MySQL a micro-batch at a time using `foreachBatch`. */
  def out_jdbc_batch(): Unit = {
    // Create the SparkSession.
    val spark = SparkSession.builder()
      .appName("Hello")
      .master("local[*]")
      .getOrCreate()

    // Import implicit conversions (needed for .as[String]).
    import spark.implicits._

    // Create a streaming DataFrame reading lines from a socket.
    val lines = spark.readStream
      .format("socket")
      .option("host", "192.168.56.104")
      .option("port", 9999)
      .load()

    // Word count: split on non-word characters, group by word.
    val wordCount: DataFrame = lines.as[String]
      .flatMap(_.split("\\W+"))
      .groupBy("value")
      .count()

    // Start the query, writing each complete result set to MySQL.
    // **********************************
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "root")
    val query: StreamingQuery = wordCount.writeStream
      .outputMode("complete")
      .foreachBatch { (df: DataFrame, batchId: Long) =>   // batch DataFrame, current batch id
        if (df.count() != 0) {
          // Cache so the two writes below don't recompute the batch.
          df.cache()
          try {
            df.write.json(s"./$batchId")
            // "complete" mode emits the full result each batch, so overwrite is correct.
            df.write.mode("overwrite")
              .jdbc("jdbc:mysql://localhost:3306/spark", "word_count", props)
          } finally {
            // Release the cache; otherwise cached batches accumulate
            // executor memory for the lifetime of the stream.
            df.unpersist()
          }
        }
      }
      .start()

    // Block until the streaming query terminates.
    query.awaitTermination()

    // Shut down Spark.
    spark.stop()
  }
}
