package com.study.spark.scala.structured_streaming

import org.apache.spark.sql.{ForeachWriter, Row}
import java.sql._

/**
  * JDBC Sink
  *
  * @author stephen
  * @create 2019-03-03 10:37
  * @since 1.0.0
  */
/**
  * A [[ForeachWriter]] that upserts word counts into the MySQL table
  * `tb_word_count`: rows whose `word` already exists are updated, otherwise
  * inserted.
  *
  * One connection and one set of prepared statements are created per
  * partition/epoch in `open` and released in `close`. Parameterized
  * statements are used so that words containing quotes cannot break or
  * inject into the SQL.
  *
  * @param url      JDBC connection URL
  * @param username database user
  * @param password database password
  */
class JDBCSink(url: String, username: String, password: String) extends ForeachWriter[Row] {
  val driver = "com.mysql.jdbc.Driver"
  var connection: Connection = _
  // Retained for source compatibility; row writes now go through the
  // prepared statements below.
  var statement: Statement = _
  private var queryStmt: PreparedStatement = _
  private var updateStmt: PreparedStatement = _
  private var insertStmt: PreparedStatement = _

  override def open(partitionId: Long, epochId: Long): Boolean = {
    // JDBC 4+ drivers auto-register; the explicit load is kept for older jars.
    Class.forName(driver)
    connection = DriverManager.getConnection(url, username, password)
    statement = connection.createStatement()
    // Prepare once per partition instead of building SQL strings per row.
    queryStmt = connection.prepareStatement(
      "SELECT 1 FROM tb_word_count WHERE word = ?")
    updateStmt = connection.prepareStatement(
      "UPDATE tb_word_count SET count = ? WHERE word = ?")
    insertStmt = connection.prepareStatement(
      "INSERT INTO tb_word_count(word,count) VALUES(?,?)")
    true
  }

  override def process(row: Row): Unit = {
    val word = row.getAs[String]("word")
    val count = row.getAs[Long]("count")

    // Check whether the word already exists.
    queryStmt.setString(1, word)
    val resultSet = queryStmt.executeQuery()
    try {
      if (resultSet.next()) {
        // Existing word: update its count.
        updateStmt.setLong(1, count)
        updateStmt.setString(2, word)
        updateStmt.executeUpdate()
      } else {
        // New word: insert it.
        insertStmt.setString(1, word)
        insertStmt.setLong(2, count)
        insertStmt.executeUpdate()
      }
    } finally {
      // Close the per-row ResultSet to avoid leaking cursors.
      resultSet.close()
    }
  }

  override def close(errorOrNull: Throwable): Unit = {
    // Release JDBC resources; statements first, then the connection.
    Seq(queryStmt, updateStmt, insertStmt, statement)
      .filter(_ != null)
      .foreach(_.close())
    if (connection != null) {
      connection.close()
    }
  }
}
