package com.zt.bigdata.template.spark.jdbc

import java.sql.Connection
import java.util.concurrent.atomic.AtomicLong

import com.zt.bigdata.spark.common.dto.StreamRecord
import org.apache.spark.internal.Logging
import org.apache.spark.sql.ForeachWriter

/**
 * Base [[ForeachWriter]] that writes streaming records to a JDBC sink using the
 * shared `JDBCTemplate` connection pool. Subclasses implement [[processFun]]
 * with the per-record write logic and may override [[openFun]]/[[closeFun]]
 * for per-partition setup and teardown.
 *
 * Pool settings (timeouts, pool size, auto-commit) are forwarded verbatim to
 * `JDBCTemplate.init` in [[open]].
 */
abstract class JDBCForeachWriter[E](jdbcUrl: String, username: String, password: String,
                                    connectionTimeout: Long = 30000, idleTimeout: Long = 600000, maxLifetime: Long = 1800000,
                                    maximumPoolSize: Int = 10, autoCommit: Boolean = true)
  extends ForeachWriter[StreamRecord[E]] with Logging with Serializable {

  // Connection borrowed from the pool in open(); remains null if open() was
  // never called or failed before assignment.
  protected var connection: Connection = _
  // Counts records handled by process() for the current partition/epoch;
  // reset to 0 in open().
  protected val counter = new AtomicLong(0)

  // Partition id of the task this writer instance serves (set in openFun).
  protected var partitionId: Long = _

  // Epoch/version of the current micro-batch (set in openFun).
  protected var version: Long = _

  /**
   * Setup hook invoked after the connection is acquired.
   *
   * @return true to process this partition, false to skip it (per the
   *         ForeachWriter contract).
   */
  def openFun(connection: Connection, partitionId: Long, version: Long): Boolean = {
    this.partitionId = partitionId
    this.version = version
    true
  }

  /** Per-record write logic; implemented by subclasses. */
  def processFun(record: StreamRecord[E], connection: Connection): Unit

  /**
   * Teardown hook invoked before the connection is released.
   *
   * @param errorOrNull the error that terminated the partition, or null on
   *                    success. NOTE(review): `connection` may be null here if
   *                    open() failed — overrides should guard accordingly.
   */
  def closeFun(connection: Connection, errorOrNull: Throwable): Unit = {}

  override def open(partitionId: Long, version: Long): Boolean = {
    JDBCTemplate.init(jdbcUrl, username, password, connectionTimeout, idleTimeout, maxLifetime, maximumPoolSize, autoCommit)
    connection = JDBCTemplate.getConnection
    counter.set(0)
    openFun(connection, partitionId, version)
  }

  override def process(value: StreamRecord[E]): Unit = {
    log.trace(s"Record parse :${value.toString}")
    processFun(value, connection)
    counter.incrementAndGet() // count the record just written
  }

  override def close(errorOrNull: Throwable): Unit = {
    // Spark calls close() even when open() threw or returned false, so
    // `connection` may still be null; likewise closeFun may throw. Use
    // try/finally so the connection and pool are always released, and guard
    // the close() call to avoid an NPE masking the original failure.
    try {
      closeFun(connection, errorOrNull)
    } finally {
      if (connection != null) {
        connection.close()
      }
      JDBCTemplate.close()
    }
  }

}
