package xubo.wangcaifeng.love2

import kafka.common.TopicAndPartition
import org.apache.spark.streaming.kafka.OffsetRange
import scalikejdbc.{DB, SQL}
import scalikejdbc.config.DBs

object OffsetUtils {
  // Load the database connection settings (from the ScalikeJDBC config file)
  // once, when this object is first referenced. Required before any DB.* call.
  DBs.setupAll()

  /**
    * Read the stored Kafka offsets for a consumer group from MySQL.
    *
    * @param gId Kafka consumer group id
    * @return map of topic/partition -> last committed offset; empty when the
    *         group has no saved offsets yet
    */
  def apply(gId: String): Map[TopicAndPartition, Long] = {
    DB.readOnly(implicit session =>
      // No values are interpolated here — the group id is bound as a
      // parameter, so a plain string literal is used (no s-interpolator).
      SQL("select * from move where groupid = ?")
        .bind(gId)
        // NOTE(review): column name "offerset" matches the existing table
        // schema — likely a typo for "offset"; renaming needs a migration.
        .map(rs => (TopicAndPartition(rs.string("topic"), rs.int("partition")), rs.long("offerset")))
        .list()
        .apply()
    ).toMap
  }

  /**
    * Persist the offsets of one processed batch to MySQL.
    *
    * All partitions are written inside a single local transaction: if the
    * insert for one partition fails, the writes for the others roll back,
    * so the stored offsets stay consistent as a set.
    *
    * @param offsetRanges offset ranges of the batch just processed
    * @param gId          Kafka consumer group id
    */
  def apply(offsetRanges: Array[OffsetRange], gId: String): Unit = {
    DB.localTx(implicit session =>
      offsetRanges.foreach(or => {
        // `partition` is a reserved word in MySQL 5.6+, so the column name
        // must be backtick-quoted or the REPLACE fails with a syntax error.
        SQL("replace into move(groupid,topic,`partition`,offerset) values(?,?,?,?)")
          .bind(gId, or.topic, or.partition, or.untilOffset)
          .update()
          .apply()
      })
    )
  }


}
