package com.cl.util

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}

import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.mutable

/**
 * @Author xc
 * @Date 2023/4/15 16:49
 * @Version 1.0
 */
// For s_d_p
// Not yet in use
object OffsetUtils {

  // JDBC connection settings shared by both methods.
  // Values are kept identical to the original hard-coded ones;
  // NOTE(review): consider externalizing these to configuration.
  private val JdbcUrl      = "jdbc:mysql://localhost:3306/ym?characterEncoding=UTF-8"
  private val JdbcUser     = "root"
  private val JdbcPassword = "cl416416"

  /**
   * Reads the stored offsets of a consumer group for one topic from MySQL.
   *
   * @param groupID consumer group name
   * @param topic   Kafka topic name
   * @return mutable map of TopicPartition -> committed offset
   *         (empty when no offsets have been stored for this group/topic)
   */
  def getOffsetMap(groupID: String, topic: String): mutable.Map[TopicPartition, Long] = {
    //1. Open the connection (closed in finally so it is released even on error)
    val conn: Connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
    try {
      //2. Query by group and topic (`partition`/`offset` are backtick-quoted: MySQL keywords)
      val sql: String = "select `partition`,`offset` from t_offset where groupid = ? and topic = ? "
      //3. Prepare the statement
      val ps: PreparedStatement = conn.prepareStatement(sql)
      try {
        //4. Bind parameters and execute
        ps.setString(1, groupID)
        ps.setString(2, topic)
        val rs: ResultSet = ps.executeQuery()
        try {
          //5. Collect the rows into a map
          val offsetMap: mutable.Map[TopicPartition, Long] = mutable.Map[TopicPartition, Long]()
          while (rs.next()) {
            val partition: Int = rs.getInt("partition")
            // Kafka offsets are Long-valued; the original read them with getInt,
            // which would truncate any offset beyond Int.MaxValue.
            val offset: Long = rs.getLong("offset")
            offsetMap += new TopicPartition(topic, partition) -> offset
          }
          //6. Return the map
          offsetMap
        } finally rs.close()
      } finally ps.close()
    } finally conn.close()
  }

  /**
   * Persists the end ("until") offset of each processed OffsetRange to MySQL,
   * using REPLACE INTO so repeated saves for the same (topic, partition, group)
   * overwrite the previous row. The next run resumes from these offsets.
   *
   * @param groupId consumer group name
   * @param offsets offset ranges of the batch that was just processed
   */
  def saveOffsets(groupId: String, offsets: Array[OffsetRange]): Unit = {
    //1. Open the connection (closed in finally so it is released even on error)
    val conn: Connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
    try {
      //2. Upsert one row per (topic, partition, group)
      val sql: String = "replace into t_offset (`topic`,`partition`,`groupid`,`offset`) values(?,?,?,?)"
      //3. Prepare the statement once and reuse it for every range
      val ps: PreparedStatement = conn.prepareStatement(sql)
      try {
        //4. Bind and execute for each offset range
        for (o <- offsets) {
          ps.setString(1, o.topic)
          ps.setInt(2, o.partition)
          ps.setString(3, groupId)
          // untilOffset = exclusive end of the processed range, i.e. where to resume
          ps.setLong(4, o.untilOffset)
          ps.executeUpdate()
        }
      } finally ps.close()
    } finally conn.close()
  }


}
