package com.pw.study.realtime.handle

import com.alibaba.druid.pool.DruidDataSourceFactory
import com.pw.study.realtime.utils.MyUtils
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange

import java.sql.{Connection, PreparedStatement}
import java.util.Properties
import javax.sql.DataSource
import scala.collection.mutable

object MysqlHandler {
  // JDBC settings loaded from the classpath resource "db.properties".
  private val properties: Properties = MyUtils.load("db.properties")
  // Shared Druid connection pool, built once at object initialization.
  private val dataSource: DataSource = init()

  /**
   * Builds the Druid pooled [[DataSource]] from `db.properties`.
   *
   * @return a configured connection pool
   */
  def init(): DataSource = {
    val params = new java.util.HashMap[String, String]()
    params.put("driverClassName", properties.getProperty("jdbc.driver.name"))
    params.put("url", properties.getProperty("jdbc.url"))
    params.put("username", properties.getProperty("jdbc.user"))
    params.put("password", properties.getProperty("jdbc.password"))
    params.put("maxActive", properties.getProperty("jdbc.datasource.size"))
    DruidDataSourceFactory.createDataSource(params)
  }

  /**
   * Atomically writes the aggregated GMV results and the consumed Kafka
   * offsets to MySQL in ONE transaction, so the data and its offsets can
   * never get out of sync (exactly-once semantics on the sink side).
   *
   * @param results aggregated values as ((date, hour), gmv)
   * @param ranges  Kafka offset ranges consumed by this micro-batch
   * @param topic   Kafka topic the offsets belong to
   * @param group   Kafka consumer group id
   * @throws RuntimeException when the transaction cannot be committed
   */
  def endOffset(results: Array[((String, String), Double)], ranges: Array[OffsetRange], topic: String, group: String): Unit = {
    // GMV genuinely accumulates across micro-batches, hence gmv = gmv + VALUES(gmv).
    val sql_data =
      """
        |insert into gmvstats(date,hour,gmv) VALUES(?,?,?) ON DUPLICATE key update gmv=gmv+values(gmv)
        |""".stripMargin

    // BUG FIX: offsets are absolute positions, not increments. The original
    // "offset=offset+VALUES(offset)" kept growing the stored offset past the
    // real consumer position on every commit; the value must be replaced.
    val sql_offset =
      """
        |insert into offsets(group_id,topic,`PARTITION`,OFFSET) VALUES(?,?,?,?) on duplicate key update offset=VALUES(offset)
        |""".stripMargin

    var conn: Connection = null
    var statement_data: PreparedStatement = null
    var statement_offset: PreparedStatement = null
    try {
      conn = getConnection()
      // Disable auto-commit: both batches must land in a single transaction.
      conn.setAutoCommit(false)
      statement_data = conn.prepareStatement(sql_data)
      statement_offset = conn.prepareStatement(sql_offset)
      // One row per ((date, hour), gmv) aggregate.
      for (((day, hour), gmv) <- results) {
        statement_data.setString(1, day)
        statement_data.setString(2, hour)
        statement_data.setDouble(3, gmv)
        statement_data.addBatch()
      }
      // One row per partition: persist the end ("until") offset of the range.
      for (offsetRange <- ranges) {
        println(offsetRange)
        statement_offset.setString(1, group)
        statement_offset.setString(2, topic)
        statement_offset.setInt(3, offsetRange.partition)
        statement_offset.setLong(4, offsetRange.untilOffset)
        statement_offset.addBatch()
      }
      val result_data = statement_data.executeBatch()
      val result_offset = statement_offset.executeBatch()
      // Commit data and offsets together.
      conn.commit()
      println(s"gmv rows batched: ${result_data.length}.........")
      println(s"offset rows batched: ${result_offset.length}.........")
    } catch {
      case e: Exception =>
        // Roll back so a partially-applied batch never becomes visible.
        if (conn != null) {
          try conn.rollback() catch { case _: Exception => }
        }
        e.printStackTrace()
        // BUG FIX: original message said "query offsets failed" (copied from
        // the read path) for a commit failure; also chain the cause.
        throw new RuntimeException("Failed to commit GMV results and offsets", e)
    } finally {
      // Close each resource independently so a failing close cannot leak the rest.
      closeQuietly(statement_data)
      closeQuietly(statement_offset)
      if (conn != null) {
        // Restore auto-commit before handing the connection back to the pool.
        try conn.setAutoCommit(true) catch { case _: Exception => }
        try conn.close() catch { case _: Exception => }
      }
    }
  }

  // Closes a statement, swallowing close-time errors so cleanup always proceeds.
  private def closeQuietly(st: PreparedStatement): Unit = {
    if (st != null) {
      try st.close() catch { case _: Exception => }
    }
  }

  /**
   * Smoke test: verifies that a pooled connection can be obtained.
   */
  def main(args: Array[String]): Unit = {
    val conn = getConnection()
    // BUG FIX: the original printed the connection but never closed it,
    // leaking one pooled connection per run.
    try println(conn) finally conn.close()
  }

  /**
   * Borrows a connection from the Druid pool; callers must close it.
   */
  def getConnection(): Connection = {
    dataSource.getConnection
  }

  /**
   * Reads the last committed offsets for (topic, groupId) from MySQL,
   * used to resume a Kafka direct stream where the previous run stopped.
   *
   * @return TopicPartition -> offset; empty map when nothing is stored yet
   * @throws RuntimeException when the query fails
   */
  def startOffset(topic: String, groupId: String): Map[TopicPartition, Long] = {
    val offsets = new mutable.HashMap[TopicPartition, Long]()
    val sql =
      """
        |SELECT `partition`,offset FROM offsets where topic=? and group_id=?
        |""".stripMargin

    var conn: Connection = null
    var statement: PreparedStatement = null
    try {
      conn = getConnection()
      statement = conn.prepareStatement(sql)
      statement.setString(1, topic)
      statement.setString(2, groupId)
      val rs = statement.executeQuery()
      while (rs.next()) {
        val partition = rs.getInt("partition")
        val offset = rs.getLong("offset")
        println(s"partition:$partition Offset:$offset")
        offsets.put(new TopicPartition(topic, partition), offset)
      }
    } catch {
      case e: Exception =>
        e.printStackTrace()
        // Chain the cause so the driver log shows the underlying SQL error.
        throw new RuntimeException("查询偏移量失败。。。。", e)
    } finally {
      // Close independently so a statement-close failure cannot leak the connection.
      if (statement != null) {
        try statement.close() catch { case _: Exception => }
      }
      if (conn != null) {
        try conn.close() catch { case _: Exception => }
      }
    }
    offsets.toMap
  }
}
