package com.fwmagic.spark.streaming.util

import java.sql.{Connection, DriverManager, PreparedStatement, ResultSet}
import java.util
import java.util.Map
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.kafka010.OffsetRange
import redis.clients.jedis.{Jedis, Pipeline}
import scala.collection.mutable.HashMap

object OffsetRangesUtils {

    /**
      * Parses a `"topic_partition"` storage key into a [[TopicPartition]].
      *
      * Splits at the LAST underscore so that topic names which themselves
      * contain underscores (e.g. `"my_topic_0"`) are handled correctly; the
      * original `split("_")` took only the text before the first underscore
      * as the topic and failed on the partition number.
      *
      * @param key storage key in the form `<topic>_<partition>`
      * @throws IllegalArgumentException if the key has no usable separator
      */
    private def parseTopicPartition(key: String): TopicPartition = {
        val sep = key.lastIndexOf('_')
        require(sep > 0 && sep < key.length - 1, s"malformed topic_partition key: $key")
        new TopicPartition(key.substring(0, sep), key.substring(sep + 1).toInt)
    }

    /**
      * Queries the historical topic/partition offsets recorded in MySQL for the
      * given application-name + group-id key.
      *
      * Fix: the original leaked the Connection/PreparedStatement/ResultSet;
      * all JDBC resources are now released in finally blocks.
      *
      * @param appNameGid composite key (appName + groupId) stored in `app_gid`
      * @return TopicPartition -> committed offset (empty map when none stored)
      */
    def queryHistoryOffsetFromMysql(appNameGid: String): collection.Map[TopicPartition, Long] = {
        val offsets: HashMap[TopicPartition, Long] = new HashMap[TopicPartition, Long]()
        val con: Connection = DBUtils.getConnection()
        try {
            val ps: PreparedStatement =
                con.prepareStatement("select topic_partition,offset from t_kafka_offset where app_gid=?")
            try {
                ps.setString(1, appNameGid)
                val rs: ResultSet = ps.executeQuery()
                try {
                    while (rs.next()) {
                        offsets.put(parseTopicPartition(rs.getString(1)), rs.getLong(2))
                    }
                } finally rs.close()
            } finally ps.close()
        } finally con.close()
        offsets.toMap
    }

    /**
      * Batch-upserts word counts into MySQL.
      *
      * `ON DUPLICATE KEY UPDATE` turns the insert into insert-or-increment:
      * new words are inserted, existing rows have their count increased.
      *
      * Fix: the PreparedStatement is now closed in a finally block (the
      * caller-owned connection is intentionally NOT closed here).
      *
      * @param res word/count pairs produced by the current micro-batch
      * @param con caller-owned connection; its lifecycle (and any transaction)
      *            is managed by the caller
      * @return per-statement update counts from `executeBatch()`
      */
    def updateWordCountToMySQL(res: Array[(String, Int)], con: Connection): Array[Int] = {
        val ps: PreparedStatement = con.prepareStatement(
            "insert into t_word_counts(word,counts) values (?,?) ON DUPLICATE KEY UPDATE counts=counts+? ")
        try {
            for ((word, count) <- res) {
                ps.setString(1, word)
                ps.setLong(2, count)
                ps.setLong(3, count)
                ps.addBatch()
            }
            ps.executeBatch()
        } finally ps.close()
    }

    /**
      * Batch-writes Kafka offsets to MySQL, one row per topic-partition,
      * keyed by `(app_gid, topic_partition)`; `ON DUPLICATE KEY UPDATE`
      * overwrites an existing row's offset. Example row:
      * `(wc_gp123, wctest_0) -> 1000`.
      *
      * Fix: the PreparedStatement is now closed in a finally block (the
      * caller-owned connection is intentionally NOT closed here).
      *
      * @param appNameGid   composite key (appName + groupId)
      * @param offsetRanges ranges of the just-processed batch; `untilOffset`
      *                     (the exclusive end) is what gets committed
      * @param con          caller-owned connection, typically shared with the
      *                     result write so both commit together
      * @return per-statement update counts from `executeBatch()`
      */
    def updateOffsetToMySQL(appNameGid: String, offsetRanges: Array[OffsetRange], con: Connection): Array[Int] = {
        val ps: PreparedStatement = con.prepareStatement(
            "insert into t_kafka_offset (app_gid,topic_partition,offset) values (?,?,?) ON DUPLICATE KEY UPDATE offset=?")
        try {
            for (offsetRange <- offsetRanges) {
                ps.setString(1, appNameGid)
                ps.setString(2, offsetRange.topic + "_" + offsetRange.partition)
                ps.setLong(3, offsetRange.untilOffset)
                ps.setLong(4, offsetRange.untilOffset)
                ps.addBatch()
            }
            ps.executeBatch()
        } finally ps.close()
    }


    /**
      * Queries the historical offsets recorded in Redis (hash keyed by
      * `appNameGid`, fields are `<topic>_<partition>`, values are offsets).
      *
      * Fix: replaced the deprecated implicit `scala.collection.JavaConversions`
      * with explicit `JavaConverters` / `.asScala`.
      *
      * @param appNameGid composite key (appName + groupId), the Redis hash key
      * @return TopicPartition -> committed offset (empty map when none stored)
      */
    def queryHistoryOffsetFromRedis(appNameGid: String): collection.Map[TopicPartition, Long] = {
        val offsets: HashMap[TopicPartition, Long] = new HashMap[TopicPartition, Long]()
        var jedis: Jedis = null
        try {
            jedis = JedisConnectionPool.getConnection()
            // Offsets live in Redis logical database 4.
            jedis.select(4)

            // All topic_partition -> offset fields for this app/group.
            val topicAndPartitionAndOffset: util.Map[String, String] = jedis.hgetAll(appNameGid)

            // Explicit Java->Scala conversion (JavaConversions is deprecated).
            import scala.collection.JavaConverters._
            for ((topicAndPartition, offset) <- topicAndPartitionAndOffset.asScala) {
                offsets.put(parseTopicPartition(topicAndPartition), offset.toLong)
            }
        } finally {
            if (jedis != null) jedis.close()
        }
        offsets.toMap
    }


    /**
      * Writes per-word counts to the Redis hash `redis_wc` via the caller's
      * pipeline, incrementing each word's running total by the batch count.
      *
      * @param res      word/count pairs produced by the current micro-batch
      * @param pipeline caller-owned pipeline; the caller is responsible for
      *                 syncing/flushing it (typically together with the
      *                 offset write, so both land atomically)
      */
    def updateWordCountToRedis(res: Array[(String, Int)], pipeline: Pipeline): Unit = {
        res.foreach { case (word, count) =>
            pipeline.hincrBy("redis_wc", word, count)
        }
    }

    /**
      * Writes Kafka offsets to the Redis hash keyed by `appNameGid` via the
      * caller's pipeline. Fields are `<topic>_<partition>`; writing the same
      * field again simply overwrites the previous offset.
      *
      * @param appNameGid   composite key (appName + groupId), the Redis hash key
      * @param offsetRanges ranges of the just-processed batch; `untilOffset`
      *                     (the exclusive end) is what gets committed
      * @param pipeline     caller-owned pipeline; the caller syncs/flushes it
      */
    def updateOffsetToRedis(appNameGid: String, offsetRanges: Array[OffsetRange], pipeline: Pipeline): Unit = {
        for (offsetRange <- offsetRanges) {
            val field = offsetRange.topic + "_" + offsetRange.partition
            pipeline.hset(appNameGid, field, offsetRange.untilOffset.toString)
        }
    }

    /**
      * Queries historical offsets from HBase through a Phoenix view, taking
      * the maximum recorded offset per topic-partition for the given group.
      *
      * Fixes: (1) the `view` parameter was previously ignored — the SQL
      * hardcoded the "myorder" view; it is now interpolated as the table
      * identifier. (2) the JDBC connection/statement/resultset were leaked;
      * they are now closed in finally blocks. (3) the ZooKeeper quorum URL
      * was hardcoded; it is now a parameter defaulting to the original value,
      * so existing callers are unaffected.
      *
      * NOTE: `view` is a SQL identifier and cannot be bound as a JDBC
      * parameter; it must come from trusted configuration, never user input.
      *
      * @param view    Phoenix view name (case-sensitive, quoted in the SQL)
      * @param groupId consumer group id
      * @param zkUrl   Phoenix JDBC URL (ZooKeeper quorum)
      * @return TopicPartition -> max committed offset (empty map when none)
      */
    def queryHistoryOffsetFromHbaseByPhoenix(
            view: String,
            groupId: String,
            zkUrl: String = "jdbc:phoenix:192.168.62.131,192.168.62.132,192.168.62.133:2181"): collection.Map[TopicPartition, Long] = {
        val offsets: HashMap[TopicPartition, Long] = new HashMap[TopicPartition, Long]()
        Class.forName("org.apache.phoenix.jdbc.PhoenixDriver")
        val connection: Connection = DriverManager.getConnection(zkUrl)
        try {
            val sql =
                s"""select "topic_partition",max("offset") from  "$view" where "groupid" =? group by "topic_partition""""
            val ps: PreparedStatement = connection.prepareStatement(sql)
            try {
                ps.setString(1, groupId)
                val rs: ResultSet = ps.executeQuery()
                try {
                    while (rs.next()) {
                        offsets.put(parseTopicPartition(rs.getString(1)), rs.getLong(2))
                    }
                } finally rs.close()
            } finally ps.close()
        } finally connection.close()
        offsets.toMap
    }

}
