package com.bw.month.streaming

import java.util.Properties

import groovy.sql.DataSet
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, KafkaConsumer}
import org.apache.kafka.clients.producer.KafkaProducer
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies, OffsetRange}
import redis.clients.jedis.Jedis

import scala.collection.{JavaConversions, mutable}
import scala.collection.immutable.HashMap

object SparkUtils {

  /**
   * Loads a MySQL table into a DataFrame over JDBC.
   *
   * NOTE(review): connection credentials and the URL are hard-coded here and in
   * [[dfwriteMysql]]; consider externalizing them to configuration.
   *
   * @param sparkSession the active SparkSession
   * @param tableName    name of the table to read
   * @return a DataFrame backed by the JDBC source (lazily evaluated)
   */
  def readMysql(sparkSession: SparkSession, tableName: String): DataFrame = {
    sparkSession.read.format("jdbc")
      .option("user", "root")
      .option("password", "root")
      .option("url", "jdbc:mysql://hadoop100:3306/1903A?characterEncoding=utf-8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", tableName)
      .load()
  }

  /**
   * Appends a DataFrame to a MySQL table over JDBC.
   *
   * Uses SaveMode.Append, so repeated calls accumulate rows rather than
   * overwriting the table.
   *
   * @param df        the DataFrame to persist
   * @param tableName destination table name
   */
  def dfwriteMysql(df: DataFrame, tableName: String): Unit = {
    df.write.format("jdbc")
      .option("user", "root")
      .option("password", "root")
      .option("url", "jdbc:mysql://hadoop100:3306/1903A?characterEncoding=utf-8")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("dbtable", tableName)
      .mode(SaveMode.Append)
      .save()
  }

  /**
   * Creates a Kafka producer with String key/value serializers.
   *
   * The caller owns the returned producer and is responsible for closing it.
   *
   * @return a new KafkaProducer[String, String] pointed at hadoop100:9092
   */
  def createKafkaProducer(): KafkaProducer[String, String] = {
    val prop = new Properties
    // Kafka broker list to connect to.
    prop.put("bootstrap.servers", "hadoop100:9092")
    //prop.put("acks", "0")
    //prop.put("acks", "all")
    // Serializer for record keys.
    prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    // Serializer for record values.
    prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    // Request timeout in milliseconds.
    prop.put("request.timeout.ms", "60000")
    new KafkaProducer[String, String](prop)
  }

  /**
   * Creates a direct Kafka DStream starting from the latest offsets.
   *
   * Auto-commit is disabled ("enable.auto.commit" -> false), so offsets are
   * expected to be managed externally (e.g. by Spark checkpointing).
   *
   * @param streamingContext the active StreamingContext
   * @param topics           a single topic name to subscribe to
   * @return an InputDStream of ConsumerRecord[String, String]
   */
  def kafkaRead(streamingContext: StreamingContext, topics: String): InputDStream[ConsumerRecord[String, String]] = {
    val kafkaParam: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop100:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "hadoop100",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      // Start from the newest records when no committed offset exists.
      "auto.offset.reset" -> "latest",
      // Offsets are managed by the application, not by the Kafka client.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    KafkaUtils.createDirectStream[String, String](
      streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(topics), kafkaParam)
    )
  }

  /**
   * Creates a direct Kafka DStream resuming from explicitly supplied offsets.
   *
   * @param streamingContext the active StreamingContext
   * @param topic            topic name to subscribe to
   * @param map              starting offset per TopicPartition (e.g. loaded
   *                         via [[getOffsetFromRedis]])
   * @return an InputDStream of ConsumerRecord[String, String]
   */
  def kafkaReadOffset(streamingContext: StreamingContext,
                      topic: String,
                      map: collection.Map[TopicPartition, Long]
                     ): InputDStream[ConsumerRecord[String, String]] = {
    val kafkaParam: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "hadoop100:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "hadoop100",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer"
    )

    KafkaUtils.createDirectStream[String, String](
      streamingContext,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParam, map)
    )
  }

  /**
   * Reads stored consumer offsets from Redis.
   *
   * Offsets are stored in a Redis hash whose key is "&lt;topic&gt;_&lt;groupid&gt;",
   * with hash fields = partition number and values = offset.
   *
   * Fix: the Jedis connection is now closed in a finally block (it was
   * previously leaked on every call), and the deprecated JavaConversions
   * implicit is replaced by an explicit JavaConverters conversion.
   *
   * @param groupid consumer group id
   * @param topic   topic name
   * @return stored offset per TopicPartition; empty if nothing is stored
   */
  def getOffsetFromRedis(groupid: String, topic: String): Map[TopicPartition, Long] = {
    import scala.collection.JavaConverters._
    val jedis = new Jedis("bw")
    try {
      val key = s"${topic}_${groupid}"
      val fields: java.util.Map[String, String] = jedis.hgetAll(key)
      fields.asScala.map { case (partition, offset) =>
        new TopicPartition(topic, partition.toInt) -> offset.toLong
      }.toMap
    } finally {
      // Always release the connection, even if parsing fails.
      jedis.close()
    }
  }

  /**
   * Persists one partition's offset to Redis.
   *
   * Writes into the hash "&lt;topic&gt;_&lt;group&gt;" under field &lt;part&gt;, matching
   * the layout read back by [[getOffsetFromRedis]].
   *
   * Fix: the Jedis connection is now closed in a finally block (it was
   * previously leaked on every call).
   *
   * @param topic  topic name
   * @param group  consumer group id
   * @param part   partition number
   * @param offset offset to store
   */
  def saveOffsetToRedis(topic: String, group: String, part: Int, offset: Long): Unit = {
    val jedis = new Jedis("bw")
    try {
      jedis.hset(s"${topic}_${group}", s"${part}", s"${offset}")
    } finally {
      jedis.close()
    }
  }
}
