package fun

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

import java.util.Properties
import scala.util.Random

object functions {

  /** Sends a single record to the given Kafka topic and blocks until the
    * producer has flushed it.
    *
    * NOTE(review): a new producer is built and torn down on every call, which
    * is expensive; fine for demo code, but reuse one producer for throughput.
    *
    * @param topic  destination Kafka topic
    * @param record message payload; its string value becomes the record value
    */
  def sendKafka(topic: String, record: StringBuilder): Unit = {
    // Producer configuration
    val kafkaProps = new Properties()
    kafkaProps.put("bootstrap.servers", "192.168.88.128:9092")
    kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](kafkaProps)
    try {
      // send() is asynchronous; close() below flushes buffered records.
      producer.send(new ProducerRecord[String, String](topic, record.toString()))
    } finally {
      // FIX: close in a finally so a failed send cannot leak the producer's
      // network threads and sockets.
      producer.close()
    }
  }

  /** Creates a local StreamingContext that processes one micro-batch every
    * two seconds.
    *
    * @return a StreamingContext with checkpointing enabled and log level
    *         restricted to errors
    */
  def openSpark: StreamingContext = {
    // Windows-only workaround so Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "D:\\Hadoop\\hadoop-2.7.3\\hadoop-2.7.3")

    val conf = new SparkConf().setMaster("local[*]").setAppName("stream")
    val streamContext = new StreamingContext(conf, Seconds(2))
    // Checkpoint directory stores metadata and state for stateful operations.
    streamContext.checkpoint("./checkpoint")
    // Only emit error-level logs.
    streamContext.sparkContext.setLogLevel("error")
    streamContext
  }

  /** Subscribes to the "student" topic and returns a DStream of the record
    * values (message payloads as strings).
    *
    * @param streamingContext the context the direct stream is attached to
    * @return a DStream containing each Kafka record's value
    */
  def getKafkaData(streamingContext: StreamingContext): DStream[String] = {
    val topic = "student"
    // A fresh random group id per run means no committed offsets exist, so
    // "auto.offset.reset" = "earliest" makes every run re-read the topic.
    val group = String.valueOf(Random.nextInt(99999))
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.88.128:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      // FIX: was the misspelled key "enable.enable.commit" -> "False", which
      // Kafka ignores as an unknown config, leaving auto-commit at its default
      // of true. The correct key disables auto-commit so Spark manages offsets.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    // Create a direct stream consuming the topic with the params above.
    val linesStream = KafkaUtils.createDirectStream(
      streamingContext,
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParams)
    )

    // Keep only the record values.
    linesStream.map(_.value())
  }


}
