package com.air.antispider.stream.dataprocess.businessprocess

import com.air.antispider.stream.common.bean.ProcessedData
import com.air.antispider.stream.common.util.jedis.PropertiesUtil
import com.air.antispider.stream.dataprocess.constants.BehaviorTypeEnum
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.rdd.RDD

/**
  * 将数据发送给Kafka
  */
/**
  * Routes processed click-stream records to Kafka, one target topic per
  * behavior type (0 = query, 1 = booking).
  */
object DataSend {

  /**
    * Send the batch to Kafka, fanned out by behavior type.
    *
    * @param processedDataRDD the processed records for the current micro-batch
    */
  def sendDataToKafka(processedDataRDD: RDD[ProcessedData]): Unit = {
    sendToKafka(processedDataRDD, 0) // query records
    sendToKafka(processedDataRDD, 1) // booking records
  }

  /**
    * Build the producer configuration from kafkaConfig.properties.
    * Returned as java.util.HashMap (Serializable) so it can be captured by
    * the foreachPartition closure and shipped to executors.
    */
  private def buildProducerProps(): java.util.HashMap[String, Object] = {
    val props = new java.util.HashMap[String, Object]()
    // broker list
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, PropertiesUtil.getStringByKey("default.brokers", "kafkaConfig.properties"))
    // key serializer class
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, PropertiesUtil.getStringByKey("default.key_serializer_class_config", "kafkaConfig.properties"))
    // value serializer class
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, PropertiesUtil.getStringByKey("default.value_serializer_class_config", "kafkaConfig.properties"))
    // batching: flush when either the batch-size or linger-time threshold is hit
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, PropertiesUtil.getStringByKey("default.batch_size_config", "kafkaConfig.properties"))
    props.put(ProducerConfig.LINGER_MS_CONFIG, PropertiesUtil.getStringByKey("default.linger_ms_config", "kafkaConfig.properties"))
    props
  }

  /**
    * Filter the batch down to one behavior type and publish it to the
    * corresponding Kafka topic.
    *
    * @param processedDataRDD the processed records for the current micro-batch
    * @param topicType        0 for query records, 1 for booking records
    * @throws IllegalArgumentException if topicType is neither 0 nor 1
    *         (previously this silently attempted sends to an empty topic name,
    *         which fails inside the Kafka client with a far less obvious error)
    */
  def sendToKafka(processedDataRDD: RDD[ProcessedData], topicType: Int): Unit = {
    // Resolve the target topic eagerly on the driver so bad arguments fail fast.
    val targetTopic: String = topicType match {
      case 0 => PropertiesUtil.getStringByKey("target.query.topic", "kafkaConfig.properties")
      case 1 => PropertiesUtil.getStringByKey("target.book.topic", "kafkaConfig.properties")
      case other =>
        throw new IllegalArgumentException(s"Unknown topicType: $other (expected 0 = query or 1 = book)")
    }

    val props = buildProducerProps()

    // Keep only records whose behavior type matches the requested topic.
    val filterRDD: RDD[ProcessedData] =
      processedDataRDD.filter(_.requestType.behaviorType.id == topicType)

    // Send per partition: one producer per partition amortizes connection cost.
    filterRDD.foreachPartition { iter =>
      val producer = new KafkaProducer[String, String](props)
      try {
        iter.foreach { processedData =>
          // Serialize with the project's delimiter-based format and publish.
          val message: String = processedData.toKafkaString()
          producer.send(new ProducerRecord[String, String](targetTopic, message))
        }
      } finally {
        // close() flushes buffered records and releases the producer's network
        // threads/buffers even when a send or serialization fails — the original
        // code leaked the producer on any exception in the loop.
        producer.close()
      }
    }
  }
}
