package cn.lagou.sparkhw

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession

// Read a local file and publish its lines to Kafka
object ReadToKafka {

  /**
   * Entry point: reads `data/sample.log` line by line with Spark and sends
   * each line as the value of a record to the Kafka topic `hw_topic03`.
   *
   * One `KafkaProducer` is created per RDD partition, because producers are
   * not serializable and must be instantiated on the executor side.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .appName(s"${this.getClass.getCanonicalName}")
      .master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    sc.setLogLevel("warn")

    // Kafka connection parameters.
    val brokers = "linux121:9092,linux122:9092,linux123:9092"
    val topic = "hw_topic03"
    val prop = new Properties()
    prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
    prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
    prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])

    // Read data/sample.log as an RDD of lines.
    val lines: RDD[String] = sc.textFile("data/sample.log")

    // Send the data to the topic: iterate partition by partition, then line
    // by line within each partition.
    lines.foreachPartition { iter =>
      // Created here (not in the driver) so it lives on the executor.
      val producer = new KafkaProducer[String, String](prop)
      try {
        iter.foreach { line =>
          // Fire-and-forget send; key is null, the line is the value.
          producer.send(new ProducerRecord[String, String](topic, line))
        }
      } finally {
        // Always close, even if a send throws, so buffered records are
        // flushed and the producer's I/O threads are released.
        producer.close()
      }
    }

    sc.stop()
  }
}
