package com.test.cn.spark.streaming.kafka

import java.util.Properties

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringSerializer

import scala.io.Source

object KafkaProducer {

  /** Demo entry point: reads `sample.log` line by line and publishes each
    * line as a String record to the Kafka topic `lagou_demo_1`.
    *
    * The file source and the producer are both closed in a `finally` block
    * so neither leaks if a send (or the sleep) throws mid-stream.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Kafka connection parameters
    val brokers = "linux121:9092"
    val topic1 = "lagou_demo_1"
    val prop = new Properties()

    prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
    prop.put(
      ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
      classOf[StringSerializer]
    )
    prop.put(
      ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
      classOf[StringSerializer]
    )

    // KafkaProducer (the imported Kafka client class, not this object)
    val producer = new KafkaProducer[String, String](prop)

    // Path is relative to the project root directory.
    val file = Source.fromFile("spark_scala_stream/src/data/sample.log")

    try {
      for (elem <- file.getLines()) {
        println(elem)
        val msg = new ProducerRecord[String, String](topic1, elem)
        producer.send(msg)
        // Throttle sends to simulate a live stream (demo only).
        Thread.sleep(100)
      }
    } finally {
      // Release the file handle and flush/close the producer even on failure.
      file.close()
      producer.close()
    }
  }
}
