package com.shujia

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}

import java.util.Properties
import scala.io.{BufferedSource, Source}

object Demo02StudentToKafka {

  // Number of partitions of the "students" topic; used for manual partition routing below.
  private val NumPartitions = 3

  def main(args: Array[String]): Unit = {
    // 1. Configure the connection to the Kafka cluster.
    val properties = new Properties()
    // Kafka broker list
    properties.setProperty("bootstrap.servers", "master:9092,node1:9092,node2:9092")
    // Serializer classes for the record key and value
    properties.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    properties.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val kafkaProducer: KafkaProducer[String, String] = new KafkaProducer[String, String](properties)

    // 2. Read the file; each line becomes one Kafka record.
    val bs: BufferedSource = Source
      .fromFile("Kafka/data/students.txt")

    try {
      /**
       * NOTE: producing to (or consuming from) a topic that does not exist makes Kafka
       * auto-create it with 1 partition and 1 replica (when topic auto-creation is enabled).
       */
      bs.getLines().foreach(line => {
        // 3. Build the message. Route all records of the same class to the same
        // partition: hash the class field (5th CSV column) modulo the partition count.
        val clazz: String = line.split(",")(4)
        // Use floorMod, NOT Math.abs(hash) % n: when hashCode == Int.MinValue,
        // Math.abs returns a negative value and would produce an invalid partition.
        val partitionNO: Int = Math.floorMod(clazz.hashCode, NumPartitions)
        val producerRecord = new ProducerRecord[String, String]("students", partitionNO, "", line)

        // 4. Send the record. send() is asynchronous; the producer batches records.
        kafkaProducer.send(producerRecord)
      })
      // Flush ONCE after all sends: flushing inside the loop forces a synchronous
      // round-trip per message and defeats the producer's batching entirely.
      kafkaProducer.flush()
    } finally {
      // 5. Release resources even if reading or sending throws.
      kafkaProducer.close()
      bs.close()
    }
  }

}
