import java.util.Properties

import kafka.javaapi.producer.Producer
import kafka.producer.{KeyedMessage, ProducerConfig}
import org.apache.spark.{SparkConf, SparkContext}
import org.codehaus.jettison.json.JSONObject
object KafkaEventProducer {
  // Topic administration (run once on the cluster):
  //   bin/kafka-topics.sh --zookeeper spark001:2181 --create --topic car_events --replication-factor 2 --partitions 2
  //   bin/kafka-topics.sh --zookeeper spark001:2181 --list
  //   bin/kafka-topics.sh --zookeeper spark001:2181 --describe car_events

  /** Reads traffic records from a local test file via Spark, wraps selected
    * fields into JSON events, and replays them into the "car_events" Kafka
    * topic at one message per 200 ms to simulate a live camera feed.
    */
  def main(args: Array[String]): Unit = {
    val topic = "car_events"                          // Kafka topic the events are published to
    val brokers = "node1:9092,node2:9092,node3:9092"  // Kafka broker list (all cluster nodes)

    // Producer configuration: broker list plus a String encoder, since the
    // payload sent below is the JSON event rendered as a String.
    val props = new Properties()
    props.put("metadata.broker.list", brokers)
    props.put("serializer.class", "kafka.serializer.StringEncoder")

    val kafkaConfig = new ProducerConfig(props)
    val producer = new Producer[String, String](kafkaConfig)

    // Local SparkContext, used only to read and parse the source file.
    val sparkConf = new SparkConf().setAppName("Shanghai traffic").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)

    try {
      // Test data set; the full data lived at "D:/traffic/trafficlf_all_column_all.txt".
      val filePath = "D:/2014082013_all_column_test.txt"

      val records = sc.textFile(filePath)
        .filter(!_.startsWith(";"))  // drop dirty/comment lines that start with ';'
        .map(_.split(","))           // split each line on commas
        .filter(_.length > 13)       // guard: record(13) is read below — skip short rows
        .collect()                   // small test file, safe to collect to the driver

      // records is an array of field arrays; iterate over each row.
      for (record <- records) {
        // Wrap the fields of interest into a JSON event.
        val event = new JSONObject()
        event
          .put("camera_id", record(0))
          .put("car_id", record(2))
          .put("event_time", record(4))
          .put("speed", record(6))
          .put("road_id", record(13))

        // Publish the JSON event as a String (matches the StringEncoder above).
        producer.send(new KeyedMessage[String, String](topic, event.toString))
        println("Message sent: " + event)

        Thread.sleep(200)  // throttle: one event every 200 ms
      }
    } finally {
      // Always release resources, even if reading or sending fails.
      producer.close()
      sc.stop()
    }
  }
}
