package com.zhang.sparkstreaming_2

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Duration, StreamingContext}

import java.util.{Properties, Random}
import scala.collection.mutable.ListBuffer

/**
 * @title: Periodically generates mock ad-click data and sends it to Kafka
 * @author: zhang
 * @date: 2022/2/21 11:12
 */
object SparkStreaming06_Mock {

  def main(args: Array[String]): Unit = {

    // Periodically push mock ad-click data into the Kafka topic "spark_test".
    val prop = new Properties()
    // Producer configuration: broker address plus String serializers for key and value.
    prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092")
    prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](prop)

    // Single shared RNG. The original allocated `new Random()` for every value
    // drawn; instances created in quick succession may be seeded nearly
    // identically (they seed from the clock), producing correlated "random"
    // values, and the per-draw allocation is pure overhead.
    val random = new Random()

    /**
     * Builds one random batch of mock ad-click records.
     *
     * Record format (space-separated): timestamp area city userid adid
     * (The previous comment claimed comma-separated; the emitted string has
     * always used spaces — the documentation is corrected, not the output.)
     *
     * @return between 0 and 49 records; may be empty when the random batch
     *         size drawn from `nextInt(50)` is 0
     */
    def generateDatas(): ListBuffer[String] = {
      val areas = List("华北", "华中", "华南", "西北", "东北")
      val citys = List("北京", "上海", "杭州", "深圳", "南京")
      val users = List("zhang", "bob", "Alice", "John", "Mary","xiaoming","lisi","cat")

      val list = ListBuffer[String]()
      for (_ <- 1 to random.nextInt(50)) {
        val time = System.currentTimeMillis()
        val area = areas(random.nextInt(areas.length))
        val city = citys(random.nextInt(citys.length))
        val user = users(random.nextInt(users.length))
        val ad = random.nextInt(6) + 1  // ad ids are 1..6
        list.append(s"${time} ${area} ${city} ${user} ${ad}")
      }
      list
    }

    // Emit one batch every 5 seconds, forever. NOTE(review): this is a demo
    // generator — the loop never exits, so the producer is never closed.
    while ( true ) {
      for ( dat <- generateDatas() ) {
        println(dat)
        producer.send( new ProducerRecord[String, String]("spark_test", dat) )
      }
      Thread.sleep(5000)
    }
  }
}
