package com.bd03.streaminglearn.day0402.test

import java.io.{File, FileOutputStream}
import java.nio.charset.MalformedInputException
import java.util.Properties

import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerRecord}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.jedis.Jedis

import scala.collection.mutable.ArrayBuffer
import scala.io.Source

/**
  * Reads pipe-delimited access-log lines from a local file and publishes
  * selected fields, one record at a time, to the Kafka topic "test04".
  *
  * @author Ablue
  * @version 1.0.0
  * @note project: spark-kakfa-redis-process-data; created 2020-04-01 12:06
  */
object ReadHDFSToTopic {

  /**
    * Entry point: parses "D:\data\ip\ipaccess.log" (pipe-delimited, must be
    * UTF-8 — otherwise decoding throws MalformedInputException), keeps rows
    * with at least 5 fields, and sends "field1,field2" of each row to the
    * Kafka topic "test04", printing the partition each record landed on.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {
    // Silence verbose framework logging; keep warnings and errors.
    Logger.getLogger("org").setLevel(Level.WARN)

    // Read and parse the log file, making sure the file handle is released.
    // BUG FIX: the original never closed the Source (file-handle leak).
    val source = Source.fromFile("D:\\data\\ip\\ipaccess.log")
    val messages: List[String] =
      try {
        source.getLines()
          .map(_.split("\\|"))
          .filter(_.length >= 5) // drop malformed rows lacking the expected fields
          .map(fields => fields(1) + "," + fields(2))
          .toList
      } finally {
        source.close()
      }

    // Kafka producer configuration.
    val props = new Properties()
    props.setProperty("bootstrap.servers", "hdp01:9092,hdp02:9092,hdp03:9092")
    props.setProperty("acks", "all") // wait for full ISR acknowledgement
    props.setProperty("batch.size", "16384")
    props.setProperty("linger.ms", "1")
    //  props.setProperty("buffer.memory", "33554432")
    props.setProperty("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.setProperty("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

    val producer = new KafkaProducer[String, String](props)
    try {
      // BUG FIX: the original loop started at index 1 and silently dropped the
      // first parsed record. zipWithIndex also avoids O(n^2) List indexing.
      for ((message, index) <- messages.zipWithIndex) {
        Thread.sleep(5) // crude throttle so the downstream consumer can keep up
        val future = producer.send(
          new ProducerRecord[String, String]("test04", Integer.toString(index), message))
        val metadata = future.get() // block until the broker acknowledges the record
        println("----------------------------------------")
        println(metadata.partition() + ":" + message)
      }
    } finally {
      // BUG FIX: close() flushes buffered records and releases network
      // resources; the original leaked the producer and could lose records.
      producer.close()
    }
  }

}
