package ETL

import LogBean.LogBeans
import com.alibaba.fastjson.{JSON, JSONException}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}


/**
 * Streaming ETL job: consumes raw JSON log lines from Kafka topic "ds_37",
 * parses them into [[LogBeans]], and appends valid records as Parquet files
 * on HDFS. Kafka offsets are committed asynchronously only after a batch's
 * output has been written, giving at-least-once semantics.
 */
object Etl {
  def main(args: Array[String]): Unit = {
    val session = SparkSession
      .builder()
      // NOTE: was "loca[*]" — a typo that prevented the job from starting.
      .master("local[*]")
      .appName(s"${this.getClass.getName}")
      .getOrCreate()
    val sc = session.sparkContext
    // One micro-batch every 120 seconds.
    val ssc = new StreamingContext(sc, Seconds(120))
    val topics = Array("ds_37")

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "master:9092,slave1:9092,slave2:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "store37",
      "auto.offset.reset" -> "earliest",
      // Offsets are committed manually (see commitAsync below) so a crash
      // between processing and commit replays the batch instead of losing it.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    // Create the direct Kafka consumer stream.
    val inputStream = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )
    inputStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Capture the offset ranges before any transformation so they can be
        // committed once this batch has been persisted.
        val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Parse each line; malformed JSON is logged and dropped.
        // BUG FIX: the original discarded the parse result and always
        // returned the null-initialized var, so every record was filtered out.
        val logBeans = rdd.map(_.value()).flatMap { line =>
          try {
            // Option(...) also drops a null result from the parser.
            Option(JSON.parseObject(line, classOf[LogBeans]))
          } catch {
            case e: JSONException =>
              println(e)
              None
          }
        }
        import session.implicits._
        val frame = logBeans.toDF()
        // Append mode: the default ErrorIfExists would fail on the second
        // micro-batch because the output path already exists.
        frame.coalesce(2)
          .write
          .mode("append")
          .parquet("hdfs://master:9000/dianshang/parquet")
        // Commit offsets only after the write succeeded (at-least-once).
        inputStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }
    ssc.start()
    ssc.awaitTermination()
  }
}
