package com.kafka

import kafka.serializer.StringDecoder
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Consumes Kafka with Spark Streaming using the direct (receiver-less, low-level API)
  * approach, so partition offsets are tracked by the stream itself rather than ZooKeeper.
  */
object SparkStreamingKafka_Direct {

  def main(args: Array[String]): Unit = {
    // Point Spark at the local Windows Hadoop binaries and pick the HDFS user to act as.
    System.setProperty("hadoop.home.dir", "E:\\hadoop-common-2.7.3-bin-master")
    System.setProperty("HADOOP_USER_NAME", "root")

    // Local run on two cores; drain in-flight batches instead of dropping them on shutdown.
    val conf: SparkConf = new SparkConf()
      .setAppName("SparkStreamingKafka_Direct")
      .setMaster("local[2]")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")

    val sparkContext = new SparkContext(conf)
    sparkContext.setLogLevel("WARN")

    // One micro-batch every five seconds.
    val streamingContext = new StreamingContext(sparkContext, Seconds(5))

    // Broker list plus consumer group; "smallest" starts from the earliest
    // available offset when the group has no committed position (old-API value).
    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> "hadoop01:9092,hadoop02:9092,hadoop03:9092",
      "group.id" -> "Kafka_Direct",
      "auto.offset.reset" -> "smallest"
    )
    val topics = Set("t_message_02")

    // Direct (receiver-less) stream of (key, value) records; offsets are managed
    // by the stream itself, not ZooKeeper.
    val kafkaStream: InputDStream[(String, String)] =
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
        streamingContext, kafkaParams, topics)

    // Keep only the message payload (record value).
    val messages: DStream[String] = kafkaStream.map(_._2)

    // For every micro-batch, print each message to stdout.
    messages.foreachRDD { batch =>
      batch.foreach(println)
    }

    // Start the streaming job and block until it is terminated externally.
    streamingContext.start()
    streamingContext.awaitTermination()
  }
}