package main.scala.demo.kafka

import kafka.serializer.StringDecoder
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Spark Streaming + Kafka demos: a receiver-based consumer
  * (MyKafkaStreamReceiversDemo) and a direct-stream consumer with
  * Spark SQL aggregation (MyKafkaStreamDemo).
  *
  * @author zhangyimin
  * @date 2018-10-18 3:38 PM
  * @version 1.0
  */
object MyKafkaStreamReceiversDemo {

  /**
    * Receiver-based Kafka streaming demo: consumes topic "mydemo1" through
    * ZooKeeper, both with a single receiver and with five union-ed receivers
    * to demonstrate receive-side parallelism, printing each record.
    */
  def main(args: Array[String]): Unit = {
    // Silence noisy framework logging so the demo output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // local[2] = two local cores.
    // NOTE(review): each receiver permanently occupies one core; this demo
    // starts 6 receivers (1 single + 5 parallel), so with only 2 cores the
    // processing stage is starved — raise the core count for a real run.
    // Fixed copy-pasted app name ("MyNetworkWordCount") from another demo.
    val sparkConf = new SparkConf().setAppName("MyKafkaStreamReceiversDemo").setMaster("local[2]")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))

    // topic -> number of consumer THREADS for the receiver.
    // (The original comment claimed this meant "one message per fetch" — it does not.)
    val topic = Map("mydemo1" -> 1)

    // Single receiver-based stream: ZooKeeper quorum, consumer group, topic map.
    val kafkaStream = KafkaUtils.createStream(
      streamingContext,
      "10.16.7.36:2181",
      "myGroup",
      topic,
      StorageLevel.MEMORY_ONLY_SER_2)

    //============ receive-side parallelism ================
    // Several receivers ingest in parallel to raise input throughput;
    // union() merges them into one DStream for downstream processing.
    val kafkaStreams = (1 to 5).map(_ => KafkaUtils.createStream(
      streamingContext,
      "10.16.7.36:2181",
      "myGroup",
      topic,
      StorageLevel.MEMORY_ONLY_SER_2)
    )
    val unionStreams = streamingContext.union(kafkaStreams)

    // Render each (key, value) record as text. The original wrapped this in
    // `new String(x.toString())` — a redundant extra String copy.
    val lineStream1 = unionStreams.map(_.toString)
    lineStream1.print()
    //============ receive-side parallelism ================

    // Same rendering for the single-receiver stream.
    val lineStream = kafkaStream.map(_.toString)
    lineStream.print()

    streamingContext.start()
    streamingContext.awaitTermination()
  }

}


object MyKafkaStreamDemo {

  /**
    * Direct-stream (receiverless) Kafka demo: reads comma-separated
    * "key:value" records from topic "topc_syslog_test", parses them into
    * [[MyMsg]] rows, and runs a Spark SQL group-by on the IP column
    * every batch.
    */
  def main(args: Array[String]): Unit = {
    // Windows-only: location of winutils for the Hadoop client libraries.
    System.setProperty("hadoop.home.dir", "C:\\hadoop2.6.0")
    // Silence noisy framework logging so the demo output stays readable.
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // local[2] = two local cores. If the local JVM is memory-constrained,
    // set -Dspark.testing.memory=1073741824 (or spark.testing.memory in conf).
    val sparkConf = new SparkConf().setAppName("MyNetworkWordCount").setMaster("local[2]")
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))

    val topic = Set("topc_syslog_test")
    // Direct-stream consumer properties: broker list (not ZooKeeper),
    // consumer group, and start-from-latest-offset behaviour.
    val kafkaProps = Map[String, String](
      "metadata.broker.list" -> "172.16.1.239:9092,172.16.1.240:9092,172.16.1.241:9092",
      "group.id" -> "CID_PV_test",
      "auto.offset.reset" -> "largest"
    )

    val kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
      streamingContext,
      kafkaProps,
      topic)

    kafkaStream.foreachRDD { rdd =>
      // getOrCreate: reuses the session across batches after the first one.
      val spark = SparkSession
        .builder()
        .master("local[2]")
        .appName("test_sql_spark")
        .config("spark.sql.warehouse.dir", "file:///e:/tmp/spark-warehouse")
        .getOrCreate()

      // Each record value looks like "K1:v1,K2:v2,...,URL:http://host/x,K6:v6".
      val messages = rdd.map { record =>
        val fields = record._2.split(",")

        // Value after the first ':' in comma-field i, or null when the field
        // is missing or has no ':' payload. `lift` makes this bounds-safe:
        // the original `strs(i)` threw ArrayIndexOutOfBoundsException on
        // records with fewer than 6 fields.
        def valueOf(i: Int): String = {
          val parts = fields.lift(i).map(_.split(":")).getOrElse(Array.empty[String])
          if (parts.length > 1) parts(1) else null
        }

        // Like valueOf, but rejoins EVERYTHING after the key — the URL field
        // may itself contain ':' (e.g. "URL:http://host:8080/x"). The original
        // indexed parts 1..3 unconditionally after only checking `length > 1`,
        // crashing on values with two or three ':' segments.
        def tailOf(i: Int): String = {
          val parts = fields.lift(i).map(_.split(":")).getOrElse(Array.empty[String])
          if (parts.length > 1) parts.drop(1).mkString else null
        }

        // Case classes are constructed without `new`.
        MyMsg(valueOf(0), valueOf(1), valueOf(2), valueOf(3), tailOf(4), valueOf(5))
      }

      import spark.implicits._
      // Build the DataFrame once (the original called toDF() twice, running
      // the parse stage twice per batch).
      val sqlDF = messages.toDF()
      sqlDF.show()
      sqlDF.createOrReplaceTempView("fm_loan_pv")

      // Page views per IP for this batch.
      val result = spark.sql("select IP,count(*) total from fm_loan_pv group by IP")
      result.show()
    }

    streamingContext.start()
    streamingContext.awaitTermination()
  }

  /** One parsed syslog record: all fields nullable (null when absent in the raw line). */
  final case class MyMsg(OPERATER: String, IP: String, TIME: String, METHOD: String, URL: String, QUERYSTR: String)

}

