package niit.sparkstreaming

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import java.util.Properties
import scala.util.Random
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

object streamKafka {

  /**
   * Publishes a single record to the given Kafka topic, then closes the producer.
   *
   * NOTE(review): a fresh KafkaProducer is created and torn down on every call
   * (i.e. once per micro-batch per output stream). That works, but a long-lived,
   * shared producer would be considerably cheaper — TODO consider hoisting.
   *
   * @param topic  destination Kafka topic
   * @param record message payload; its string form becomes the record value (key is null)
   */
  private def sendKafka(topic: String, record: StringBuilder): Unit = {
    // Producer configuration (string key/value serializers).
    val kafkaProps = new Properties()
    kafkaProps.put("bootstrap.servers", "192.168.202.101:9092") // Kafka broker address
    kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    val producer = new KafkaProducer[String, String](kafkaProps)
    try {
      producer.send(new ProducerRecord[String, String](topic, record.toString()))
    } finally {
      // Always release the producer's network resources, even if send() throws.
      producer.close()
    }
  }

  /**
   * Entry point: reads tab-separated student records from the "stuInfo" topic,
   * maintains three running aggregates (per gender, per semester, per
   * gender+enrollment pair), and publishes each aggregate to its own result topic
   * every 2-second batch.
   */
  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "D:\\NIIT\\hadoop\\hadoop-2.7.3")

    // Spark Streaming context with a 2-second micro-batch interval.
    val conf = new SparkConf().setMaster("local[*]").setAppName("stream")
    val streamContext = new StreamingContext(conf, Seconds(2))
    // Checkpoint directory is required by updateStateByKey (persists state across batches).
    streamContext.checkpoint("./checkpoint")
    // Only log errors. (Spark upper-cases the level internally, so this is
    // equivalent to the original lowercase "error".)
    streamContext.sparkContext.setLogLevel("ERROR")

    // Consume the source topic. A random group id means every run starts as a
    // "new" consumer group and, with auto.offset.reset=earliest, re-reads the
    // topic from the beginning.
    val topic = "stuInfo"
    val group = String.valueOf(Random.nextInt(99999))
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.202.101:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      // BUGFIX: was the misspelled key "enable.enable.commit" -> "False", which
      // Kafka ignores as an unknown config, silently leaving auto-commit at its
      // default (true). The correct key disables offset auto-commit as intended.
      "enable.auto.commit" -> "false"
    )

    // Direct stream from Kafka; PreferConsistent spreads partitions evenly over executors.
    val linesStream = KafkaUtils.createDirectStream(
      streamContext,
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParams)
    )

    // Keep only the record values; each value is one tab-separated student line.
    val lines = linesStream.map(_.value())

    // Stateful update function shared by all three aggregations: adds this
    // batch's occurrences to the accumulated total for the key.
    val updateRunningCount = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.sum
      val previousCount = state.getOrElse(0)
      Some(currentCount + previousCount)
    }

    // Running total of students per gender (tab field 2), refreshed every 2 seconds.
    val genderCount = lines
      .map { line =>
        val gender = line.split("\t")(2).toInt
        (gender, 1)
      }
      .updateStateByKey(updateRunningCount)

    genderCount.foreachRDD { rdd =>
      val record = new StringBuilder
      rdd.collect().foreach { case (gender, count) =>
        record.append(s"$gender $count ") // e.g. "0 7 1 8 "
      }
      sendKafka("result", record)
    }

    // Running total of students per semester (tab field 5).
    val semesterCount = lines
      .map { line =>
        val semester = line.split("\t")(5).toInt
        (semester, 1)
      }
      .updateStateByKey(updateRunningCount)

    semesterCount.foreachRDD { rdd =>
      val record = new StringBuilder
      rdd.collect().foreach { case (semester, count) =>
        record.append(s"$semester:$count ")
      }
      sendKafka("result1", record)
    }

    // Running total per (gender, enrollment-status) pair (tab fields 2 and 6).
    val genderEnrollCount = lines
      .map { line =>
        // Split once instead of twice per record (original called split twice).
        val fields = line.split("\t")
        ((fields(2), fields(6)), 1) // e.g. ((0, L), 1)
      }
      .updateStateByKey(updateRunningCount)
    genderEnrollCount.print()

    genderEnrollCount.foreachRDD { rdd =>
      val record = new StringBuilder
      rdd.collect().foreach { case (key, count) =>
        record.append(s"$key:$count ")
      }
      sendKafka("result2", record)
    }

    // Start the pipeline and block until it is stopped externally.
    streamContext.start()
    streamContext.awaitTermination()
  }

}