import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util
import java.util.Properties
object TermClassSum_6 {
  /** One aggregated row: enrollment count `num` for a (term, class, sex) key. */
  case class TCSum(term: String, cla: String, sex: String, num: Int)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("TermClass")
    val ssc = new StreamingContext(conf, Seconds(20))
    ssc.sparkContext.setLogLevel("error")
    // SQLContext gives us RDD -> DataFrame conversion (import sqlCon.implicits._ below).
    val sqlCon = new SQLContext(ssc.sparkContext)

    val topic = "stu"
    val group = "TermClass"
    val kafkaparam = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      // Offset reset policy: earliest = consume from the beginning when no committed offset exists.
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    ssc.checkpoint("./checkpoint")
    val lineStream = KafkaUtils.createDirectStream(
      ssc,
      // PreferConsistent: distribute partitions evenly across available executors.
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaparam))
    val line = lineStream.map(_.value())

    line.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Count records per (term, class, sex) key. Input is tab-separated;
        // column 5 = term, column 0 = class, column 2 = sex.
        // NOTE(review): the original filter was `x != 2`, which compares Char
        // to Int and never matches a printable character (a no-op). `'2'` is
        // presumably the character meant to be stripped — confirm against the
        // actual input data.
        val counts = rdd.map { record =>
          val cols = record.split("\t")
          ((cols(5), cols(0), cols(2).filter(_ != '2')), 1)
        }.reduceByKey(_ + _)

        // Map to the case class so the DataFrame gets named columns
        // (term/cla/sex/num) instead of nested tuple accessors.
        val rows = counts.map { case ((term, cla, sex), sum) => TCSum(term, cla, sex, sum) }

        // Driver-side actions only. The original called toDF()/show()/jdbc
        // inside counts.foreach, i.e. inside an executor closure: nested use
        // of the Spark context is illegal on a cluster, and it re-wrote the
        // whole DataFrame to MySQL once per record. Here each batch is
        // printed and persisted exactly once.
        counts.collect().foreach { case ((term, cla, sex), sum) =>
          println(s"$term,$cla,$sex,$sum")
        }

        import sqlCon.implicits._
        val counDataFrame = rows.toDF()
        counDataFrame.show()
        println("-------------------------------------------------------------------------------")

        // JDBC sink configuration.
        val prop = new Properties()
        prop.setProperty("user", "root")
        prop.setProperty("password", "123456")
        prop.setProperty("driver", "com.mysql.jdbc.Driver") // MySQL 5.7; use com.mysql.cj.jdbc.Driver for 8.x

        counDataFrame.write.mode("append").jdbc("jdbc:mysql://localhost:3306/classinfo", "st6", prop)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
