import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

import java.sql.DriverManager
import java.util.Properties


/**
 * Spark Streaming job: consumes student records from Kafka topic "stu3"
 * (tab-separated fields), extracts (class, sex, 1) triples per record,
 * and appends each micro-batch to the MySQL table `studentinfo.student4`.
 *
 * Batch interval: 10 seconds. Offsets are tracked through the Spark
 * checkpoint directory (auto-commit to Kafka is disabled).
 */
object isschoolsum_4 {
  def main(args: Array[String]): Unit = {

    // Windows Hadoop shim and HDFS user identity for local development runs.
    System.setProperty("hadoop.home.dir", "D:\\idea\\hadoop-2.7.3(4)\\hadoop-2.7.3")
    System.setProperty("HADOOP_USER_NAME", "root")

    val conf = new SparkConf().setMaster("local[*]").setAppName("CountGenderSum")
    val ssc = new StreamingContext(conf, Seconds(10))
    val sqlCon = new SQLContext(ssc.sparkContext)
    ssc.sparkContext.setLogLevel("error")

    val topic = "stu3"
    val group = "countGenderSum"
    val kafkaparam = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      // Consume from the earliest offset when the group has no committed offset yet.
      "auto.offset.reset" -> "earliest",
      // Offsets are recovered from the checkpoint below, not auto-committed to Kafka.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    ssc.checkpoint("./checkpoint")

    val lineStream = KafkaUtils.createDirectStream(
      ssc,
      // PreferConsistent: distribute partitions evenly across available executors.
      PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaparam)
    )
    // Keep only the message payload; keys are not used by this job.
    val line = lineStream.map(_.value())

    line.foreachRDD { rdd =>
      // Skip empty micro-batches so we don't open a JDBC connection and append
      // an empty DataFrame every 10 seconds while the topic is idle.
      if (!rdd.isEmpty()) {
        val coun4 = rdd.map { record =>
          val fields = record.split("\t")
          // NOTE(review): fields(6) is presumed to be the class and fields(2) the
          // sex code ("1"/"2") — confirm against the producer's record layout.
          // BUG FIX: the original `fields(2).filter(x => x != 2)` compared each
          // Char against the Int literal 2 (code point U+0002), which is true for
          // every printable character, so the filter never removed anything. The
          // dead comparison is dropped; if sex "2" was actually meant to be
          // remapped or excluded, do it explicitly, e.g.:
          //   val sex = if (fields(2) == "1") "male" else "female"
          (fields(6), fields(2), 1)
        }

        import sqlCon.implicits._
        // Default column names (_1, _2, _3) are kept deliberately to stay
        // compatible with whatever schema the existing `student4` table has.
        val counDataFrame4 = coun4.toDF()
        counDataFrame4.show()

        val prop = new Properties()
        prop.setProperty("user", "root")
        prop.setProperty("password", "123456")

        counDataFrame4.write
          .mode("append")
          .jdbc("jdbc:mysql://10.202.27.7:3306/studentinfo", "student4", prop)
      }
    }

    // Start the streaming computation and block until it is terminated.
    ssc.start()
    ssc.awaitTermination()
  }
}
