package com.niit.bigdataPro

// Real-time count, every 2 seconds via Spark Streaming, of enrolled and
// non-enrolled male/female students.
import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object countgender {

  /**
   * One student record parsed from a tab-separated Kafka message.
   * Field order matches the column order of the producer's TSV payload.
   */
  case class StuInfoData(classId: String, name: String, gender: String, birthdate: String,
                         studentId: String, semester: String, formattedGpa: String, status: String)

  def main(args: Array[String]): Unit = {

    // Local Hadoop runtime needed by Spark on Windows.
    System.setProperty("hadoop.home.dir", "E:\\18code\\hadoop-2.7.3")
    System.setProperty("HADOOP_USER_NAME", "root")

    val group = "niit01"
    val topic = "stuInfo2"

    // Program entry point: local streaming context with a 2-second batch interval.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("spark Steaming整合kafka")
    val ssc = new StreamingContext(sparkConf, Seconds(2))

    // Kafka consumer configuration.
    // NOTE(review): auto-commit is disabled but offsets are never committed
    // manually, so records are re-read from "earliest" on every restart —
    // confirm this at-least-once behavior is intended.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Connect Spark Streaming directly to the Kafka topic.
    val topicName = Array(topic)
    val streamRDD = KafkaUtils.createDirectStream[String, String](
      ssc,                                 // streaming context
      LocationStrategies.PreferConsistent, // distribute partitions evenly across executors
      ConsumerStrategies.Subscribe[String, String](topicName, kafkaParams)
    )

    // Parse each tab-separated Kafka value into a StuInfoData.
    // split with limit -1 keeps trailing empty fields, so a record whose last
    // column (status) is empty still yields 8 elements instead of throwing
    // ArrayIndexOutOfBoundsException.
    val stuInfoData = streamRDD.map { kafkaData =>
      val fields = kafkaData.value().split("\t", -1)
      StuInfoData(fields(0), fields(1), fields(2), fields(3),
                  fields(4), fields(5), fields(6), fields(7))
    }

    // (gender, 1) pairs, then a per-batch count by gender.
    val mapRDD = stuInfoData.map(data => (data.gender, 1))
    val reduceRDD: DStream[(String, Int)] = mapRDD.reduceByKey(_ + _)

    // Print each batch's counts to the console for debugging.
    reduceRDD.print()

    // Persist each batch's (gender, count) pairs into MySQL.
    reduceRDD.foreachRDD { rdd =>
      // Runs on the executors: one JDBC connection and one reusable
      // PreparedStatement per partition (the original code prepared a new
      // statement per record and leaked all but the last one).
      def writePartition(records: Iterator[(String, Int)]): Unit = {
        var conn: Connection = null
        var stmt: PreparedStatement = null
        try {
          // MySQL connection settings for the `student` database.
          val url = "jdbc:mysql://192.168.195.11:3306/student?useUnicode=true&characterEncoding=UTF-8"
          val user = "root"
          val password = "123456"
          conn = DriverManager.getConnection(url, user, password)
          // Parameterized insert into gender_result(gender, count).
          stmt = conn.prepareStatement("insert into gender_result(gender,count) values (?,?)")
          records.foreach { case (gender, count) =>
            stmt.setString(1, gender)
            stmt.setInt(2, count)
            stmt.executeUpdate()
          }
        } catch {
          case e: Exception => e.printStackTrace()
        } finally {
          if (stmt != null) {
            stmt.close()
          }
          if (conn != null) {
            conn.close()
          }
        }
      }

      // Limit JDBC connections to two per batch.
      rdd.repartition(2).foreachPartition(writePartition)
    }

    // Start the streaming job and block until it is terminated externally.
    ssc.start()
    ssc.awaitTermination()
  }
}

