package sparkstreaming

import java.sql.{Connection, DriverManager, PreparedStatement}
import java.util
import java.util.HashMap

import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming job: consumes student records from the Kafka topic `stuInfo`,
 * counts occurrences of (enrollment status, gender) pairs per 5-second micro-batch,
 * and inserts the counts into the MySQL table `alnum` (columns: al, gender, num).
 */
object ALCount {
  def main(args: Array[String]): Unit = {
    // Basic configuration. NOTE: the JDBC driver class name differs between
    // MySQL Connector/J 5.x ("com.mysql.jdbc.Driver") and 8.x ("com.mysql.cj.jdbc.Driver").
    val driver = "com.mysql.jdbc.Driver"
    val url = "jdbc:mysql://192.168.245.10:3306/stuInfo"
    val username = "root"
    val password = "root"
    val tableName = "alnum"
    val group = "niit111"
    val topic = "stuInfo"
    System.setProperty("hadoop.home.dir", "D:\\hadoop\\hadoop-2.10.1")
    System.setProperty("HADOOP_USER_NAME", "root")

    // 1. SparkConf: local master, app name; StreamingContext with a 5-second batch interval.
    // 2. Kafka consumer config: broker, key/value deserializers, group id, offset reset.
    // 3. Subscribe to the Kafka topic via the direct stream API.
    // 4. Per batch: parse, count, and persist results to MySQL.
    // 5. Start the context and block until termination.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("KafkaConsumer")
      .set("spark.testing.memory", "512000000")
    val ssc = new StreamingContext(conf, Seconds(5))

    // Checkpoint directory (required for stateful/fault-tolerant operations).
    ssc.checkpoint("./checkpoint")
    // Reduce log noise to errors only.
    ssc.sparkContext.setLogLevel("error")

    // Kafka consumer parameters.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "GTL:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "max.poll.records" -> "1000", // cap the number of records fetched per poll
      "group.id" -> group, // consumer group id
      "auto.offset.reset" -> "earliest", // start from the beginning when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // disable auto offset commits
    )

    val topicName = Array(topic)
    val streamRDD = KafkaUtils.createDirectStream[String, String](
      ssc, // streaming context
      PreferConsistent, // location strategy: distribute partitions evenly across executors
      Subscribe[String, String](topicName, kafkaParams)
    )

    // Process each micro-batch of records from the `stuInfo` topic.
    streamRDD.foreachRDD(kafkaRdd => {
      // Skip empty batches to avoid needless JDBC connections.
      if (!kafkaRdd.isEmpty()) {

        // Each record value is a tab-separated line; extract
        // (enrollment status at index 2, gender at index 7).
        val lines = kafkaRdd.map(_.value()).map { x =>
          val line = x.split("\t")
          (line(2), line(7))
        }
        // Count occurrences of each (status, gender) pair and bring results to the driver.
        val l = lines.map((_, 1)).reduceByKey(_ + _).collect()
        l.foreach(println)

        // Persist the batch's counts into MySQL.
        // Nested try/finally guarantees the statement and connection are closed
        // even when a batch insert fails (the original leaked them on error).
        try {
          Class.forName(driver)
          val connection: Connection = DriverManager.getConnection(url, username, password)
          try {
            val statement: PreparedStatement =
              connection.prepareStatement(s"INSERT INTO $tableName (al,gender,num) VALUES (?, ?, ?)")
            try {
              l.foreach { case ((al, gender), count) =>
                // Bind in column order: 1 -> al (enrollment status), 2 -> gender, 3 -> count.
                // (Fixes the original's swapped indices, which wrote al into the
                // gender column and gender into the al column.)
                statement.setString(1, al)
                statement.setString(2, gender)
                statement.setInt(3, count)
                statement.addBatch()
              }
              statement.executeBatch()
              println("导入数据完成！")
            } finally {
              statement.close()
            }
          } finally {
            connection.close()
          }
        } catch {
          case e: Exception => e.printStackTrace()
        }
      }
    })

    // Start streaming and block the driver until the job is terminated.
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}
