/**
 * @author Xue ShuWen
 * @date 2022/11/19 20:22
 */

//Consume generated student records from Kafka and insert them into the studentInfo table of the stuInfo database
package scala
import java.sql.{Connection, DriverManager, PreparedStatement}
import java.text.SimpleDateFormat
import java.util

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.connMysql.{driver, password, url, username}
object studentInfo {

  // JDBC settings for the target MySQL database (shadow the scala.connMysql import).
  val driver = "com.mysql.cj.jdbc.Driver"
  val url = "jdbc:mysql://localhost:3306/stuInfo"
  val username = "root"
  val password = "root"
  // Kafka consumer group id.
  val group = "niit01"

  /**
   * Entry point: reads tab-separated student records from the Kafka topic
   * "stuInfo" in 2-second micro-batches and inserts every 8-field record
   * into the MySQL table studentInfo. Blocks until the streaming context
   * is terminated externally.
   */
  def main(args: Array[String]): Unit = {

    // Windows-only Hadoop shim so Spark finds winutils; harmless elsewhere.
    System.setProperty("hadoop.home.dir", "C:\\soft\\winutils-master\\hadoop-2.7.3")
    System.setProperty("HADOOP_USER_NAME", "root")

    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("sparkKafka")
    val ssc = new StreamingContext(sparkConf, Seconds(2))
    ssc.sparkContext.setLogLevel("error")

    // Kafka consumer configuration (java.util.Map-compatible).
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )
    // NOTE(review): these producer props are built but no producer is ever
    // created from them — confirm whether this was intended for a later step.
    val props = new util.HashMap[String, Object]()
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "niit01:9092")
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")

    // Subscribe to the topic; each Kafka value is a tab-separated record.
    val topics = Array("stuInfo")
    val recordDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent, // spreads partitions evenly over executors
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))
    val resultDStream: DStream[Array[String]] = recordDStream.map(_.value()).map(_.split("\t")).cache()

    // Register the JDBC driver once, not once per record.
    Class.forName(driver)

    resultDStream.foreachRDD(rdd => {
      // Records are small enough here to collect to the driver; keep only
      // well-formed 8-field rows.
      val students = rdd.filter(_.length == 8).collect()
      if (students.nonEmpty) {
        // One connection and one prepared statement per micro-batch instead
        // of per record; try/finally guarantees both are closed even when a
        // parse (.toInt/.toDouble) or the insert itself fails.
        val connection: Connection = DriverManager.getConnection(url, username, password)
        try {
          val sql = "insert into studentInfo values (?,?,?,?,?,?,?,?)"
          val statement: PreparedStatement = connection.prepareStatement(sql)
          try {
            students.foreach(student => {
              statement.setInt(1, student(0).toInt)
              statement.setString(2, student(1))
              statement.setInt(3, student(2).toInt)
              statement.setString(4, student(3))
              statement.setInt(5, student(4).toInt)
              statement.setInt(6, student(5).toInt)
              statement.setString(7, student(6))
              statement.setDouble(8, student(7).toDouble)
              statement.executeUpdate()
              println(student(6) + "--------------------")
              print("jxzcp")
            })
          } finally {
            statement.close()
          }
        } finally {
          connection.close()
        }
      }
    })
    ssc.start()
    ssc.awaitTermination()
    ssc.stop()
  }
}


