package project

/**
 * @author 鲁新茹
 * @date 2023/6/19
 */
import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
 * Streams student records from Kafka and persists them into MySQL.
 *
 * Pipeline: Kafka topic "stuInfo3" -> Spark Streaming (5-second micro-batches)
 * -> MySQL table "student". Each Kafka record value is a tab-separated line
 * expected to carry exactly [[ImportData1.FieldCount]] fields; malformed lines
 * are skipped with a warning instead of crashing the executor task.
 *
 * Offsets are committed manually after each batch has been written
 * (enable.auto.commit is false), so a restart resumes where it left off
 * rather than replaying the whole topic.
 */
object ImportData1 {

  // Offset-management types from the already-imported kafka010 package.
  import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

  /** JDBC endpoint of the target MySQL database. */
  private val JdbcUrl =
    "jdbc:mysql://192.168.57.11:3306/information?characterEncoding=UTF-8"
  private val JdbcUser = "root"
  private val JdbcPassword = "root"

  /** Columns in the target table == fields expected per tab-separated record. */
  private val FieldCount = 8

  /** Opens a new MySQL connection. The caller is responsible for closing it. */
  private def mysqlConnection(): Connection =
    DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)

  /**
   * Inserts every record of `rows` into `tableName` over a single shared
   * connection and prepared statement (one pair per partition, not per row).
   *
   * Records whose field count differs from [[FieldCount]] are logged and
   * skipped — previously a short record threw IndexOutOfBoundsException and
   * killed the whole task.
   *
   * Note: `tableName` is interpolated into the SQL text and must be a trusted
   * constant; all cell values are bound as parameters.
   *
   * @param rows      parsed records, one List[String] per table row
   * @param tableName destination table name (trusted, never user input)
   */
  private def sendDataToMysql(rows: Iterator[List[String]], tableName: String): Unit = {
    if (rows.hasNext) { // avoid opening a connection for an empty partition
      val connection: Connection = mysqlConnection()
      try {
        val placeholders = Seq.fill(FieldCount)("?").mkString(",")
        val ps = connection.prepareStatement(s"insert into $tableName values ($placeholders)")
        try {
          rows.foreach { fields =>
            if (fields.size == FieldCount) {
              // JDBC parameter indices are 1-based.
              fields.zipWithIndex.foreach { case (value, i) => ps.setString(i + 1, value) }
              ps.execute()
            } else {
              System.err.println(
                s"Skipping malformed record: expected $FieldCount fields, got ${fields.size}: $fields")
            }
          }
        } finally {
          ps.close() // was leaked in the original version
        }
      } finally {
        connection.close() // close even when an insert throws
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val group = "niit2"
    val topic = "stuInfo3"

    // Local run with a capped test heap; one micro-batch every 5 seconds.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("KafkaConsumer")
      .set("spark.testing.memory", "512000000")
    val ssc = new StreamingContext(conf, Seconds(5))
    ssc.sparkContext.setLogLevel("error")
    // Required on Windows so Hadoop/Spark can locate winutils.exe.
    System.setProperty("hadoop.home.dir", "E:\\18code\\hadoop-2.7.3")

    // Kafka consumer configuration.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "niit01:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "max.poll.records" -> "1000", // at most 1000 records per poll (a count, not a time)
      "group.id" -> group,
      "auto.offset.reset" -> "earliest", // start from the beginning when no committed offset exists
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets committed manually per batch below
    )

    val stream: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Array(topic), kafkaParams))

    stream.foreachRDD { rdd =>
      // Capture offset ranges on the driver while the RDD is still the
      // original KafkaRDD (before any transformation discards them).
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd.foreachPartition { partition =>
        // One connection per partition instead of one per record.
        // split limit -1 keeps trailing empty fields so an empty last column
        // still yields FieldCount entries instead of a short list.
        val rows = partition.map(_.value().split("\t", -1).toList)
        sendDataToMysql(rows, "student")
      }
      // Manual commit AFTER the batch is written: without this, the disabled
      // auto-commit plus auto.offset.reset=earliest would replay (and
      // re-insert) the entire topic on every restart.
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    // Start the receiver and block until the context is stopped externally.
    ssc.start()
    ssc.awaitTermination()
  }
}
