package org.example
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.sql.{Connection, DriverManager, PreparedStatement}

object SocketToMySQL {

  /**
   * Streaming word-count job with two pipelines over one socket source:
   *
   *  1. Per-batch top-5 words (split on non-word chars, lowercased, words
   *     containing the letter 'a' excluded), printed to stdout.
   *  2. Simple space-split word counts, upserted into the MySQL table
   *     `word_counts` so totals accumulate across batches.
   *
   * Runs locally with a 5-second batch interval and blocks until the
   * streaming context terminates.
   *
   * @param args unused; host/port and JDBC settings are hard-coded below.
   */
  def main(args: Array[String]): Unit = {
    import scala.util.control.NonFatal

    // Spark configuration: local mode needs at least 2 threads — one for the
    // socket receiver, one for batch processing.
    val conf = new SparkConf()
      .setAppName("SocketToMySQL")
      .setMaster("local[2]")
    // StreamingContext with a 5-second batch interval.
    val ssc = new StreamingContext(conf, Seconds(5))

    // DStream of raw text lines from the socket source.
    // NOTE(review): host/port are hard-coded — consider taking them from args.
    val lines = ssc.socketTextStream("172.16.104.38", 8888)

    // Pipeline 1: normalized word counts for the top-5 report.
    val processed = lines.flatMap(_.split("\\W+")) // split on non-word characters
      .filter(_.nonEmpty)                          // drop empty fragments
      .map(_.toLowerCase)                          // normalize case
      .filter(!_.contains("a"))                    // exclude words containing 'a'
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    processed.foreachRDD { rdd =>
      // Swap to (count, word) so `top` orders by count; `top` runs
      // distributed and avoids collecting the whole RDD to the driver.
      val top5 = rdd.map(_.swap)
        .top(5)(Ordering[Int].on(_._1))
        .map(_.swap) // back to (word, count) for display

      println("当前窗口Top5单词:")
      top5.foreach(println)
    }

    // Pipeline 2: word counts persisted to MySQL.
    // NOTE(review): this splits on a single space, unlike pipeline 1's \W+
    // split — confirm the difference is intentional.
    val wordCounts = lines.flatMap(_.split(" "))
      .filter(_.nonEmpty)
      .map(word => (word, 1))
      .reduceByKey(_ + _)

    wordCounts.foreachRDD { rdd =>
      // One JDBC connection per partition: connections are not serializable,
      // so they must be created on the executors, never on the driver.
      rdd.foreachPartition { partitionOfRecords =>
        var connection: Connection = null
        var preparedStatement: PreparedStatement = null
        try {
          // WARNING: credentials are hard-coded; move them to configuration
          // or environment variables before deploying.
          val url = "jdbc:mysql://localhost:3306/spark_db"
          val username = "root"
          val password = "123456"
          // Class.forName is unnecessary with JDBC 4.0+ auto-loading; kept
          // for older connectors. For MySQL Connector/J 8.x the class name
          // is com.mysql.cj.jdbc.Driver — confirm which jar is on the classpath.
          Class.forName("com.mysql.jdbc.Driver")
          connection = DriverManager.getConnection(url, username, password)
          // Upsert: relies on a unique key on `word` so counts accumulate
          // across batches instead of failing on duplicates.
          val sql = """
                    INSERT INTO word_counts (word, count)
                    VALUES (?, ?)
                    ON DUPLICATE KEY UPDATE count = count + VALUES(count)
                    """
          preparedStatement = connection.prepareStatement(sql)
          // Batch the writes: one round trip per partition instead of one
          // statement execution per record.
          partitionOfRecords.foreach { case (word, count) =>
            preparedStatement.setString(1, word)
            preparedStatement.setInt(2, count)
            preparedStatement.addBatch()
          }
          preparedStatement.executeBatch()
        } catch {
          // NonFatal: let OutOfMemoryError, InterruptedException, etc.
          // propagate instead of being swallowed here.
          case NonFatal(e) => e.printStackTrace()
        } finally {
          // Close each resource independently so a failing close of the
          // statement cannot leak the connection or mask the original error.
          if (preparedStatement != null)
            try preparedStatement.close() catch { case NonFatal(_) => () }
          if (connection != null)
            try connection.close() catch { case NonFatal(_) => () }
        }
      }
    }

    // Start the streaming computation and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }
}