package dataconsume

import java.net.URLDecoder
import java.util.Properties

import scala.collection.JavaConverters._
import scala.util.control.NonFatal

import org.apache.kafka.clients.producer._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._

object DataCleanAndSend {
  /** Reads the student-score CSV from the classpath, de-duplicates it and
    * drops rows containing nulls, then publishes each cleaned row to the
    * Kafka topic "javaTopic" as a comma-joined string.
    *
    * Entry point of the job; always releases the Kafka producer and the
    * Spark session in the `finally` block.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("学生成绩数据清洗与发送")
      .master("local")
      .getOrCreate()

    // Resolve the CSV resource; fail fast with a clear message instead of
    // an opaque NullPointerException when the resource is missing.
    val resourceUrl = getClass.getResource("/学生成绩预测.csv")
    require(resourceUrl != null, "classpath resource /学生成绩预测.csv not found")
    // Decode %-escapes so paths with non-ASCII characters load correctly.
    val resourcePath = URLDecoder.decode(resourceUrl.getPath, "UTF-8")
    println(s"尝试读取文件: $resourcePath")

    val df = spark.read.format("csv")
      .option("header", "true")
      .option("inferSchema", "true")
      .option("delimiter", ",")
      .load(resourcePath)

    // Data cleaning: drop exact duplicate rows, then rows with any nulls.
    println("原始数据基本信息：")
    df.printSchema()
    println(s"原始数据行数：${df.count()}")

    val dfDistinct = df.dropDuplicates()
    println(s"去重后数据行数：${dfDistinct.count()}")

    val dfCleaned = dfDistinct.na.drop()
    println(s"删除空值后数据行数：${dfCleaned.count()}")
    dfCleaned.show()

    // Kafka producer configuration: string key/value serialization, wait
    // for acknowledgement from all in-sync replicas, retry transient errors.
    val props = new Properties()
    props.put("bootstrap.servers", "localhost:9092")
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")
    props.put("acks", "all")
    props.put("retries", "3")

    val producer = new KafkaProducer[String, String](props)
    val topic = "javaTopic"

    try {
      // collect() pulls the whole dataset onto the driver; acceptable here
      // because the input is a single small CSV resource.
      val rows = dfCleaned.collect()
      println(s"开始向Kafka主题 $topic 发送 ${rows.length} 条数据")

      for (row <- rows) {
        // Serialize the row as one comma-separated line (record has no key).
        val line = row.toSeq.mkString(",")
        val record = new ProducerRecord[String, String](topic, line)

        // Asynchronous send with a completion callback.
        producer.send(record, new Callback() {
          override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
            if (exception != null) {
              // BUGFIX: when the send failed, `metadata` may be null (or an
              // empty placeholder) — do not dereference it in this branch.
              System.err.println(s"发送失败: ${exception.getMessage}")
            } else {
              println(s"发送成功 (offset: ${metadata.offset()}): ${line.take(50)}...")
            }
          }
        })

        // Throttle sends so a local single-broker setup is not overwhelmed.
        Thread.sleep(1000)
      }

      // Block until every buffered record (and its callback) has completed
      // before declaring success; send() alone only enqueues the record.
      producer.flush()
      println("所有数据发送完成")
    } catch {
      // NonFatal lets truly fatal errors (OutOfMemoryError, etc.) propagate.
      case NonFatal(e) =>
        System.err.println(s"发送过程中发生异常: ${e.getMessage}")
        e.printStackTrace()
    } finally {
      // Always release the producer and the Spark session.
      producer.close()
      spark.stop()
    }
  }
}