package com.chinasoft.shop

import org.apache.spark.sql.{Dataset, SparkSession}
import java.sql.{Connection, DriverManager, PreparedStatement}

/**
 * One cleaned Dianping (大众点评) restaurant review row, as parsed from CSV.
 *
 * Marked `final`: case classes should not be extended (it breaks
 * `equals`/`copy` semantics).
 *
 * @param userId         reviewer id; 0 when the source field was blank/unparseable
 * @param restId         restaurant id; 0 when blank/unparseable
 * @param name           restaurant name; empty string when missing
 * @param rating         overall rating; 0.0 when blank/unparseable
 * @param rating_env     environment sub-rating
 * @param rating_flavor  flavor sub-rating
 * @param rating_service service sub-rating
 * @param timestamp      review time as a raw long (epoch units not shown in SOURCE — confirm with producer)
 * @param comment        free-text review body; empty string when missing
 */
final case class DianpingReview(
                                 userId: Long,
                                 restId: Long,
                                 name: String,
                                 rating: Double,
                                 rating_env: Double,
                                 rating_flavor: Double,
                                 rating_service: Double,
                                 timestamp: Long,
                                 comment: String
                               )

/**
 * Reads the cleaned Dianping CSV and bulk-inserts it into MySQL via JDBC,
 * committing in batches of 1000 per partition.
 *
 * FIX (correctness): the original accumulated the grand total in a
 * driver-side `var` captured by the `foreachPartition` closure. Spark
 * serializes closures to executors, so executor-side mutations never reach
 * the driver's copy — the final count would be 0 on a cluster and was only
 * accidentally right in `local[*]` mode. Replaced with a `LongAccumulator`,
 * which aggregates correctly in both modes.
 */
object InsertData {
  def main(args: Array[String]): Unit = {
    import scala.util.control.NonFatal

    // 1. Build the SparkSession. Heartbeat/network timeouts are raised to
    //    avoid "Cannot receive any reply" warnings during long JDBC batches.
    val spark = SparkSession.builder()
      .appName("InsertDataToMySQL")
      .master("local[*]")
      .config("spark.executor.heartbeatInterval", "30s") // heartbeat every 30s
      .config("spark.network.timeout", "60s")            // network timeout 60s
      .getOrCreate()
    import spark.implicits._

    // 2. Read the cleaned CSV. multiLine + matching quote/escape handle
    //    embedded newlines and quotes inside the free-text `comment` column.
    val csvPath = "G:/github/dataclean/target/dianping_clean.csv"
    val df = spark.read
      .option("header", "true")
      .option("multiLine", "true")
      .option("quote", "\"")
      .option("escape", "\"")
      .option("sep", ",")
      .option("mode", "PERMISSIVE")
      .csv(csvPath)

    // 3. Lenient parsers: null / blank / malformed input degrades to a
    //    neutral value instead of failing the whole job on one dirty row.
    //    NonFatal (not Exception) so OOM/interrupts still propagate.
    def safeToLong(s: String): Long =
      try { if (s == null || s.trim.isEmpty) 0L else s.trim.toLong }
      catch { case NonFatal(_) => 0L }

    def safeToDouble(s: String): Double =
      try { if (s == null || s.trim.isEmpty) 0.0 else s.trim.toDouble }
      catch { case NonFatal(_) => 0.0 }

    // Null-safe string: CSV nulls become empty strings.
    def safeString(s: String): String = if (s == null) "" else s

    // 4. Map raw string rows to the typed case class.
    val ds: Dataset[DianpingReview] = df.map { row =>
      DianpingReview(
        userId         = safeToLong(row.getAs[String]("userId")),
        restId         = safeToLong(row.getAs[String]("restId")),
        name           = safeString(row.getAs[String]("name")),
        rating         = safeToDouble(row.getAs[String]("rating")),
        rating_env     = safeToDouble(row.getAs[String]("rating_env")),
        rating_flavor  = safeToDouble(row.getAs[String]("rating_flavor")),
        rating_service = safeToDouble(row.getAs[String]("rating_service")),
        timestamp      = safeToLong(row.getAs[String]("timestamp")),
        comment        = safeString(row.getAs[String]("comment"))
      )
    }

    // 5. JDBC connection settings.
    //    SECURITY NOTE(review): credentials are hard-coded in source; move
    //    them to configuration / environment variables before deploying.
    val driver   = "com.mysql.cj.jdbc.Driver"
    val url      = "jdbc:mysql://localhost:3306/dazhong?useSSL=false&characterEncoding=utf8&serverTimezone=UTC"
    val userName = "root"
    val passWd   = "Etestnmm4l!"
    Class.forName(driver)

    // 6. Batched insert with progress output.
    //    Cluster-safe counter: a LongAccumulator aggregates executor-side
    //    increments back to the driver (a captured `var` would not).
    val totalInserted = spark.sparkContext.longAccumulator("totalInserted")

    ds.foreachPartition { (iter: Iterator[DianpingReview]) =>
      var conn: Connection = null
      var ps: PreparedStatement = null
      val batchSize = 1000   // rows per executeBatch/commit
      var pending = 0        // rows buffered since the last commit
      var partitionTotal = 0L // rows committed by this partition

      try {
        conn = DriverManager.getConnection(url, userName, passWd)
        conn.setAutoCommit(false) // commit per batch, not per row
        val sql =
          """INSERT INTO dazhong_dianping (
            |  userId, restId, name, rating,
            |  rating_env, rating_flavor, rating_service,
            |  `timestamp`, comment
            |) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
          """.stripMargin
        ps = conn.prepareStatement(sql)

        // Execute and commit the buffered batch, then update counters.
        // Shared by the in-loop flush and the tail flush so the two paths
        // cannot drift apart (the original duplicated this logic).
        def flush(): Unit = {
          ps.executeBatch()
          conn.commit()
          partitionTotal += pending
          totalInserted.add(pending)
          println(s"分区进度：已插入 $partitionTotal 条数据")
          pending = 0
        }

        iter.foreach { review =>
          ps.setLong(1, review.userId)
          ps.setLong(2, review.restId)
          ps.setString(3, review.name)
          ps.setDouble(4, review.rating)
          ps.setDouble(5, review.rating_env)
          ps.setDouble(6, review.rating_flavor)
          ps.setDouble(7, review.rating_service)
          ps.setLong(8, review.timestamp)
          ps.setString(9, review.comment)

          ps.addBatch()
          pending += 1
          if (pending >= batchSize) flush()
        }

        // Commit whatever is left after the iterator is exhausted.
        if (pending > 0) flush()

      } catch {
        // NonFatal only: fatal errors (OOM, interrupts) must kill the task.
        case NonFatal(e) =>
          println(s"插入失败: ${e.getMessage}")
          if (conn != null) {
            // rollback() can itself throw; don't let it mask the real error.
            try conn.rollback()
            catch { case NonFatal(r) => println(s"回滚失败: ${r.getMessage}") }
          }
      } finally {
        // Close quietly so cleanup failures don't shadow the original error.
        if (ps != null) try ps.close() catch { case NonFatal(_) => () }
        if (conn != null) try conn.close() catch { case NonFatal(_) => () }
      }
    }

    // Final total, aggregated from all partitions via the accumulator.
    println(s"数据插入完成！总插入量：${totalInserted.value} 条")

    spark.stop()
  }
}