package com.chinasoft.shop

import org.apache.spark.sql.{Dataset, SparkSession}
import java.sql.{Connection, DriverManager, PreparedStatement}

// Typed record for one Dianping (大众点评) restaurant review, as parsed from
// the cleaned CSV. Field names mirror the CSV header columns exactly.
case class DianpingReview(
                           userId: Long,          // reviewer's user id
                           restId: Long,          // restaurant id being reviewed
                           name: String,          // restaurant name
                           rating: Double,        // overall rating
                           rating_env: Long,      // environment sub-rating
                           rating_flavor: Long,   // flavor sub-rating
                           rating_service: Long,  // service sub-rating
                           timestamp: Long,       // review time (epoch-style numeric — TODO confirm unit)
                           comment: String        // free-text review body; "" when absent
                         )

object InsertData {

  // Flush JDBC batches every N rows: bounds memory per partition while
  // avoiding one network round-trip per inserted row.
  private val BatchSize = 500

  def main(args: Array[String]): Unit = {
    // 1. Initialise the SparkSession (local mode, all cores).
    val spark = SparkSession.builder()
      .appName("InsertDataToMySQL")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._

    // 2. Read the CSV with Spark's built-in parser so quoted commas and
    //    embedded newlines inside review text are handled correctly.
    val csvPath = "G:/github/dataclean/target/dianping_clean.csv"
    val df = spark.read
      .option("header", "true")
      .option("multiLine", "true")     // review text may span several lines
      .option("quote", "\"")           // quote character
      .option("escape", "\"")          // CSV-style doubled-quote escaping
      .option("mode", "PERMISSIVE")    // tolerate malformed rows instead of failing
      .csv(csvPath)

    // 3. Convert each row to the typed case class. Numeric fields are parsed
    //    from their string form; a null comment is normalised to "".
    //    NOTE(review): .toLong/.toDouble will still throw on non-numeric
    //    values — PERMISSIVE only covers the CSV parse, not this cast.
    val ds: Dataset[DianpingReview] = df.map { row =>
      DianpingReview(
        userId         = row.getAs[String]("userId").toLong,
        restId         = row.getAs[String]("restId").toLong,
        name           = row.getAs[String]("name"),
        rating         = row.getAs[String]("rating").toDouble,
        rating_env     = row.getAs[String]("rating_env").toLong,
        rating_flavor  = row.getAs[String]("rating_flavor").toLong,
        rating_service = row.getAs[String]("rating_service").toLong,
        timestamp      = row.getAs[String]("timestamp").toLong,
        comment        = Option(row.getAs[String]("comment")).getOrElse("")
      )
    }

    // Cache and count BEFORE the insert. Counting afterwards (as the naive
    // version does) re-reads and re-parses the whole CSV a second time, and
    // could report a different number than was actually processed.
    val total = ds.cache().count()

    // 4. JDBC configuration.
    //    NOTE(review): credentials are hard-coded — move to config/env vars.
    //    rewriteBatchedStatements=true lets the MySQL driver collapse each
    //    batch into a single multi-row INSERT.
    val driver   = "com.mysql.cj.jdbc.Driver"
    val url      = "jdbc:mysql://localhost:3306/dazhong?useSSL=false&characterEncoding=utf8&serverTimezone=UTC&rewriteBatchedStatements=true"
    val userName = "root"
    val passWd   = "Etestnmm4l!"
    Class.forName(driver)

    // 5. Insert partition by partition: one connection and one prepared
    //    statement per partition, rows sent in batches, committed once per
    //    partition so a mid-partition failure does not leave partial rows.
    ds.rdd.foreachPartition { iter =>
      var conn: Connection = null
      var ps: PreparedStatement = null
      try {
        conn = DriverManager.getConnection(url, userName, passWd)
        conn.setAutoCommit(false) // single commit per partition
        val sql =
          """INSERT INTO dazhong_dianping
            | (userId, restId, name, rating,
            |  rating_env, rating_flavor, rating_service,
            |  `timestamp`, comment)
            | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)""".stripMargin
        ps = conn.prepareStatement(sql)

        var pending = 0
        iter.foreach { r =>
          ps.setLong(1, r.userId)
          ps.setLong(2, r.restId)
          ps.setString(3, r.name)
          ps.setDouble(4, r.rating)
          ps.setLong(5, r.rating_env)
          ps.setLong(6, r.rating_flavor)
          ps.setLong(7, r.rating_service)
          ps.setLong(8, r.timestamp)
          ps.setString(9, r.comment)
          ps.addBatch()
          pending += 1
          if (pending >= BatchSize) {
            ps.executeBatch()
            pending = 0
          }
        }
        if (pending > 0) ps.executeBatch() // flush the final partial batch
        conn.commit()
      } finally {
        // Nested try/finally: a failure while closing the statement must
        // not leak the connection (the flat version skipped conn.close()
        // whenever ps.close() threw).
        try {
          if (ps != null) ps.close()
        } finally {
          if (conn != null) conn.close()
        }
      }
    }

    // 6. Report the pre-computed total (served from cache, no re-read).
    println(s"总共插入了 $total 条数据")
    ds.unpersist()
    spark.stop()
  }
}
