import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

// One clothing product parsed from a CSV row.
// name          - product title (upper-cased at load time)
// price         - unit price
// category      - category label from the feed
// comment_count - comment count parsed from strings like "1万+" (see parseCommentCount)
// link, src     - taken from CSV columns 5 and 6; presumably the product page URL
//                 and image source URL — confirm against the CSV layout
case class Product(name: String, price: Float, category: String, comment_count: Int, link: String, src: String)

object DataLoader {
  // Absolute path to the source CSV of product records.
  val PRODUCT_PATH = "D:\\BigData\\project\\clothesRecommender\\recommender\\dataLoader\\src\\main\\resources\\products.csv"

  /**
   * Entry point: reads the product CSV, parses each row into a [[Product]],
   * and appends the resulting DataFrame to the MySQL `clothes` table.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("DataLoader")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    val data = sc.textFile(PRODUCT_PATH)

    // Parse rows defensively: a malformed line (too few columns or a
    // non-numeric price) is skipped instead of aborting the whole job,
    // which the original map-based version did on the first bad row.
    val productsRDD = data.flatMap { line =>
      val fields = line.split(",")
      if (fields.length < 7) None
      else
        scala.util.Try {
          Product(
            name = fields(0).toUpperCase(),
            price = fields(1).toFloat,
            category = fields(2),
            comment_count = parseCommentCount(fields(3)),
            link = fields(5), // fields(4) is intentionally unused, as in the original
            src = fields(6)
          )
        }.toOption
    }

    // Convert the RDD to a DataFrame (schema reflected from the Product case class).
    val productsDF = spark.createDataFrame(productsRDD)

    // Write the DataFrame into the MySQL database.
    val url = "jdbc:mysql:///clothes_recommender_system?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC"
    val user = "root"
    val password = "123456"
    val table = "clothes"

    productsDF.write.format("jdbc")
      .option("url", url)
      .option("dbtable", table)
      .option("user", user)
      .option("password", password)
      .option("driver", "com.mysql.cj.jdbc.Driver")
      .mode("append")
      .save()

    // Shut down the SparkSession.
    spark.close()
  }

  /**
   * Parses a human-readable comment count such as "200+", "1万+", "3千+" or a
   * plain number like "1234" into an integer.
   *
   * @param countStr raw comment-count text from the CSV
   * @return the numeric count; 0 when the text contains no recognizable number
   */
  def parseCommentCount(countStr: String): Int = {
    // The "+" suffix is optional: the original pattern required it, so plain
    // numeric strings ("1234") silently parsed to 0.
    val pattern = "(\\d+)(万|千)?\\+?".r
    pattern.findFirstMatchIn(countStr) match {
      case Some(m) =>
        val count = m.group(1).toInt // group(1) is digits-only, toInt is safe
        m.group(2) match {           // group(2) is null when no unit suffix matched
          case "万" => count * 10000 // 万 = 10,000
          case "千" => count * 1000  // 千 = 1,000
          case _    => count
        }
      case None => 0
    }
  }
}
