package ds_industry_2025.ds.ds_03.T4

import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.Properties



/*
    2、根据第1小题的结果，对其进行聚合，其中对sku_id进行one-hot转换，将其转换为以下格式矩阵：第一列为用户id，其余列名为商品id，按照用
    户id进行升序排序，展示矩阵第一行前5列数据，将结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下。
 */
/**
 * Task 2: aggregate the result of task 1 into a user x sku one-hot matrix.
 * Rows are dense-ranked user ids (ascending); columns are `sku_<id>` for every
 * distinct sku, in ascending numeric id order; a cell is 1.0 when the user
 * bought the sku, 0.0 otherwise. Prints the first row's first 5 columns.
 */
object t2 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      // FIX: was "t1" — keep the app name consistent with this object.
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    // JDBC connection properties for the source MySQL instance.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    /** Read one table from the shtd_store MySQL database as a DataFrame. */
    def read_mysql(tablename: String): DataFrame = {
      spark.read.jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", tablename, conn)
    }

    val order_info = read_mysql("order_info")
    val detail_info = read_mysql("order_detail")
    val sku_info = read_mysql("sku_info")
    val user_info = read_mysql("user_info")

    // Drop order/detail rows whose user_id / sku_id does not exist in the
    // corresponding dimension table (inner join acts as the filter).
    val sku_ids = sku_info.select("id").distinct().withColumnRenamed("id", "id_1")
    val user_ids = user_info.select("id").distinct().withColumnRenamed("id", "id_1")
    val detail = detail_info.join(sku_ids, sku_ids("id_1") === detail_info("sku_id"))
    val order = order_info.join(user_ids, user_ids("id_1") === order_info("user_id"))

    // Task-1 result: distinct (user_id, sku_id) purchase pairs, with both ids
    // re-encoded as contiguous 0-based indices via dense_rank.
    val r1 = order.join(detail, detail("order_id") === order("id"))
      .select("user_id", "sku_id")
      .distinct()
      .withColumn(
        "user_id", dense_rank() over (Window.orderBy("user_id"))
      )
      .withColumn("user_id", col("user_id") - 1)
      .withColumn(
        "sku_id",
        dense_rank() over (Window.orderBy("sku_id"))
      )
      .withColumn("sku_id", col("sku_id") - 1)
      .orderBy("user_id")

    // --- Task 2 starts here ---

    // Build the ordered list of pivot column names "sku_<n>", sorted by the
    // numeric suffix so the matrix columns come out in ascending sku order.
    // FIX: the sort key previously split on "id", but "sku_<n>" never contains
    // "id", so the key was null for every row and the column order was
    // effectively arbitrary. Splitting on "_" yields the numeric suffix at
    // index 1, giving a correct numeric sort.
    val sku_ids_list: Array[String] = r1.select("sku_id")
      .withColumn("sku_id", concat(lit("sku_"), col("sku_id")))
      .distinct()
      .orderBy(
        split(col("sku_id"), "_")(1).cast("int")
      )
      .map(
        x => x(0).toString
      )
      .collect()

    sku_ids_list.foreach(println)

    // Pivot to one-hot form. Passing the explicit value list to pivot() both
    // fixes the column order and avoids an extra pass to discover the values.
    // agg(lit(1.0)) marks presence; missing (user, sku) cells become null and
    // are filled with 0.0.
    val result = r1.withColumn("sku_id", concat(lit("sku_"), col("sku_id")))
      .groupBy("user_id")
      .pivot(col("sku_id"), sku_ids_list)
      .agg(lit(1.0))
      .na.fill(0.0)
      .orderBy("user_id")
      .withColumn("user_id", col("user_id").cast("double"))

    // Per the task statement: show the first 5 columns of the first row.
    result.limit(1)
      .select(result.columns.take(5).map(col): _*)
      .foreach {
        r => println(r.mkString(","))
      }

    spark.close()
  }

}
