package ds_industry_2025.ds.ds_07.T4

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Properties
/*
    剔除订单信息表与订单详细信息表中用户id与商品id不存在于现有的维表中的记录，同时建议多利用缓存并充分考虑并行度来优化代码，达到更
    快的计算效果。

    1、据Hive的dwd库中相关表或MySQL数据库shtd_store中订单相关表（order_detail、order_info、sku_info），对用户购买过的商品
    进行去重，将其转换为以下格式：第一列为用户id mapping，第二列为用户购买过的商品id mapping，按照user_id与sku_id进行升序排序
    ，输出前5行，将结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下；
 */
object t1 {
  def main(args: Array[String]): Unit = {
    // Build a Spark session with Hive support; Kryo serialization and the
    // Hudi session extension match the rest of the project's configuration.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t1")
      .config("hive.exec.dynamic.partition.mode","nonstrict")
      .config("spark.serializer","org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions","org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties for the shtd_store MySQL database.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    /** Reads one table of the shtd_store MySQL database as a DataFrame. */
    def read(tablename: String): DataFrame =
      spark.read.jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", tablename, conn)

    // Source tables: dimension tables (sku_info, user_info) and fact tables.
    val sku_info = read("sku_info")
    val user_info = read("user_info")
    val order_info = read("order_info")
    val order_detail = read("order_detail")

    // Valid dimension keys, cached because each is reused in a join below
    // (the task statement explicitly asks to use caching for speed).
    val skus = sku_info.select("id").distinct().withColumnRenamed("id", "id_1").cache()
    val users = user_info.select("id").distinct().withColumnRenamed("id", "id_1").cache()

    // Drop order rows whose user_id — and detail rows whose sku_id — do not
    // exist in the corresponding dimension table (inner join acts as the filter).
    val order = order_info.join(users, users("id_1") === order_info("user_id"), "inner")
    val detail = order_detail.join(skus, skus("id_1") === order_detail("sku_id"), "inner")

    // Deduplicate (user_id, sku_id) purchase pairs, map each id to a dense
    // 0-based index ("mapping" columns), sort ascending, keep the first 5 rows.
    // NOTE: a whole-dataset Window.orderBy collapses to a single partition;
    // acceptable here because only 5 rows are ultimately needed.
    val result = order.join(detail, detail("order_id") === order("id"))
      .select("user_id", "sku_id")
      .distinct()
      .withColumn("user_id", dense_rank().over(Window.orderBy("user_id")) - 1)
      .withColumn("sku_id", dense_rank().over(Window.orderBy("sku_id")) - 1)
      .orderBy("user_id", "sku_id")
      .limit(5)

    println("----------------user_id_mapping和sku_id_mapping数据前5条如下")
    result.collect().foreach(r => println(s"${r(0)}:${r(1)}"))

    spark.stop()
  }

}
