package ds_industry_2025.ds.ds_03.T4

import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Properties

/*
    1、据Hive的dwd库中相关表或MySQL数据库shtd_store中订单相关表（order_detail、order_info、sku_info），对用户购买过的商品
    进行去重，将其转换为以下格式：第一列为用户id mapping，第二列为用户购买过的商品id mapping，按照user_id与sku_id进行升序排序
    ，输出前5行，将结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下；
字段	类型	中文含义	备注
user_id	int	用户id的mapping对应键
sku_id	int	商品id的mapping对应键
提示：
Mapping操作：例如用户id：1、4、7、8、9，则做完mapping操作转为字典类型，键0对应用户id 1，键1对应用户id 4，以此类推
结果格式如下：
-------user_id_mapping与sku_id_mapping数据前5条如下：-------
0:0
0:89
1:1
1:2
1:3
 */
object t1 {
  /**
   * Task C-1: build 0-based "mapping" keys for the distinct (user_id, sku_id)
   * purchase pairs found in MySQL shtd_store (order_info ⋈ order_detail),
   * restricted to ids present in the user_info / sku_info dimension tables,
   * then print the first 5 pairs sorted ascending by user_id, sku_id.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t1")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties for the shtd_store MySQL database.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // Read one table of shtd_store from MySQL as a DataFrame.
    def readMysql(tableName: String): DataFrame =
      spark.read.jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", tableName, conn)

    val orderInfo   = readMysql("order_info")
    val orderDetail = readMysql("order_detail")
    val skuInfo     = readMysql("sku_info")
    val userInfo    = readMysql("user_info")

    // Drop fact rows whose user_id / sku_id do not exist in the dimension
    // tables. left_semi = "keep rows with a match", adds no columns and
    // needs no distinct() on the dimension keys.
    val detail = orderDetail.join(skuInfo.select("id"),
      orderDetail("sku_id") === skuInfo("id"), "left_semi")
    val order = orderInfo.join(userInfo.select("id"),
      orderInfo("user_id") === userInfo("id"), "left_semi")

    val mapped = order.join(detail, detail("order_id") === order("id"))
      .select("user_id", "sku_id")
      .distinct()
      // dense_rank over the globally ordered ids is 1-based and assigns
      // consecutive keys to distinct values; subtract 1 for the 0-based
      // mapping key the task describes (id 1 -> key 0, next id -> key 1, ...).
      // NOTE: an unpartitioned Window moves all rows to a single partition;
      // acceptable for this exam-sized dataset.
      .withColumn("user_id", dense_rank().over(Window.orderBy("user_id")) - 1)
      .withColumn("sku_id", dense_rank().over(Window.orderBy("sku_id")) - 1)
      // Spec requires ascending order by user_id AND sku_id; sorting on
      // user_id alone leaves sku_id order nondeterministic within ties.
      .orderBy("user_id", "sku_id")

    // Collect to the driver before printing: DataFrame.foreach runs on
    // executors, so println output would be lost in non-local deployments.
    println("-------user_id_mapping与sku_id_mapping数据前5条如下：-------")
    mapped.take(5).foreach(row => println(s"${row.get(0)}:${row.get(1)}"))

    spark.stop()
  }

}
