package ds_industry_2025.ds.ds01.sjwj2

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{OneHotEncoder, StandardScaler, VectorAssembler}
import org.apache.spark.ml.linalg.{DenseVector, SparseVector}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.Properties

/*
      2、根据Hive的dwd库中相关表或MySQL中shtd_store中相关商品表（sku_info），获取id、spu_id、price、weight、tm_id、
      category3_id 这六个字段并进行数据预处理，对price、weight进行规范化(StandardScaler)处理，对spu_id、tm_id、
      category3_id进行one-hot编码处理（若该商品属于该品牌则置为1，否则置为0）,并按照id进行升序排序，在集群中输出第一条
      数据前10列（无需展示字段名），将结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下。
 */
object t2 {
  def main(args: Array[String]): Unit = {
    // Build the session. Hive support is required because the indexed data is
    // saved to the Hive table tzgc.t2 below.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // FIX: class name was misspelled ("...Extnesion"); a bad extension class
      // fails to load when the session is created.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      // FIX: key was "spark.sql.parquetLegacyFormat" with value "ture" — both
      // wrong. The real key is spark.sql.parquet.writeLegacyFormat and the
      // misspelled key was being silently ignored.
      .config("spark.sql.parquet.writeLegacyFormat", "true")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    // JDBC connection properties for the source MySQL database
    // (renamed from "conn" — it is a Properties bag, not a connection).
    val props = new Properties()
    props.setProperty("user", "root")
    props.setProperty("password", "123456")
    props.setProperty("driver", "com.mysql.jdbc.Driver")

    // Pull only the six columns the task needs from shtd_store.sku_info.
    val skuInfo = spark.read
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", "sku_info", props)
      .select("id", "price", "weight", "spu_id", "tm_id", "category3_id")

    // Index the categorical columns: dense_rank over each column yields a
    // contiguous 0-based index, which is the input OneHotEncoder expects.
    val skuIndexed = skuInfo
      .withColumn("spu_id", dense_rank().over(Window.orderBy("spu_id")) - 1)
      .withColumn("tm_id", dense_rank().over(Window.orderBy("tm_id")) - 1)
      .withColumn("category3_id", dense_rank().over(Window.orderBy("category3_id")) - 1)
      .orderBy("id")

    // Persist the indexed data to Hive for later tasks.
    skuIndexed.write.format("hive").mode("overwrite")
      .saveAsTable("tzgc.t2")

    // One-hot encode the three indexed columns; dropLast = false keeps a slot
    // for every category (the task requires a full 0/1 indicator per brand).
    val encoder = new OneHotEncoder()
      .setInputCols(Array("spu_id", "tm_id", "category3_id"))
      .setOutputCols(Array("spu_id_o", "tm_id_o", "category3_id_o"))
      .setDropLast(false)

    // StandardScaler operates on vector columns, so each numeric column is
    // first wrapped into a single-element vector.
    val priceAssembler = new VectorAssembler()
      .setInputCols(Array("price"))
      .setOutputCol("price_v")

    val weightAssembler = new VectorAssembler()
      .setInputCols(Array("weight"))
      .setOutputCol("weight_v")

    // Standardize price and weight (withMean = true centers to zero mean).
    val priceScaler = new StandardScaler()
      .setInputCol("price_v")
      .setOutputCol("price_s")
      .setWithMean(true)

    val weightScaler = new StandardScaler()
      .setInputCol("weight_v")
      .setOutputCol("weight_s")
      .setWithMean(true)

    val transformed = new Pipeline()
      .setStages(Array(priceAssembler, weightAssembler, priceScaler, weightScaler, encoder))
      .fit(skuIndexed)
      .transform(skuIndexed)

    // FIX: accept the generic ml Vector instead of a concrete subclass.
    // OneHotEncoder typically emits SparseVector and StandardScaler
    // DenseVector, but binding a UDF to one concrete representation throws a
    // ClassCastException if the actual representation ever differs.
    spark.udf.register(
      "vector_1",
      (v: org.apache.spark.ml.linalg.Vector) => v.toArray.mkString(",")
    )

    spark.udf.register(
      "vector",
      (v: org.apache.spark.ml.linalg.Vector) => v.apply(0)
    )

    // Flatten the vector columns back to scalar / CSV-string columns and
    // order the final result by id ascending, as the task requires.
    val result = transformed
      .withColumn("price", expr("vector(price_s)"))
      .withColumn("weight", expr("vector(weight_s)"))
      .withColumn("spu_id_o", expr("vector_1(spu_id_o)"))
      .withColumn("tm_id_o", expr("vector_1(tm_id_o)"))
      .withColumn("category3_id_o", expr("vector_1(category3_id_o)"))
      .select("id", "price", "weight", "spu_id_o", "tm_id_o", "category3_id_o")
      .orderBy("id")

    // Print the first 10 values of the first row. Each one-hot column expands
    // to several comma-separated values, hence the split before take(10).
    println("----------第一条数据前10列结果展示为:-----------")
    result.limit(1).collect().foreach { row =>
      println(row.toSeq.flatMap(v => v.toString.split(",")).take(10).mkString(","))
    }

    spark.close()
  }

}
