package ds_industry_2025.ds.YangJuan_2024.tzgc

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{OneHotEncoder, StandardScaler, VectorAssembler}
import org.apache.spark.ml.functions.vector_to_array
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.Properties

/*
    根据 Hive 的 dwd 库中相关表或 MySQL 中 shtd_store 中相关商品表（sku_info），获取 id、
spu_id、price、weight、tm_id、category3_id 这六个字段并进行数据预处理，对 price、weight
进行规范化(StandardScaler)处理，对 spu_id、tm_id、category3_id 进行 one-hot 编码处理
（若该商品属于该品牌则置为 1，否则置为 0），并按照 id 进行升序排序，在集群中输出
第一条数据前 10 列（无需展示字段名），将结果截图粘贴至客户端桌面【Release\任务 C
提交结果.docx】中对应的任务序号下。
 */
object t2 {

  /**
   * Feature-engineering job for the sku_info table.
   *
   * Reads id/price/weight/spu_id/tm_id/category3_id from MySQL, standardizes
   * price and weight (StandardScaler), one-hot encodes spu_id/tm_id/category3_id,
   * orders by id ascending, and prints the first 10 columns of the first row.
   * The rank-indexed intermediate table is persisted to Hive (tzgc.t2) for the
   * follow-up recommendation task.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties for the source MySQL instance.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    val sku_info = spark.read
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", "sku_info", conn)
      .select("id", "price", "weight", "spu_id", "tm_id", "category3_id")

    // Convert the categorical id columns to dense 0-based rank indices so that
    // OneHotEncoder (which requires non-negative integer category indices) can
    // consume them. NOTE(review): Window.orderBy without partitionBy collapses
    // all rows into a single partition — fine for this exercise's data size,
    // but not scalable to large tables.
    val sku_index = sku_info
      .withColumn("spu_id", dense_rank().over(Window.orderBy("spu_id")) - 1)
      .withColumn("tm_id", dense_rank().over(Window.orderBy("tm_id")) - 1)
      .withColumn("category3_id", dense_rank().over(Window.orderBy("category3_id")) - 1)

    // Persist the indexed table to Hive; the next task's recommender reads it.
    sku_index.write.format("hive").mode("overwrite")
      .saveAsTable("tzgc.t2")

    // StandardScaler operates on vector columns, so each numeric column is
    // first wrapped into a 1-element vector.
    val priceAssembler = new VectorAssembler()
      .setInputCols(Array("price"))
      .setOutputCol("price_v")

    val weightAssembler = new VectorAssembler()
      .setInputCols(Array("weight"))
      .setOutputCol("weight_v")

    val priceScaler = new StandardScaler()
      .setInputCol("price_v")
      .setOutputCol("price_sca")

    val weightScaler = new StandardScaler()
      .setInputCol("weight_v")
      .setOutputCol("weight_sca")

    // dropLast=false keeps one slot per distinct category, matching the task
    // statement ("1 if the item belongs to the brand, else 0").
    val oneHot = new OneHotEncoder()
      .setInputCols(Array("spu_id", "tm_id", "category3_id"))
      .setOutputCols(Array("spu_id_one", "tm_id_one", "category3_id_one"))
      .setDropLast(false)

    val featurized = new Pipeline()
      .setStages(Array(priceAssembler, weightAssembler, priceScaler, weightScaler, oneHot))
      .fit(sku_index)
      .transform(sku_index)

    featurized.show

    // UDFs are typed against the abstract ml.linalg.Vector trait.
    // BUG FIX: the originals declared the concrete SparseVector / DenseVector
    // subclasses; if a column's runtime vector subtype differs (Spark ML does
    // not guarantee which subclass a transformer emits), the UDF call fails
    // with a ClassCastException. The abstract Vector type accepts both.
    // Also note: use Vector.toArray, NOT .values — for a sparse vector,
    // .values holds only the non-zero entries, silently dropping the zeros.
    spark.udf.register(
      "vector_to_arr",
      (v: Vector) => v.toArray.mkString(",")
    )

    // Extracts the single element of a 1-dimensional vector (scaled scalar).
    spark.udf.register(
      "vector",
      (v: Vector) => v(0)
    )

    // Unpack the vector columns into plain values / comma-joined strings so
    // the final rows are printable column-by-column.
    val result = featurized
      .withColumn("price_sca", expr("vector(price_sca)"))
      .withColumn("weight_sca", expr("vector(weight_sca)"))
      .withColumn("spu_id_one", expr("vector_to_arr(spu_id_one)"))
      .withColumn("tm_id_one", expr("vector_to_arr(tm_id_one)"))
      .withColumn("category3_id_one", expr("vector_to_arr(category3_id_one)"))
      .orderBy("id")
      .select("id", "price_sca", "weight_sca", "spu_id_one", "tm_id_one", "category3_id_one")
      .limit(1)

    result.show

    println("----------第一条数据前10列的结果展示为---------------")
    result.collect().foreach { row =>
      // Flatten every column (the one-hot strings are comma-separated) and
      // print the first 10 scalar values of the row.
      println(row.toSeq.flatMap(v => v.toString.split(",")).take(10).mkString(","))
    }

    spark.close()
  }

}
