package ds_industry_2025.ds.ds02.tzgc

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.feature.{OneHotEncoder, StandardScaler, VectorAssembler}
import org.apache.spark.ml.linalg.{DenseVector, SparseVector}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.Properties

/*
      2、根据Hudi的dwd库中相关表或MySQL中shtd_store中相关商品表（sku_info），获取id、spu_id、price、weight、tm_id、
      category3_id 这六个字段并进行数据预处理，对price、weight进行规范化(StandardScaler)处理，对spu_id、tm_id、
      category3_id进行one-hot编码处理（若该商品属于该品牌则置为1，否则置为0）,并按照id进行升序排序，在集群中输出第一条数据
      前10列（无需展示字段名），将结果截图粘贴至客户端桌面【Release\任务C提交结果.docx】中对应的任务序号下。
字段	类型	中文含义	备注
id	double	主键
price	double	价格
weight	double	重量
spu_id#1	double	spu_id 1	若属于该spu_id，则内容为1否则为0
spu_id#2	double	spu_id 2	若属于该spu_id，则内容为1否则为0
.....	double
tm_id#1	double	品牌1	若属于该品牌，则内容为1否则为0
tm_id#2	double	品牌2	若属于该品牌，则内容为1否则为0
……	double
category3_id#1	double	分类级别3 1	若属于该分类级别3，则内容为1否则为0
category3_id#2	double	分类级别3 2	若属于该分类级别3，则内容为1否则为0
……

结果格式如下：
--------------------第一条数据前10列结果展示为：---------------------
1.0,0.892346,1.72568,0.0,0.0,0.0,0.0,1.0,0.0,0.0
 */
object t2 {
  def main(args: Array[String]): Unit = {
    // Spark session with Hive + Hudi support; nonstrict dynamic partitioning
    // lets Hive writes proceed without static partition columns.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t2")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties for the source MySQL database.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // Read only the six columns the task requires from sku_info.
    val sku_info = spark.read
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", "sku_info", conn)
      .select("id", "price", "weight", "spu_id", "tm_id", "category3_id")

    // Index the categorical columns: dense_rank() - 1 maps each distinct value
    // to a contiguous 0-based index, which is what OneHotEncoder expects.
    // NOTE(review): Window.orderBy without partitionBy pulls all rows onto one
    // partition — acceptable for this exam-sized dataset.
    val sku_index = sku_info
      .withColumn("spu_id", dense_rank().over(Window.orderBy("spu_id")) - 1)
      .withColumn("tm_id", dense_rank().over(Window.orderBy("tm_id")) - 1)
      .withColumn("category3_id", dense_rank().over(Window.orderBy("category3_id")) - 1)

    // Persist the indexed data for the downstream recommendation task.
    sku_index.write.format("hive").mode("overwrite").saveAsTable("tzgc.t1")

    // One-hot encode the three indexed categorical columns. dropLast(false)
    // keeps one slot per distinct value, so membership is always an explicit 1.
    val oneHotEncoder = new OneHotEncoder()
      .setInputCols(Array("spu_id", "tm_id", "category3_id"))
      .setOutputCols(Array("spu_id_one", "tm_id_one", "category3_id_one"))
      .setDropLast(false)

    // StandardScaler operates on vector columns, so wrap each numeric column
    // in a single-element vector first.
    val assembler1 = new VectorAssembler()
      .setInputCols(Array("price"))
      .setOutputCol("price_v")

    val assembler2 = new VectorAssembler()
      .setInputCols(Array("weight"))
      .setOutputCol("weight_v")

    // Standardize price and weight (withMean=true centers the data;
    // withStd defaults to true, so this is full z-score normalization).
    val scaler1 = new StandardScaler()
      .setInputCol("price_v")
      .setOutputCol("price_sca")
      .setWithMean(true)

    val scaler2 = new StandardScaler()
      .setInputCol("weight_v")
      .setOutputCol("weight_sca")
      .setWithMean(true)

    // Run the whole preprocessing pipeline in one pass.
    val pipeline = new Pipeline()
      .setStages(Array(assembler1, assembler2, scaler1, scaler2, oneHotEncoder))
      .fit(sku_index)
      .transform(sku_index)

    // Flatten a sparse one-hot vector into a comma-separated string of doubles.
    spark.udf.register(
      "vector_to_array",
      (v1: SparseVector) => v1.toArray.mkString(",")
    )

    // Extract the single scaled value from a one-element dense vector.
    spark.udf.register(
      "vector",
      (v1: DenseVector) => v1.apply(0)
    )

    // Build the final layout: id, scaled price/weight, then the three one-hot
    // groups in spec order (spu_id, tm_id, category3_id), sorted by id ascending.
    // BUG FIX: the original select listed "tm_id_one" twice and omitted
    // "spu_id_one", so the spu_id one-hot columns never reached the output.
    val result = pipeline
      .withColumn("price_sca", expr("vector(price_sca)"))
      .withColumn("weight_sca", expr("vector(weight_sca)"))
      .withColumn("spu_id_one", expr("vector_to_array(spu_id_one)"))
      .withColumn("tm_id_one", expr("vector_to_array(tm_id_one)"))
      .withColumn("category3_id_one", expr("vector_to_array(category3_id_one)"))
      .select("id", "price_sca", "weight_sca", "spu_id_one", "tm_id_one", "category3_id_one")
      .orderBy("id")
      .limit(1)

    result.show

    println("--------------第一条数据前10列结果展示为----------------")
    // Row.mkString joins columns without the "[...]" wrapper that Row.toString
    // adds, so the original drop(1) hack is unnecessary. The one-hot columns are
    // already comma-joined strings, so re-splitting yields one entry per output
    // field, from which we take the first 10.
    // BUG FIX: the original flatMapped the whole row string once per column
    // (the lambda ignored its argument), duplicating the row N times before take(10).
    result.collect().foreach { r =>
      println(r.mkString(",").split(",").take(10).mkString(","))
    }

    spark.close()
  }

}
