package ds_industry_2025.ds.ds_03.sjwj3

import org.apache.spark.mllib.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DoubleType

import scala.collection.mutable.ArrayBuffer

/*
      1、根据子任务一的结果，对其进行SVD分解，对数据进行降维保留前5个奇异值信息，根据该用户已购买的商品分别与未购买的商品计算
      余弦相似度再进行累加求均值，将均值最大的5件商品id进行输出作为推荐使用。将输出结果截图粘贴至客户端桌面【Release\任务C提交结
      果.docx】中对应的任务序号下。
结果格式如下：

------------------------推荐Top5结果如下------------------------
相似度top1(商品id：1，平均相似度：0.983456)
相似度top2(商品id：71，平均相似度：0.782672)
 */
/**
 * Task 3: item recommendation via truncated SVD.
 *
 * Reads the user/item matrix produced by sub-task 2 (`dwd.t2`, first column
 * `user_id`, one column per item; non-zero value => purchased), reduces the
 * item space to the top-5 singular components, then for each item the target
 * user has NOT bought averages its cosine similarity against every item the
 * user HAS bought, and prints the top-5 items by that average.
 */
object t3 {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("t3")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    val data = spark.table("dwd.t2")

    // Item-value row of the target user (user_id == 0.0); drop the leading
    // user_id column and coerce everything to Double.
    val itemCols = data.columns.slice(1, data.columns.length)
    val userRow: Seq[Double] = data
      .filter(col("user_id") === 0.0)
      .limit(1)
      .select(itemCols.map(col): _*)
      .map(_.toSeq.map(_.toString.toDouble))
      .collect()(0)

    // Split item indices (column positions, used as item ids below) into
    // purchased vs not-purchased. Kept as Double so they can be matched
    // against the id columns cast to DoubleType later.
    val (boughtPairs, notBoughtPairs) = userRow.zipWithIndex.partition { case (value, _) => value != 0.0 }
    val buy: Seq[Double] = boughtPairs.map(_._2.toDouble)
    val noBuy: Seq[Double] = notBoughtPairs.map(_._2.toDouble)

    // Build the user-item matrix directly as RDD[Vector] — no need to
    // collect() to the driver and re-parallelize as the original did.
    val rowVectors = data.rdd.map { row =>
      val values = row.toSeq
      Vectors.dense(values.slice(1, values.length).map(_.toString.toDouble).toArray)
    }
    val matrix = new RowMatrix(rowVectors)

    // SVD keeping the top 5 singular values; V (items x 5) holds one reduced
    // representation per item (one row of V per matrix column / item).
    val svd = matrix.computeSVD(5, computeU = true)
    val v = svd.V

    val itemVectors: Array[(Vector, Int)] = v.rowIter.zipWithIndex.toArray
    val skuT = spark.createDataFrame(itemVectors).toDF("vec", "id")
    skuT.show

    // Cosine similarity = 1 - cosine distance (breeze).
    spark.udf.register(
      "cos",
      (v1: DenseVector, v2: DenseVector) =>
        1 - breeze.linalg.functions.cosineDistance(
          breeze.linalg.DenseVector(v1.values),
          breeze.linalg.DenseVector(v2.values)
        )
    )

    // Pair every purchased item with every not-purchased item, compute the
    // cosine similarity of each pair, then average per not-purchased item.
    // `=!=` replaces the deprecated `!==` column operator.
    val result = skuT.crossJoin(skuT)
      .toDF("vec", "id", "vec2", "id2")
      .filter(col("id") =!= col("id2"))
      .withColumn("cos", expr("cos(vec,vec2)"))
      .filter(col("id").cast(DoubleType).isin(buy: _*) && col("id2").cast(DoubleType).isin(noBuy: _*))
      .groupBy("id2")
      .agg(avg("cos").as("avg_cos"))
      .orderBy(desc("avg_cos"))

    // Output in the exact format required by the task statement above
    // (full-width punctuation, "推荐Top5结果如下" header).
    println("------------------------推荐Top5结果如下------------------------")
    result.limit(5).collect().zipWithIndex.foreach {
      case (r, index) =>
        val id = r.getAs[Int](0)
        val cos = r.getAs[Double](1)
        println(s"相似度top${index + 1}(商品id：${id}，平均相似度：${cos})")
    }

    spark.close()
  }
}
