package com.njbdqn.call

import com.njbdqn.datahandler.KMeansDataHandler
import com.njbdqn.util.{HDFSConnection, MySQLConnection}
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{count, desc, row_number}

/**
  * Assigns each customer to a K-Means cluster ("group") based on their feature
  * vector, then computes the top-N best-selling goods per group and persists the
  * per-user group recommendations to HDFS.
  */
object GroupCall {

  /**
    * Runs the grouping + recommendation pipeline.
    *
    * @param spark active SparkSession
    * @param k     number of K-Means clusters (default 40, the original setting)
    * @param topN  how many top-selling goods to keep per group (default 30)
    */
  def call(spark: SparkSession, k: Int = 40, topN: Int = 30): Unit = {
    import spark.implicits._

    // Per-customer feature vectors; cached because it is used twice (fit + transform).
    val featureDF = KMeansDataHandler.user_group(spark).cache()

    // Fixed seed so group assignments are reproducible across runs — without it
    // the persisted recommendations would change on every execution.
    val kmeans = new KMeans()
      .setFeaturesCol("feature")
      .setK(k)
      .setSeed(42L)

    // Group membership per customer: (cust_id, groups), e.g. (1, 0).
    // Cached because it feeds both the ranking join and the final fan-out join.
    val userGroups = kmeans.fit(featureDF).transform(featureDF)
      .drop("feature")
      .withColumnRenamed("prediction", "groups")
      .cache()

    try {
      // Orders table: one row per order.
      val orders = MySQLConnection.readMySQL(spark, "orders")
        .select("ord_id", "cust_id")
      // Order-item table: one row per purchased good within an order.
      val orderItems = MySQLConnection.readMySQL(spark, "orderItems")
        .select("ord_id", "good_id", "buy_num")

      // Rank goods within each group by how many order lines bought them,
      // keeping only the top `topN` per group.
      val wnd = Window.partitionBy("groups").orderBy(desc("group_buy_count"))
      val groupTopGoods = userGroups
        .join(orders, Seq("cust_id"), "inner")
        .join(orderItems, Seq("ord_id"), "inner")
        .groupBy("groups", "good_id")
        .agg(count("ord_id").as("group_buy_count"))
        .withColumn("rank", row_number() over wnd)
        .filter($"rank" <= topN)

      // Fan the group-level recommendations back out to every user in the group.
      val groupDF = userGroups.join(groupTopGoods, Seq("groups"), "inner")
      HDFSConnection.writeDataToHDFS("/myshops/dwd_group", groupDF)
    } finally {
      // Release executor cache memory once the write action has completed
      // (or failed) — the original code leaked both cached DataFrames.
      featureDF.unpersist()
      userGroups.unpersist()
    }
  }
}
