package com.njbdqn.call

import com.njbdqn.datahandler.KMeansDataHandler
import com.njbdqn.util.{HdfsConnection, MysqlConnection}
import org.apache.spark.ml.clustering.KMeans
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.{count, desc, row_number}

/**
 * 计算用户的分组，并为分组推荐相应的商品
 */
/**
 * Clusters users into groups with KMeans and, for each group, derives the
 * top-selling goods so every user can be recommended the goods popular
 * within their own cluster. The final (user, group, good) table is written
 * to HDFS.
 */
object GroupCall {

  // Number of user clusters produced by KMeans.
  private val NumGroups = 48

  // How many best-selling goods to keep per group.
  private val TopGoodsPerGroup = 30

  /**
   * Runs the group-based recommendation pipeline end to end and writes the
   * result to HDFS under /kb08/myshops/dwd_group.
   *
   * @param spark the active SparkSession
   */
  def call(spark: SparkSession): Unit = {
    import spark.implicits._

    // Per-user feature vectors (column "feature") built upstream.
    // Cached because it is consumed twice below: once by fit(), once by transform().
    val resdf = KMeansDataHandler.user_group(spark).cache()

    // Orders table: maps each order to the customer who placed it.
    val orderTable = MysqlConnection.readTable(spark, "orders")
      .select("ord_id", "cust_id")

    // Order detail table: maps each order to the goods it contains.
    // NOTE(review): "buy_num" is selected but never used below —
    // group_buy_count counts order lines, not purchased quantities; confirm intent.
    val orderItemTable = MysqlConnection.readTable(spark, "orderItems")
      .select("ord_id", "good_id", "buy_num")

    // Assign each user to one of NumGroups clusters; the cluster id lands in
    // column "groups". Cached: reused by both joins below.
    val kms = new KMeans().setFeaturesCol("feature").setK(NumGroups)
    val user_group_tab = kms.fit(resdf).transform(resdf)
      .withColumnRenamed("prediction", "groups")
      .drop("feature")
      .cache()

    // Rank goods within each group by how many order lines bought them, and
    // keep only the TopGoodsPerGroup best sellers per group.
    val window = Window.partitionBy("groups").orderBy(desc("group_buy_count"))
    val group_goods = user_group_tab
      .join(orderTable, Seq("cust_id"), "inner")
      .join(orderItemTable, Seq("ord_id"), "inner")
      .groupBy("groups", "good_id")
      .agg(count("ord_id").as("group_buy_count"))
      .withColumn("rank", row_number().over(window))
      .filter($"rank" <= TopGoodsPerGroup)

    // Per-user recommendations: every user inherits the goods recommended for
    // the group they belong to.
    val groupDF = user_group_tab.join(group_goods, Seq("groups"), "inner")
    HdfsConnection.writeDataToHdfs("/kb08/myshops/dwd_group", groupDF)

    // Free executor memory once the write has materialized the result.
    // NOTE(review): assumes writeDataToHdfs triggers an action — confirm,
    // otherwise unpersisting here would force recomputation later.
    user_group_tab.unpersist()
    resdf.unpersist()
  }
}
