package com.njbdqn.call

import com.njbdqn.datahandler.ALSDataHandler
import com.njbdqn.util.HdfsConnection
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import org.apache.spark.sql.SparkSession

object ALSCall {
  /**
   * Trains an explicit-preference ALS collaborative-filtering model on the
   * user/goods score data and writes the top-N product recommendations per
   * user to HDFS at `/kb08/myshops/dwd_als`.
   *
   * @param spark      active SparkSession
   * @param topN       number of products recommended per user (default 30)
   * @param rank       number of latent factors in the ALS model (default 10)
   * @param iterations number of ALS training iterations (default 20)
   * @param lambda     ALS regularization parameter (default 0.01)
   */
  def call(spark: SparkSession,
           topN: Int = 30,
           rank: Int = 10,
           iterations: Int = 20,
           lambda: Double = 0.01): Unit = {
    val resRdd = ALSDataHandler.alsData(spark)

    // Convert the score table rows into MLlib Rating objects.
    // NOTE(review): assumes every uid/gid/score string parses as Int/Int/Float —
    // a malformed row will throw NumberFormatException; confirm upstream cleaning.
    val alldata = resRdd.rdd.map { row =>
      Rating(row.getAs("uid").toString.toInt,
        row.getAs("gid").toString.toInt,
        row.getAs("score").toString.toFloat)
    }

    // Train on the full data set. (An 0.8/0.2 randomSplit train/test split was
    // previously sketched here but is intentionally not used.)
    val model = new ALS()
      .setRank(rank)
      .setIterations(iterations)
      .setLambda(lambda)
      .setImplicitPrefs(false)
      .run(alldata)

    // Top-N product recommendations for every user: RDD[(Int, Array[Rating])].
    val recommend = model.recommendProductsForUsers(topN)

    import spark.implicits._
    // Flatten (user -> ratings) pairs into one row per (uid, gid, score).
    val frame = recommend.flatMap {
      case (user: Int, ratings: Array[Rating]) =>
        ratings.map(rat => (user, rat.product, rat.rating))
    }.toDF("uid", "gid", "score")

    // Join back the lookup tables so numeric uid/gid are replaced by the
    // original customer/goods identifiers, then drop the numeric keys.
    val userTab = ALSDataHandler.userToNum(spark)
    val goodTab = ALSDataHandler.goodToNum(spark)

    val ff = frame.join(userTab, Seq("uid"), "inner")
      .join(goodTab, Seq("gid"), "inner")
      .drop("uid", "gid")
    HdfsConnection.writeDataToHdfs("/kb08/myshops/dwd_als", ff)
  }
}
