package com.njbdqn.sort

import java.net.URI

import com.njbdqn.datahandler.ALSDataHandler.{UserAction, actToNum}
import com.njbdqn.datahandler.LRDataHandler
import com.njbdqn.util.{HDFSConnection, MySQLConnection}
import javassist.runtime.Desc
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.DenseVector
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions._

/**
 * 对用户已经购买或存放到购物车中的数据建立LR模型
 * 然后再对剩余用户未购买或收藏的信息利用LR模型进行评分
 * 并将分数从高到低排序，取每个用户的前 N 个商品作为推荐商品
 * （N 由 grade 方法中的 rank 常量控制，当前为 10）
 */
object LRGoodSort {

  /**
   * Scores goods each user has not yet interacted with, using a logistic-regression
   * model trained on goods the user already bought or carted, keeps the top-`rank`
   * goods per user, merges in hot-sell recommendations for cold users (users present
   * in the user table but absent from the action logs), and writes the combined
   * recommendations to the MySQL table "usercommend".
   *
   * @param spark the active SparkSession used for all reads, training and writes
   */
  def grade(spark: SparkSession) = {
    // Number of recommendations kept per user.
    // NOTE(review): the object's scaladoc mentions "top 30" but this keeps 10 — confirm intended cutoff.
    val rank = 10
    // train: goods the user already interacted with (labelled); proe: goods not yet touched (to score).
    val (train, proe) = LRDataHandler.lrdata(spark)
    // Reuse a previously trained LR model from HDFS when present; otherwise train and persist one.
    val fs = FileSystem.get(URI.create("hdfs://192.168.88.150:9000"), new Configuration())
    val model: LogisticRegressionModel =
      if (!fs.exists(new Path("/myshops/lrmodel"))) {
        // No saved model yet: train a new one and store it on HDFS for reuse.
        val lr = new LogisticRegression()
          .setMaxIter(20)
          .setRegParam(0.01)
          .setLabelCol("label")
          .setFeaturesCol("features")
        val trained = lr.fit(train)
        HDFSConnection.writeLRModelToHDFS(trained, "/myshops/lrmodel")
        trained
      } else {
        // A trained model already exists on HDFS.
        HDFSConnection.readLRModelFromHDFS("/myshops/lrmodel")
      }
    // Score the untouched goods with the model.
    val res = model.transform(proe)
    import spark.implicits._
    val wnd = Window.partitionBy("cust_id").orderBy(desc("score"))
    // Top-`rank` recommendations for ordinary (active) users.
    // probability(1) is the positive-class probability from the LR output vector.
    val normal = res.select("cust_id", "good_id", "probability")
      .rdd
      .map { case Row(uid: Double, gid: Double, score: DenseVector) => (uid, gid, score(1)) }
      .toDF("cust_id", "good_id", "score")
      .select($"cust_id", $"good_id", row_number().over(wnd).alias("rank"))
      .filter(s"rank<=${rank}")
      .select($"cust_id".cast("int"), $"good_id".cast("int"), $"rank")
    // Cold users: present in the user table but with no entries in the action logs.
    // Recommend the hot-sell recall goods to them.
    val allHot = HDFSConnection.readDataFromHDFS(spark, "/myshops/dwd_hotsell")
    // Users that appear in the action logs (field index 3 of each space-separated log line
    // is presumably the user id — confirm against the log format).
    val txt = spark.sparkContext.textFile("file:///d:/log/*.log").cache()
    val normalUser = txt.map(line => {
      val arr = line.split(" ")
      (arr(3), "yes")
    }).toDF("cust_id", "flag").distinct().cache()
    // Keep only hot-sell rows for users with no logged behavior (left join miss → flag is null).
    val win = Window.partitionBy("cust_id").orderBy(desc("sellnum"))
    val cold = allHot.join(normalUser, Seq("cust_id"), "left")
      .filter("flag is null")
      .select($"cust_id", $"good_id", row_number().over(win).alias("rank"))
      .filter(s"rank<=${rank}")
    // Merge normal- and cold-user recommendations and persist to MySQL.
    // NOTE(review): `normal` casts cust_id/good_id to int while `cold` does not —
    // union is positional, so confirm both sides carry the same column types.
    val tj = normal.union(cold)
    MySQLConnection.writeTable(spark, tj, "usercommend")
  }
}
