package com.njbdqn.sort

import java.net.URI

import com.njbdqn.datahandler.ALSDataHandler.{UserAction, actToNum}
import com.njbdqn.datahandler.LRDataHandler
import com.njbdqn.util.{HdfsConnection, MysqlConnection}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.ml.classification.{LogisticRegression, LogisticRegressionModel}
import org.apache.spark.ml.linalg.DenseVector
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions._

/**
 * 对用户已经购买或存放到购物车中的数据建立LR模型
 * 再对剩余用户未购买或收藏的信息利用LR模型进行评分
 * 并将分数从高到低排序取每个用户的前10名的商品为推荐商品
 */
object LRGoodSort {

  /**
   * Trains (or reloads) a logistic-regression model on the goods a user has
   * already bought / carted, scores the goods the user has NOT touched, and
   * keeps the top-N goods per user as recommendations. Users that exist in
   * the user table but never appear in the action logs ("cold" users) fall
   * back to the hot-sell recall list instead. The combined result is written
   * to the MySQL table "usercommend".
   *
   * @param spark active SparkSession used for all reads/writes
   */
  def grade(spark: SparkSession) = {
    // Number of recommended goods kept per user.
    val topN = 10
    // train   = goods the user already interacted with (labelled data)
    // predict = goods the user has not interacted with (to be scored)
    val (train, predict) = LRDataHandler.lrData(spark)

    // Reuse a previously trained LR model from HDFS when available; otherwise
    // train a fresh one and persist it for the next run.
    // FIX: was `var model = null` mutated in both branches — bind the
    // if/else expression to a val instead (no null, no var).
    val modelPath = "/kb08/myshops/lr_model"
    val fs = FileSystem.get(URI.create("hdfs://192.168.152.140:9000"), new Configuration())
    val model: LogisticRegressionModel =
      if (fs.exists(new Path(modelPath))) {
        HdfsConnection.readLRModeFromHdfs(modelPath)
      } else {
        val lr = new LogisticRegression()
          .setMaxIter(20)
          .setLabelCol("label")
          .setFeaturesCol("feature")
          .setRegParam(0.01)
        val trained = lr.fit(train)
        HdfsConnection.writeLRModeToHdfs(trained, modelPath)
        trained
      }

    import spark.implicits._

    // Score the untouched goods; `probability` is a 2-class vector, index 1
    // is the positive-class probability used as the ranking score.
    val scoreWnd = Window.partitionBy("cust_id").orderBy(desc("score"))
    val res = model.transform(predict)
    // Recommendations for "normal" users (users present in the action logs).
    val normal = res.select("cust_id", "good_id", "probability")
      .rdd.map {
        case Row(uid: Double, gid: Double, prob: DenseVector) => (uid, gid, prob(1))
      }.toDF("cust_id", "good_id", "score")
      .select($"cust_id", $"good_id", row_number().over(scoreWnd).as("rank"))
      .filter(s"rank<=${topN}")
      .select(
        $"cust_id".cast("int").as("cust_id"),
        $"good_id".cast("int").as("good_id"),
        $"rank")

    // Cold users: present in the user table but absent from the action logs.
    // Hot-sell recall list (assumed to carry cust_id/good_id/sellnum — TODO
    // confirm against the dwd_hotsell writer).
    val allHot = HdfsConnection.readDataFromHdfs(spark, "/kb08/myshops/dwd_hotsell")
    // Every user that has at least one action in the logs.
    val txt = spark.sparkContext.textFile("file:///F:\\IT\\study\\bigdata\\myact\\logs\\*.log").cache()
    val normalUser = txt.map(line => {
      val arr = line.split(" ")
      (arr(3), "yes")
    }).toDF("cust_id", "flag").distinct().cache()

    // Keep only users with NO logged action (left join + null flag), then
    // rank the hot-sell goods per user by sales volume.
    val hotWnd = Window.partitionBy("cust_id").orderBy(desc("sellnum"))
    val cold = allHot.join(normalUser, Seq("cust_id"), "left")
      .filter("flag is null")
      .select($"cust_id", $"good_id", row_number().over(hotWnd).as("rank"))
      .filter(s"rank<=${topN}")
      // FIX: union below resolves columns by position and requires matching
      // types; cast ids to int exactly as the `normal` branch does.
      .select(
        $"cust_id".cast("int").as("cust_id"),
        $"good_id".cast("int").as("good_id"),
        $"rank")

    // Merge both recommendation sets and persist them to MySQL.
    val tj = normal.union(cold)
    MysqlConnection.writeTable(spark, tj, "usercommend")
  }
}
