package rate.recommend.streaming

import java.lang

import com.google.gson.Gson
import rate.bean.Rate
import rate.utils.RedisUtil
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.ml.recommendation.ALSModel
import org.apache.spark.{SparkContext, streaming}
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

/**
 * Desc Consumes rating messages from Kafka, loads the trained ALS model, and
 * uses it to generate recommendations for the users seen in each batch.
 */
object StreamingRecommend {
    /**
     * Entry point: builds a local Spark streaming context, subscribes to the
     * "rate" Kafka topic, and for every non-empty 5-second batch loads the ALS
     * model whose path is published in Redis, computes top-5 item
     * recommendations for the users in the batch, and writes them to Redis.
     */
    def main(args: Array[String]): Unit = { // prepare the environment
        val spark: SparkSession = SparkSession.builder()
          .appName("实时推荐")
          .master("local[*]")
          .config("spark.sql.shuffle.partitions", "4")
          .getOrCreate()
        val sc: SparkContext = spark.sparkContext
        sc.setLogLevel("WARN")
        // 5-second micro-batch interval
        val ssc = new StreamingContext(sc, streaming.Seconds(5))
        import spark.implicits._

        // Kafka consumer configuration
        val kafkaParams: Map[String, Object] = Map[String, Object](
            "bootstrap.servers" -> "192.168.17.100:9092", // Kafka broker address
            "key.deserializer" -> classOf[StringDeserializer], // key deserializer
            "value.deserializer" -> classOf[StringDeserializer], // value deserializer
            "group.id" -> "StreamingRecommend", // consumer group name
            "auto.offset.reset" -> "latest", // resume from a committed offset if one exists, otherwise start at the newest message
            "auto.commit.interval.ms" -> "1000", // auto-commit interval
            "enable.auto.commit" -> (true: lang.Boolean) // commit offsets automatically
        )
        val topics = Array("rate")
        val kafkaDStream: InputDStream[ConsumerRecord[String, String]] =
            KafkaUtils.createDirectStream[String, String](ssc,
                LocationStrategies.PreferConsistent,
                ConsumerStrategies.Subscribe[String, String](topics, kafkaParams) // subscribe
            )

        // Keep only the JSON payload of each Kafka record
        val valueDStream: DStream[String] = kafkaDStream.map(record => {
            record.value()
        })

        valueDStream.foreachRDD(rdd => {
            if (!rdd.isEmpty()) {
                // Runs on the driver once per non-empty batch.
                // The model path is re-read from Redis every batch so that a newly
                // trained model is picked up without restarting the job.
                // FIX: release the pooled connection in a finally block — previously
                // it was closed only after the whole batch succeeded, so any failure
                // below (e.g. in ALSModel.load) leaked the connection from the pool.
                val jedis = RedisUtil.pool.getResource
                val modelPath =
                    try jedis.hget("rec_als_model", "model_path")
                    finally jedis.close()

                // Load the ALS model from the path published in Redis
                val model = ALSModel.load(modelPath)

                // Parse each JSON message into a Rate bean
                val rateDF = rdd.coalesce(1).map(json => {
                    val gson = new Gson()
                    gson.fromJson(json, classOf[Rate])
                }).toDF

                val userIdDF = rateDF.select("user_id")
                // Top-5 recommendations for the users seen in this batch
                val recommendDF = model.recommendForUserSubset(userIdDF, 5)
                recommendDF.printSchema()
                recommendDF.show(false)

                // Persist results as hash "用户ID_<id>" -> field "recommend" ->
                // comma-joined "物品ID_<id>" list. This closure executes on the
                // executors, so each task acquires (and must reliably release)
                // its own Redis connection.
                recommendDF.as[(Int, Array[(Int, Float)])].foreach(t => {
                    val userIdStr: String = "用户ID_" + t._1
                    val itemIdsStr: String = t._2.map("物品ID_" + _._1).mkString(",")
                    val jedis = RedisUtil.pool.getResource
                    // FIX: finally-close so a failed hset no longer leaks the connection
                    try jedis.hset(userIdStr, "recommend", itemIdsStr)
                    finally jedis.close()
                    ()
                })
            }
        })

        // Start the job and block until it is terminated externally
        ssc.start()
        ssc.awaitTermination()
        // Reached only after termination; release resources
        ssc.stop(stopSparkContext = true, stopGracefully = true)
    }
}
