package cn.itcast.edu.analysis.streaming

import cn.itcast.edu.bean.Answer
import cn.itcast.edu.utils.RedisUtil
import com.google.gson.Gson
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.ml.recommendation.ALSModel
import org.apache.spark.{SparkContext, streaming}
import org.apache.spark.sql.{DataFrame, Dataset, SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import redis.clients.jedis.Jedis

/**
 * Consumes answer messages (each carrying a student id) from Kafka, reads the
 * current recommendation-model path from Redis, loads the ALSModel from that
 * path, and uses it to recommend error-prone questions to the active students.
 */
object StreamingRecommend {
  def main(args: Array[String]): Unit = {
    // TODO 0. Prepare the environment.
    val spark: SparkSession = SparkSession.builder()
      .appName("StreamingRecommend") // keep the app name consistent with the object name
      .master("local[*]")
      // Keep shuffle partitions small for local testing; tune for cluster size in production (default 200).
      .config("spark.sql.shuffle.partitions", "4")
      .getOrCreate()
    val sc: SparkContext = spark.sparkContext
    sc.setLogLevel("WARN")
    import spark.implicits._ // imported once (the original imported it twice)
    import org.apache.spark.sql.functions._
    // 5-second micro-batches; use the directly imported Seconds instead of streaming.Seconds.
    val ssc: StreamingContext = new StreamingContext(sc, Seconds(5))

    // TODO 1. Load data from Kafka.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.1.158:9092", // Kafka broker address
      "key.deserializer" -> classOf[StringDeserializer], // key deserializer
      "value.deserializer" -> classOf[StringDeserializer], // value deserializer
      "group.id" -> "StreamingRecommend", // consumer group name
      // earliest: resume from the committed offset if present, otherwise start from the oldest message
      // latest:   resume from the committed offset if present, otherwise start from the newest message
      // none:     resume from the committed offset if present, otherwise fail
      "auto.offset.reset" -> "latest",
      "auto.commit.interval.ms" -> "1000", // auto-commit interval
      "enable.auto.commit" -> (true: java.lang.Boolean) // commit offsets automatically
    )
    val topics = Array("edu") // topics to subscribe to
    val kafkaDS: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )

    // TODO 2. Process: keep only the JSON payload of each record.
    val valueDS: DStream[String] = kafkaDS.map(_.value())

    // Example payload:
    // {"student_id":"学生ID_19","textbook_id":"教材ID_2","grade_id":"年级ID_1","subject_id":"科目ID_3_英语","chapter_id":"章节ID_chapter_1","question_id":"题目ID_1214","score":8,"answer_time":"2021-11-14 11:21:48","ts":"Nov 14, 2021 11:21:48 AM"}
    // For each non-empty micro-batch: fetch the current model path from Redis,
    // load the ALS model, compute recommendations and persist them to MySQL.
    valueDS.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        val jedis = RedisUtil.pool.getResource
        // try/finally so the connection is returned to the pool even when a
        // batch fails (the original leaked the connection on any exception).
        try {
          // NOTE(review): the model is reloaded from disk on every batch; if the
          // path rarely changes, caching the loaded model per path would save I/O.
          val path: String = jedis.hget("als_model", "recommended_question_id")
          val model: ALSModel = ALSModel.load(path)

          // Parse every JSON record into an Answer bean.
          val answerDF: DataFrame = rdd.coalesce(1).map { json =>
            val gson = new Gson()
            gson.fromJson(json, classOf[Answer])
          }.toDF

          // The ALS model needs numeric user ids: "学生ID_19" -> 19.
          val id2int = udf((student_id: String) => student_id.split("_")(1).toInt)
          val studentIdDF = answerDF.select(id2int('student_id) as "student_id")

          // Recommend 10 questions for each student currently answering.
          // Result schema:
          // |-- student_id: integer
          // |-- recommendations: array<struct<question_id:int, rating:float>>
          val recommendDF: DataFrame = model.recommendForUserSubset(studentIdDF, 10)

          // 1. Convert the numeric ids back to the string format and join the
          //    recommended question ids into a single comma-separated string.
          val recommendResult: DataFrame = recommendDF
            .as[(Int, Array[(Int, Float)])]
            .map { case (sid, recs) =>
              val studentId: String = "学生ID_" + sid
              val questionIds: String = "题目ID_" + recs.map(_._1).mkString(",")
              (studentId, questionIds)
            }
            .toDF("student_id", "recommendations")

          // 2. Attach the remaining answer information.
          val allInfoDF: DataFrame = answerDF.join(recommendResult, "student_id")

          // Cache before counting so the write below does not recompute the whole
          // lineage (the original called count() twice, re-running the join).
          allInfoDF.cache()
          val total = allInfoDF.count()
          if (total > 0) {
            val properties = new java.util.Properties()
            properties.setProperty("user", "root")
            properties.setProperty("password", "root")
            allInfoDF
              .coalesce(1)
              .write
              .mode(SaveMode.Append) // Append adds rows; Overwrite would replace the table
              .jdbc("jdbc:mysql://localhost:3306/gubanjie?useUnicode=true&characterEncoding=utf8",
                "t_recommended", properties)
          }
          allInfoDF.unpersist()
        } finally {
          // Always return the Redis connection to the pool.
          jedis.close()
        }
      }
    }

    // TODO 3/4. Start and block until the streaming context is stopped externally.
    // The original called ssc.stop() after awaitTermination(), which never runs
    // because awaitTermination() only returns once the context is already stopped;
    // set spark.streaming.stopGracefullyOnShutdown=true for a graceful shutdown hook.
    ssc.start()
    ssc.awaitTermination()
  }
}
