package com.xxh.user.rec

import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}

object onlineRec {

  /**
   * Entry point: consumes rating log lines from a local Kafka topic and, for now,
   * simply prints each raw message value every micro-batch.
   *
   * The commented-out section below sketches the planned online recommendation
   * pipeline (parse rating -> read recent ratings from Redis -> look up similar
   * movies -> score candidates -> persist recommendations); it is kept as a
   * roadmap and is not yet implemented.
   *
   * Blocks forever on `awaitTermination()` until the job is stopped externally.
   */
  def main(args: Array[String]): Unit = {
    // Local-mode Spark configuration using all available cores.
    val conf = new SparkConf().setMaster("local[*]").setAppName("diy_Receiver")
    // Connect to Spark.
    val ct = new SparkContext(conf)
    // Streaming context with a 3-second micro-batch (collection) interval.
    val ssc = new StreamingContext(ct, Seconds(3))

    // Kafka consumer configuration. ConsumerConfig constants are used for every
    // key (the constants resolve to the same strings, e.g. "key.deserializer"),
    // keeping the map consistent instead of mixing constants and raw literals.
    val kafkaParam: Map[String, Object] = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> "localhost:9092",
      ConsumerConfig.GROUP_ID_CONFIG -> "test",
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
      ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest"
    )

    // Direct (receiver-less) stream over the "test" topic.
    val logStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils
      .createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        // 1: topic names to subscribe to
        // 2: consumer configuration
        ConsumerStrategies.Subscribe[String, String](Set("test"), kafkaParam)
      )

    // Planned streaming recommendation pipeline (not yet implemented):
//    val ratingStreaming: DStream[(Int, Int, Double, Int)] = logStream.map {
//      msg => {
//        // uid mid score time
//        var fields = msg.value().split("\\|")
//        (fields(0).toInt, fields(1).toInt, fields(2).toDouble, fields(3).toInt)
//      }
//    }
//
//    ratingStreaming.foreachRDD{
//      rdds=> rdds.foreach{
//        case (uid,mid,score,time)=>{
//          // 1. Fetch the user's most recent K ratings from Redis as Array[(mid, score)].
//
//          // 2. Fetch the N movies most similar to the current movie from the
//          //    similarity matrix as the candidate list.
//
//          // 3. Compute a priority score for each candidate movie.
//
//          // 4. Save the recommended movies to the DB.
//
//        }
//      }
//    }

    // For now just surface the raw message payloads so the pipeline can be observed.
    val value: DStream[String] = logStream.map(_.value())
    value.print()

    // Start streaming and block until termination.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Persists `df` into the `collectionName` collection of the local `movierec`
   * MongoDB database via the MongoDB Spark connector.
   *
   * Uses `overwrite` mode, so any existing contents of the collection are replaced.
   *
   * @param df             the DataFrame to save
   * @param collectionName target MongoDB collection name
   */
  def SaveToDB(df: DataFrame, collectionName: String): Unit = {
    df.write
      .option("uri", "mongodb://localhost:27017/movierec")
      .option("collection", collectionName)
      .format("com.mongodb.spark.sql")
      .mode("overwrite")
      .save()
  }

  /**
   * Loads a MongoDB collection as a DataFrame.
   *
   * Connection settings (uri/database) are inherited from the SparkSession's
   * configuration via `ReadConfig(sc)`; only the collection name is overridden.
   *
   * @param collectionName MongoDB collection to read
   * @param sc             active SparkSession carrying the Mongo connection config
   * @return the collection's contents as a DataFrame
   */
  def readDataFrameFromMongo(collectionName: String, sc: SparkSession): DataFrame = {
    val readConfig = ReadConfig(Map("collection" -> collectionName), Some(ReadConfig(sc)))
    // Last expression is the result; no `return` keyword needed.
    MongoSpark.load(sc, readConfig)
  }

}
