package com.lpssfxy.online.utils

import org.apache.log4j.Logger
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * AppUtils provides helpers for configuring and managing Spark, Kafka and
 * MongoDB related operations: creating Spark and Streaming contexts, reading
 * runtime configuration from the command line, and shared constants
 * (collection names, connection endpoints, Kafka consumer settings).
 */
object AppUtils {

  private val logger = Logger.getLogger(getClass)

  // MongoDB collection names used by the recommender jobs.
  val MONGODB_RATING_COLLECTION = "rating"
  val STREAM_RECS = "streamRecs"
  val PRODUCT_RECS = "productRecs"

  // Upper bounds on how many recent user ratings / similar products are considered.
  val MAX_USER_RATING_NUM = 20
  val MAX_SIM_PRODUCTS_NUM = 20

  // Connection constants extracted here to avoid magic values scattered in code.
  // SECURITY NOTE(review): credentials (REDIS_PASSWORD, MONGO_URI) are hard-coded
  // in source — move them to external configuration (environment variables or a
  // config file) before this leaves development.
  val REDIS_HOST = "192.168.85.144"
  val REDIS_PORT = 6379
  val REDIS_PASSWORD = "openGauss_1234"
  val MONGO_URI = "mongodb://fooadmin:123456@s3:27017/bigdata"
  val MONGO_DB = "bigdata"
  val KAFKA_TOPIC = "recommender"
  val SPARK_CORES = "local[*]"

  // Kafka consumer configuration. Public member, so the type is annotated
  // explicitly rather than left to inference. Offsets are managed manually
  // ("enable.auto.commit" -> "false") and reading starts from the earliest
  // available offset for a new consumer group.
  val kafkaParam: Map[String, String] = Map(
    "bootstrap.servers" -> "s1:9092,s2:9092,s3:9092",
    "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
    "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
    "group.id" -> "recommender",
    "auto.offset.reset" -> "earliest",
    "enable.auto.commit" -> "false"
  )

  /**
   * Resolves the Spark master URL from the command-line arguments.
   *
   * If no argument was supplied, prints a usage hint and terminates the JVM
   * with exit code 1 (original behavior preserved).
   *
   * @param args command-line arguments; args(0) must be the master URL,
   *             e.g. "local[*]" or "yarn"
   * @return the master URL taken from args(0)
   */
  def getMaster(args: Array[String]): String = {
    if (args.isEmpty) {
      // Usage hint intentionally kept in its original wording.
      println("请传入 master 参数，例如：local[*] 或 yarn 等")
      System.exit(1)
    }
    args(0)
  }

  /**
   * Creates the StreamingContext together with its underlying SparkContext
   * and SparkSession.
   *
   * @param appName      application name shown in the Spark UI
   * @param sparkCores   master string (e.g. "local[*]"); currently UNUSED
   *                     because setMaster is commented out so the master can
   *                     be supplied externally (e.g. via spark-submit) —
   *                     parameter kept for source compatibility
   * @param batchSeconds micro-batch interval in seconds (defaults to 2,
   *                     preserving the previous hard-coded behavior)
   * @return tuple of (SparkContext, SparkSession, StreamingContext)
   */
  def createStreamingContextEnv(appName: String, sparkCores: String,
                                batchSeconds: Long = 2L): (SparkContext, SparkSession, StreamingContext) = {
    // Master is intentionally NOT set here so it can come from spark-submit.
    val sparkConf = new SparkConf().setAppName(appName) //.setMaster(sparkCores)
    val spark = SparkSession.builder().config(sparkConf).getOrCreate()
    val sc = spark.sparkContext
    val ssc = new StreamingContext(sc, Seconds(batchSeconds))
    (sc, spark, ssc)
  }

  /**
   * Builds a direct Kafka stream on KAFKA_TOPIC and parses each record value
   * into a rating tuple.
   *
   * Record values are expected in the form "userId|productId|score|timestamp".
   * Fields are trimmed before conversion so stray whitespace around the '|'
   * separators does not break parsing.
   *
   * NOTE: the try/catch only covers stream CONSTRUCTION on the driver; the
   * map function runs lazily on executors, so malformed records fail there,
   * not here. Also note the timestamp is carried as Int (seconds), which
   * overflows in 2038 — the declared return type is kept for caller
   * compatibility.
   *
   * @param streamingContext the streaming context to attach the stream to
   * @return DStream of (userId, productId, score, timestamp)
   */
  def createKafkaStream(streamingContext: StreamingContext): DStream[(Int, Int, Double, Int)] = {
    try {
      val kafkaStream = KafkaUtils.createDirectStream[String, String](
        streamingContext,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](Array(KAFKA_TOPIC), kafkaParam)
      )
      kafkaStream.map { msg =>
        val attr = msg.value().split("\\|")
        (attr(0).trim.toInt, attr(1).trim.toInt, attr(2).trim.toDouble, attr(3).trim.toInt)
      }
    } catch {
      case e: Exception =>
        logger.error("[CUSTOM]Error creating Kafka stream", e)
        throw e
    }
  }
}