package com.niit.mlib

import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import java.sql.{Connection, DriverManager, PreparedStatement}

object Demo {

  // Database connection settings.
  // NOTE(review): credentials are hard-coded in source; move them to
  // configuration or environment variables before production use.
  private val DB_URL = "jdbc:mysql://47.93.166.116:3306/final3"
  private val DB_USER = "root"
  private val DB_PASSWORD = "Root123!"

  /**
   * Entry point: consumes tab-separated order records from Kafka,
   * computes item-item cosine similarity per 60-second micro-batch,
   * and upserts each category's top-N most similar categories into MySQL.
   *
   * Expected record layout (tab-separated): field 0 = product/category,
   * field 4 = user id, field 5 = rating — TODO confirm against the producer.
   */
  def main(args: Array[String]): Unit = {
    // Spark configuration: local mode, Kryo serialization, few shuffle partitions.
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("ItemBasedCategoryRecommendation")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.shuffle.partitions", "10")

    val sc = new SparkContext(conf)
    // 60-second micro-batches.
    val ssc = new StreamingContext(sc, Seconds(60))
    ssc.sparkContext.setLogLevel("WARN")

    // Kafka consumer configuration. Auto-commit is disabled but offsets are
    // never committed manually, so a restart re-reads from "earliest" —
    // NOTE(review): commit offsets after each batch if at-least-once
    // duplication into MySQL is a concern (the upsert makes it idempotent).
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "spark-item-category-group",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("orders")

    // Direct stream over the "orders" topic.
    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    val lines = kafkaStream.map(_.value())

    val maxPrefsPerUser = 20 // cap on preferences considered per user
    val topn = 4             // store the top-4 recommendations per category

    // Step 1: parse, cap preferences per user, and L2-normalize each item's
    // rating vector. Malformed lines (too few fields, non-numeric rating)
    // are skipped instead of crashing the whole batch.
    val ui_rdd = lines.flatMap { line =>
      val fields = line.split("\t")
      if (fields.length >= 6) {
        try Some((fields(4), (fields(0), fields(5).toDouble)))
        catch { case _: NumberFormatException => None }
      } else None
    }.groupByKey().flatMap { case (user, prefs) =>
      // Keep at most maxPrefsPerUser preferences, re-keyed by item.
      prefs.take(maxPrefsPerUser).map { case (item, rating) =>
        (item, (user, rating))
      }
    }.groupByKey().flatMap { case (item, userRatings) =>
      val ratings = userRatings.toArray
      // L2 norm of this item's rating vector across users.
      val norm = math.sqrt(ratings.iterator.map(r => r._2 * r._2).sum)
      // Guard against a zero norm (all ratings 0), which would produce NaN.
      if (norm > 0.0)
        ratings.iterator.map { case (user, rating) =>
          (user, (item, rating / norm))
        }.toSeq
      else
        Seq.empty
    }.groupByKey()

    // Step 2: for each user, emit every item pair (both orientations) with
    // the product of their normalized ratings as the pair's contribution.
    val unpack_rdd = ui_rdd.flatMap { case (_, itemScores) =>
      val arr = itemScores.toArray
      for {
        i <- 0 until arr.length - 1
        j <- i + 1 until arr.length
        contribution = arr(i)._2 * arr(j)._2
        pair <- Seq(
          ((arr(i)._1, arr(j)._1), contribution),
          ((arr(j)._1, arr(i)._1), contribution)
        )
      } yield pair
    }

    // Step 3: sum pair contributions into cosine similarity, group by the
    // source item, and upsert each item's top-N recommendations into MySQL.
    unpack_rdd.groupByKey().map { case ((itemA, itemB), contributions) =>
      (itemA, (itemB, contributions.sum))
    }.groupByKey().foreachRDD { rdd =>
      rdd.foreachPartition { partitionOfRecords =>
        var connection: Connection = null
        var pstmt: PreparedStatement = null

        try {
          // NOTE(review): "com.mysql.jdbc.Driver" is the legacy driver class;
          // Connector/J 8+ uses "com.mysql.cj.jdbc.Driver".
          Class.forName("com.mysql.jdbc.Driver")
          connection = DriverManager.getConnection(DB_URL, DB_USER, DB_PASSWORD)

          // Prepare the upsert ONCE per partition and reuse it per record
          // (previously it was re-prepared and closed for every record).
          val sql =
            "INSERT INTO task7 (category_name, top4_recommendations) " +
              "VALUES (?, ?) " +
              "ON DUPLICATE KEY UPDATE top4_recommendations = VALUES(top4_recommendations), " +
              "update_time = CURRENT_TIMESTAMP"
          pstmt = connection.prepareStatement(sql)

          partitionOfRecords.foreach { case (itemA, similar) =>
            // Highest similarity first; keep at most `topn` recommendations.
            val top = similar.toList.sortWith(_._2 > _._2).take(topn)
            // Render as "item:score,item:score,..." with 4-decimal scores.
            val recommendations = top
              .map { case (item, score) => s"$item:${"%.4f".format(score)}" }
              .mkString(",")

            pstmt.setString(1, itemA)
            pstmt.setString(2, recommendations)
            pstmt.executeUpdate()
          }
        } catch {
          // Log non-fatal errors; let fatal JVM errors (OOM, etc.) propagate.
          case NonFatal(e) => e.printStackTrace()
        } finally {
          if (pstmt != null) pstmt.close()
          if (connection != null) connection.close()
        }
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}