package com.niit.mlib

import java.sql.{Connection, DriverManager, PreparedStatement}

import scala.math._
import scala.util.control.NonFatal

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Streaming job that consumes tab-separated order records from Kafka,
 * computes a normalized per-category item weight, and upserts each
 * category's Top-3 items into MySQL every 120-second batch.
 *
 * Record layout (tab separated) — field indices used below:
 *   0: category, 1: product, 2: quantity, 5: rating, 6: valid flag ('Y'/'N').
 *   Fields 3-4 are ignored; TODO confirm their meaning against the producer.
 */
object ItemBase {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("ItemBasedCategoryRecommendation")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.shuffle.partitions", "10")

    val sc = new SparkContext(conf)
    // 120-second micro-batches.
    val ssc = new StreamingContext(sc, Seconds(120))
    ssc.sparkContext.setLogLevel("WARN")

    // Kafka consumer configuration.
    // Auto-commit is disabled, so offsets MUST be committed manually after
    // each successfully processed batch (done below via commitAsync);
    // otherwise the group would restart from "earliest" on every run.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "spark-item-category-group",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("orders")

    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
    )

    // Capture each batch's Kafka offset ranges before any transformation
    // (only the source RDD implements HasOffsetRanges) so they can be
    // committed once the batch has been processed — at-least-once semantics.
    var offsetRanges = Array.empty[OffsetRange]
    val trackedStream = kafkaStream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }

    // ============================= 1. Parsing =============================
    // Malformed records are logged and mapped to a sentinel tuple that the
    // trailing filter drops (empty category / 'N' flag).
    val parsedStream = trackedStream.map { record =>
      val fields = record.value().split("\t")
      try {
        val category = fields(0)
        val product = fields(1)
        val quantity = fields(2).toInt
        val rating = fields(5).toInt
        val isValid = fields(6).charAt(0)

        (category, product, quantity, rating, isValid)
      } catch {
        // NonFatal: let InterruptedException / fatal VM errors propagate.
        case NonFatal(e) =>
          println(s"解析错误: ${record.value()} - ${e.getMessage}")
          ("", "", 0, 0, 'N')
      }
    }.filter(t => t._1.nonEmpty && t._5 == 'Y')

    // ===================== 2. Item weight calculation =====================
    parsedStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Per (category, product) weight: (quantity * rating)^2 emphasises
        // high-volume, highly-rated items. Widen to Double BEFORE multiplying
        // to avoid Int overflow, and aggregate with reduceByKey so partial
        // sums are combined map-side (groupByKey would ship every value).
        val itemWeights = rdd.map { case (category, product, quantity, rating, _) =>
          ((category, product), pow(quantity.toDouble * rating, 2))
        }.reduceByKey(_ + _)

        // Total weight per category — the normalisation denominator.
        val categoryTotalWeights = itemWeights.map {
          case ((category, _), weight) => (category, weight)
        }.reduceByKey(_ + _)

        // Normalise each item's weight to its share of the category total
        // (0.0 when the total is zero, guarding against division by zero).
        val normalizedRDD = itemWeights.map {
          case ((category, product), weight) => (category, (product, weight))
        }.join(categoryTotalWeights).map {
          case (category, ((product, weight), totalWeight)) =>
            val normalizedWeight = if (totalWeight > 0) weight / totalWeight else 0.0
            (category, product, normalizedWeight)
        }

        // ===================== 3. Top-3 per category =====================
        // Produces "product:NN.NN%,..." — the normalized share as a percent.
        val top3PerCategory = normalizedRDD.map {
          case (category, product, weight) => (category, (product, weight))
        }.groupByKey()
          .mapValues { products =>
            products.toArray
              .sortBy(-_._2) // descending by weight
              .take(3)
              .map { case (product, weight) =>
                s"$product:${"%.2f".format(weight * 100)}%"
              }
              .mkString(",")
          }

        // ===================== 4. Persist results =====================
        saveToMySQL(top3PerCategory)

        // Debug preview of up to 5 categories.
        println("\n=== Top3商品预览（归一化后）===")
        top3PerCategory.take(5).foreach { case (category, items) =>
          println(s"$category: $items")
        }
        println("=============================\n")
      } else {
        println("没有新的有效数据需要处理")
      }

      // Commit this batch's offsets only after processing completed, so a
      // failed batch is re-read on restart rather than silently skipped.
      kafkaStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Upserts (category, top3-items-string) pairs into MySQL table `task6`.
   *
   * Column mapping keeps the pre-existing schema: `item_id` holds the
   * category, `similar_items` holds the formatted Top-3 string. Rows are
   * written in batches of 10 with an explicit commit per batch.
   *
   * @param rdd pairs of (category, formatted Top-3 string)
   */
  def saveToMySQL(rdd: org.apache.spark.rdd.RDD[(String, String)]): Unit = {
    // Single partition => single JDBC connection for the whole result set
    // (result cardinality is one row per category, so this stays small).
    val coalescedRDD = rdd.coalesce(1)

    coalescedRDD.foreachPartition { partition =>
      var connection: Connection = null
      var statement: PreparedStatement = null
      try {
        // NOTE(review): credentials are hard-coded here; they should be
        // externalized to configuration/environment, not committed to source.
        connection = DriverManager.getConnection(
          "jdbc:mysql://47.93.166.116:3306/final3?useSSL=false&rewriteBatchedStatements=true",
          "root",
          "Root123!"
        )
        connection.setAutoCommit(false)

        // UPSERT keeps the original table structure and column names.
        val sql =
          "INSERT INTO task6 (item_id, similar_items) VALUES (?, ?) " +
            "ON DUPLICATE KEY UPDATE similar_items = VALUES(similar_items)"

        statement = connection.prepareStatement(sql)

        var count = 0
        partition.foreach { case (category, topItems) =>
          // item_id = category, similar_items = Top-3 string (see scaladoc).
          statement.setString(1, category)
          statement.setString(2, topItems)
          statement.addBatch()
          count += 1

          // Flush and commit every 10 rows.
          if (count % 10 == 0) {
            statement.executeBatch()
            connection.commit()
          }
        }

        // Flush the tail only when the modulo-10 flush above did not already
        // cover it (avoids a redundant empty executeBatch/commit).
        if (count % 10 != 0) {
          statement.executeBatch()
          connection.commit()
        }

        println(s"成功更新/插入 $count 条类别Top3记录")
      } catch {
        case NonFatal(e) =>
          // Best-effort rollback; a failing rollback must not mask the
          // original exception being reported below.
          if (connection != null) {
            try connection.rollback()
            catch { case NonFatal(_) => () }
          }
          println(s"保存数据时出错: ${e.getMessage}")
          e.printStackTrace()
      } finally {
        // Close each resource independently so a throwing statement.close()
        // cannot leak the connection.
        if (statement != null) {
          try statement.close()
          catch { case NonFatal(_) => () }
        }
        if (connection != null) {
          try connection.close()
          catch { case NonFatal(_) => () }
        }
      }
    }
  }
}