package com.niit.mlib

import java.sql.{Connection, DriverManager, PreparedStatement}
import java.util.Properties
import org.apache.spark.sql.types._
import org.apache.spark.sql.{SparkSession, functions => F}
import org.apache.spark.rdd.RDD
import org.apache.spark.broadcast.Broadcast
import scala.math.{pow, sqrt}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer, ConsumerRecords}
import scala.collection.JavaConverters._

object UserBase {
  // --- MySQL connection settings ---
  val DB_URL = "jdbc:mysql://47.93.166.116:3306/final3?useSSL=false"
  val DB_USER = "root"
  val DB_PASSWORD = "Root123!"

  // --- Kafka consumer settings ---
  val KAFKA_BOOTSTRAP_SERVERS = "192.168.136.128:9092"
  val KAFKA_TOPIC = "orders"
  val KAFKA_GROUP_ID = "combined-recommendation-group"

  // --- Recommendation tuning ---
  val RECOMMENDATION_COUNT = 5   // max products recommended per user
  val SIMILARITY_THRESHOLD = 0.1 // minimum cosine similarity for a neighbour to count

  // Reference layout of the `user_behavior` table.
  // NOTE(review): the Spark JDBC source does NOT accept a user-specified schema
  // (recent Spark versions raise AnalysisException "... does not allow
  // user-specified schemas"), so this value is kept for documentation only and
  // is no longer passed to the reader below.
  val behaviorSchema = StructType(Array(
    StructField("id", LongType, nullable = true),
    StructField("user_id", StringType, nullable = true),
    StructField("product_id", StringType, nullable = true),
    StructField("rating", DoubleType, nullable = true),
    StructField("create_time", TimestampType, nullable = true)
  ))

  /**
   * Entry point. Modes:
   *  - "consumer":  run the Kafka -> MySQL ingestion loop only
   *  - "processor": run one batch of recommendation computation only
   *  - "both":      ingestion in a background thread + recomputation every 60s
   */
  def main(args: Array[String]): Unit = {
    val mode = if (args.length > 0) args(0) else "both"

    mode match {
      case "consumer" => runKafkaConsumer()
      case "processor" => runRecommendationProcessor()
      case "both" =>
        val consumerThread = new Thread(() => runKafkaConsumer())
        consumerThread.start()
        while (true) {
          Thread.sleep(60000)
          runRecommendationProcessor()
        }
      case _ => println("无效模式参数。可用模式: consumer, processor, both")
    }
  }

  /**
   * Endless ingestion loop: reads tab-separated order records from Kafka and
   * upserts (user_id, product_id, rating) rows into MySQL `user_behavior`.
   *
   * Expected record layout (>= 6 tab-separated fields):
   *   field 1 -> product_id, field 4 -> user_id, field 5 -> rating
   * Malformed records (too few fields or a non-numeric rating) are logged and
   * skipped so one bad record cannot kill the whole loop.
   */
  def runKafkaConsumer(): Unit = {
    val props = new Properties()
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BOOTSTRAP_SERVERS)
    props.put(ConsumerConfig.GROUP_ID_CONFIG, KAFKA_GROUP_ID)
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val consumer = new KafkaConsumer[String, String](props)
    consumer.subscribe(java.util.Collections.singletonList(KAFKA_TOPIC))

    var connection: Connection = null
    var insertStmt: PreparedStatement = null

    try {
      connection = DriverManager.getConnection(DB_URL, DB_USER, DB_PASSWORD)
      connection.setAutoCommit(false) // one commit per poll, not per row

      insertStmt = connection.prepareStatement(
        "INSERT INTO user_behavior (user_id, product_id, rating) VALUES (?, ?, ?) " +
          "ON DUPLICATE KEY UPDATE rating = VALUES(rating)"
      )

      println("开始从Kafka消费数据...")

      while (true) {
        // poll(long) is deprecated since Kafka 2.0 -- use the Duration overload.
        val records: ConsumerRecords[String, String] = consumer.poll(java.time.Duration.ofMillis(100))
        var batched = 0
        for (record <- records.asScala) {
          val fields = record.value().split("\t")
          if (fields.length >= 6) {
            try {
              val rating = fields(5).toDouble // may be non-numeric -> skip record
              insertStmt.setString(1, fields(4)) // user_id
              insertStmt.setString(2, fields(1)) // product_id
              insertStmt.setDouble(3, rating)
              insertStmt.addBatch() // batch rows instead of one round trip each
              batched += 1
            } catch {
              case _: NumberFormatException =>
                println(s"无效记录: ${record.value()}")
            }
          } else {
            println(s"无效记录: ${record.value()}")
          }
        }
        if (batched > 0) {
          insertStmt.executeBatch()
          connection.commit()
        }
      }
    } catch {
      case e: Exception =>
        e.printStackTrace()
        // rollback can itself fail on a broken connection -- best effort only,
        // never mask the original exception.
        if (connection != null) {
          try connection.rollback() catch { case _: Exception => () }
        }
    } finally {
      if (insertStmt != null) insertStmt.close()
      if (connection != null) connection.close()
      consumer.close()
    }
  }

  /**
   * One batch of user-based collaborative filtering:
   *  1. load `user_behavior` from MySQL,
   *  2. build per-user (product -> rating) maps,
   *  3. cosine similarity over every ordered user pair (cartesian product),
   *  4. recommend the top-N items that similar users rated but the target
   *     user has not, deduplicated per item,
   *  5. upsert results into `user_recommendations`.
   */
  def runRecommendationProcessor(): Unit = {
    println("开始计算用户推荐...")
    val spark = SparkSession.builder()
      .appName("UserBasedRecommendationProcessor")
      .master("local[*]")
      .getOrCreate()

    try {
      // BUG FIX: the JDBC source rejects a user-specified schema
      // (`.schema(behaviorSchema)` fails on recent Spark), so let Spark infer
      // the schema from the database table itself.
      val userBehaviorDF = spark.read
        .format("jdbc")
        .option("url", DB_URL)
        // NOTE(review): legacy driver class; use com.mysql.cj.jdbc.Driver if
        // the cluster ships Connector/J 8+ -- confirm against the deployed jar.
        .option("driver", "com.mysql.jdbc.Driver")
        .option("dbtable", "user_behavior")
        .option("user", DB_USER)
        .option("password", DB_PASSWORD)
        .load()
        .filter("rating IS NOT NULL") // guards the getAs[Double] below

      // user -> Map(product -> rating). Cached because it is consumed three
      // times (collectAsMap + both sides of the cartesian) -- without cache()
      // each use would rescan the JDBC table.
      val userItemMatrix = userBehaviorDF.rdd
        .map(row => (
          row.getAs[String]("user_id"),
          (row.getAs[String]("product_id"), row.getAs[Double]("rating"))
        ))
        .groupByKey()
        .map { case (user, items) => (user, items.toMap) }
        .cache()

      // Driver-side copy, broadcast for the item-set lookups in flatMap below.
      val userItemMap = userItemMatrix.collectAsMap()
      val broadcastUserItemMap = spark.sparkContext.broadcast(userItemMap)

      // Cosine similarity for every ordered pair (u1, u2), u1 != u2. Both
      // directions are needed because recommendations are generated for the
      // left-hand user only.
      val userSimilarity = userItemMatrix.cartesian(userItemMatrix)
        .filter { case (a, b) => a._1 != b._1 } // skip self-pairs
        .map { case ((u1, items1), (u2, items2)) =>
          val commonItems = items1.keySet.intersect(items2.keySet)
          if (commonItems.isEmpty) (u1, u2, 0.0)
          else {
            val dotProduct = commonItems.map(item => items1(item) * items2(item)).sum
            val norm1 = sqrt(items1.values.map(x => x * x).sum)
            val norm2 = sqrt(items2.values.map(x => x * x).sum)
            val similarity = if (norm1 * norm2 == 0) 0.0 else dotProduct / (norm1 * norm2)
            (u1, u2, similarity)
          }
        }.filter(_._3 > SIMILARITY_THRESHOLD)

      // For each similar neighbour u2 of u1, candidate items are those u2
      // rated but u1 has not, weighted by the pair's similarity.
      val userRecommendations = userSimilarity.flatMap { case (u1, u2, sim) =>
        val itemsMap = broadcastUserItemMap.value
        val itemsU2 = itemsMap.getOrElse(u2, Map.empty[String, Double]).keySet
        val itemsU1 = itemsMap.getOrElse(u1, Map.empty[String, Double]).keySet
        itemsU2.diff(itemsU1).map(item => (u1, (item, sim)))
      }.groupByKey().map { case (user, recItems) =>
        // BUG FIX: the same item can arrive via several neighbours; dedupe by
        // item (keeping the best similarity) so top-N contains N DISTINCT
        // products instead of possibly repeating one.
        val topRec = recItems
          .groupBy(_._1)
          .map { case (item, scored) => (item, scored.map(_._2).max) }
          .toArray
          .sortBy(-_._2) // best similarity first
          .take(RECOMMENDATION_COUNT)
          .map(_._1)
        (user, topRec.mkString(","))
      }

      // Upsert recommendations, one connection + one batched transaction per
      // partition (executors cannot share a driver-side connection).
      userRecommendations.foreachPartition { partition =>
        var connection: Connection = null
        var insertStmt: PreparedStatement = null
        try {
          connection = DriverManager.getConnection(DB_URL, DB_USER, DB_PASSWORD)
          connection.setAutoCommit(false)
          insertStmt = connection.prepareStatement(
            "INSERT INTO user_recommendations (user_id, recommended_products) " +
              "VALUES (?, ?) ON DUPLICATE KEY UPDATE recommended_products = VALUES(recommended_products)"
          )
          var pending = 0
          partition.foreach { case (userId, products) =>
            insertStmt.setString(1, userId)
            insertStmt.setString(2, products)
            insertStmt.addBatch() // batched write instead of per-row round trips
            pending += 1
          }
          if (pending > 0) insertStmt.executeBatch()
          connection.commit()
        } catch {
          case e: Exception =>
            e.printStackTrace()
            if (connection != null) {
              try connection.rollback() catch { case _: Exception => () }
            }
        } finally {
          if (insertStmt != null) insertStmt.close()
          if (connection != null) connection.close()
        }
      }

      // Release cached/broadcast state before the session goes away.
      broadcastUserItemMap.unpersist()
      userItemMatrix.unpersist()

      println("推荐计算完成，结果已存入数据库")

    } catch {
      case e: Exception =>
        e.printStackTrace()
    } finally {
      spark.stop()
    }
  }
}