import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import java.util.Properties
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization

object SparkStream {

    def main(args: Array[String]): Unit = {
      // Build the Spark configuration: local mode, backpressure enabled so the
      // Kafka ingest rate adapts to processing speed.
      val conf = new SparkConf()
        .setMaster("local[*]")
        .setAppName("StudentStatsStream")
        .set("spark.executor.memory", "2g")
        .set("spark.driver.memory", "2g")
        .set("spark.executor.cores", "8")
        .set("spark.streaming.backpressure.enabled", "true")
        .set("spark.streaming.backpressure.initialRate", "1000")
        .set("spark.streaming.stopGracefullyOnShutdown", "true") // graceful shutdown
        .set("spark.sql.shuffle.partitions", "4") // fewer shuffle partitions to avoid wasting resources

      // StreamingContext with 5-second micro-batches, plus a SparkSession
      // sharing the same configuration (needed for Spark SQL below).
      val ssc = new StreamingContext(conf, Seconds(5)) // batch interval
      val spark = SparkSession.builder.config(conf).getOrCreate()

      import spark.implicits._
      ssc.sparkContext.setLogLevel("ERROR")

      // MySQL connection settings used by the JDBC writer.
      val mysqlUrl = "jdbc:mysql://localhost:3306/stuInfo"
      val mysqlProperties = new Properties()
      mysqlProperties.put("user", "root")
      mysqlProperties.put("password", "123456")
      mysqlProperties.put("driver", "com.mysql.cj.jdbc.Driver")
      mysqlProperties.put("rewriteBatchedStatements", "true") // enable JDBC batch rewriting
      mysqlProperties.put("useSSL", "false") // disable SSL (adjust to the MySQL setup)

      // Kafka consumer settings; auto-commit is disabled, so offsets are only
      // advanced by the consumer group's own management.
      val kafkaParams = Map[String, Object](
        "bootstrap.servers" -> "192.168.235.128:9092",
        "key.deserializer" -> classOf[StringDeserializer],
        "value.deserializer" -> classOf[StringDeserializer],
        "group.id" -> "student_stats_group",
        "auto.offset.reset" -> "latest",
        "enable.auto.commit" -> (false: java.lang.Boolean)
      )

      // Kafka producer used to publish the computed statistics (driver side).
      // NOTE(review): the producer is never closed/flushed on shutdown — verify
      // whether buffered messages may be lost on stop.
      val producerProps = new Properties()
      producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.235.128:9092")
      producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
      producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
      val producer = new KafkaProducer[String, String](producerProps)


      // Subscribe to the input topic.
      val topics = Array("stuInfo")
      val stream = KafkaUtils.createDirectStream[String, String](
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)
      )

      // Parse each record into tab-separated fields and keep only well-formed
      // 8-field rows. Field layout inferred from usage below — confirm:
      // 0=class, 1=user id, 2=gender flag ('1'=male), 5=semester,
      // 6=comma-separated GPA list, 7=status.
      val dataStream = stream.map(_.value()).map(_.split("\t")).filter(_.length == 8)

      // json4s formats for serializing the Kafka result messages.
      implicit val formats = DefaultFormats
      // 辅助函数：发送结果到Kafka
      // Helper: serialize an (introduction, result) pair as a JSON object and
      // publish it to the given Kafka topic. Delivery is best-effort: any
      // failure is logged and swallowed so the stream keeps running.
      def sendResultToKafka(topic: String, introduction: String, result: String): Unit = {
        val payload = Map("introduction" -> introduction, "result" -> result)
        try {
          val record = new ProducerRecord[String, String](topic, Serialization.write(payload))
          producer.send(record)
        } catch {
          case e: Exception =>
            // Report the failure but do not propagate it to the caller.
            println(s"Error sending to Kafka: ${e.getMessage}")
            e.printStackTrace()
        }
      }

      /**
        * Safely upsert a statistics DataFrame into the given MySQL table.
        *
        * Each non-empty partition opens its own JDBC connection, binds every
        * row into a table-specific "INSERT ... ON DUPLICATE KEY UPDATE"
        * statement, and commits the whole partition as one batch
        * (auto-commit disabled). Unsupported table names raise
        * IllegalArgumentException.
        *
        * Fixes vs. previous version:
        *  - The connection is opened with `mysqlProperties`, so the configured
        *    driver, `useSSL` and `rewriteBatchedStatements` options actually
        *    take effect (before, only hard-coded user/password were passed and
        *    the rest of the configuration was silently ignored).
        *  - `rollback()` and the auto-commit restore are guarded so a dead
        *    connection cannot mask the original exception or prevent
        *    `close()` from running.
        *
        * @param df        rows in the column order expected by `tableName`
        * @param tableName one of the supported statistics tables
        */
      def safeWriteToMySQL(df: org.apache.spark.sql.DataFrame, tableName: String): Unit = {
        try {
          // foreachPartition keeps connection churn low: one connection and one
          // batch per partition instead of per row. This closure runs on the
          // executors; mysqlUrl (String) and mysqlProperties (java.util.Properties,
          // Serializable) are captured by value.
          df.foreachPartition { partition: Iterator[org.apache.spark.sql.Row] =>
            if (partition.nonEmpty) {
              // Open the connection with the full property set (user, password,
              // driver, rewriteBatchedStatements, useSSL).
              val conn = java.sql.DriverManager.getConnection(mysqlUrl, mysqlProperties)
              try {
                conn.setAutoCommit(false)

                // Pick the upsert statement for the target table. Count tables
                // accumulate (count = count + VALUES(count)); recommendation
                // payloads are overwritten with the latest value.
                val sql = tableName match {
                  case "gender_counts" =>
                    "INSERT INTO gender_counts (gender_type, count, update_time) VALUES (?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE count = count + VALUES(count), update_time = NOW()"

                  case "semester_gender_counts" =>
                    "INSERT INTO semester_gender_counts (semester, gender_type, count, update_time) VALUES (?, ?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE count = count + VALUES(count), update_time = NOW()"

                  case "status_gender_counts" =>
                    "INSERT INTO status_gender_counts (status_type, gender_type, count, update_time) VALUES (?, ?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE count = count + VALUES(count), update_time = NOW()"

                  case "class_gender_counts" =>
                    "INSERT INTO class_gender_counts (class_name, gender_type, count, update_time) VALUES (?, ?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE count = count + VALUES(count), update_time = NOW()"

                  case "semester_class_gender_counts" =>
                    "INSERT INTO semester_class_gender_counts (semester, class_name, gender_type, count, update_time) VALUES (?, ?, ?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE count = count + VALUES(count), update_time = NOW()"

                  case "user_recommendations" =>
                    "INSERT INTO user_recommendations (user_id, recommended_gpa, update_time) VALUES (?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE recommended_gpa = VALUES(recommended_gpa), update_time = NOW()"

                  case "user_gpa_matrix" =>
                    "INSERT INTO user_gpa_matrix (user_id, gpas, update_time) VALUES (?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE gpas = VALUES(gpas), update_time = NOW()"

                  case "user_similar_recommendations" =>
                    "INSERT INTO user_similar_recommendations (user_id, recommended_gpas, update_time) VALUES (?, ?, NOW()) " +
                      "ON DUPLICATE KEY UPDATE recommended_gpas = VALUES(recommended_gpas), update_time = NOW()"

                  case _ => throw new IllegalArgumentException(s"Unsupported table: $tableName")
                }

                // Prepared statement reused for every row in the partition.
                val pstmt = conn.prepareStatement(sql)
                try {
                  // Bind each row according to the table's column layout and
                  // queue it into the batch. Unknown table names never reach
                  // this match: the `sql` resolution above already threw.
                  partition.foreach { row: org.apache.spark.sql.Row =>
                    tableName match {
                      case "gender_counts" =>
                        pstmt.setString(1, row.getString(0)) // gender_type
                        pstmt.setLong(2, row.getLong(1)) // count

                      case "semester_gender_counts" =>
                        pstmt.setString(1, row.getString(0)) // semester
                        pstmt.setString(2, row.getString(1)) // gender_type
                        pstmt.setLong(3, row.getLong(2)) // count

                      case "status_gender_counts" =>
                        pstmt.setString(1, row.getString(0)) // status_type
                        pstmt.setString(2, row.getString(1)) // gender_type
                        pstmt.setLong(3, row.getLong(2)) // count

                      case "class_gender_counts" =>
                        pstmt.setString(1, row.getString(0)) // class_name
                        pstmt.setString(2, row.getString(1)) // gender_type
                        pstmt.setLong(3, row.getLong(2)) // count

                      case "semester_class_gender_counts" =>
                        pstmt.setString(1, row.getString(0)) // semester
                        pstmt.setString(2, row.getString(1)) // class_name
                        pstmt.setString(3, row.getString(2)) // gender_type
                        pstmt.setLong(4, row.getLong(3)) // count

                      case "user_recommendations" =>
                        pstmt.setString(1, row.getString(0)) // user_id
                        pstmt.setString(2, row.getString(1)) // recommended_gpa

                      case "user_gpa_matrix" =>
                        pstmt.setString(1, row.getString(0)) // user_id
                        pstmt.setString(2, row.getString(1)) // gpas (comma-separated string)

                      case "user_similar_recommendations" =>
                        pstmt.setString(1, row.getString(0)) // user_id
                        pstmt.setString(2, row.getString(1)) // recommended_gpas

                    }
                    pstmt.addBatch()
                  }

                  // Flush the batch and commit the partition atomically.
                  pstmt.executeBatch()
                  conn.commit()
                } finally {
                  pstmt.close()
                }
              } catch {
                case e: Exception =>
                  println(s"Error writing to $tableName: ${e.getMessage}")
                  // Guarded rollback: a broken connection must not mask `e`.
                  try conn.rollback() catch { case _: java.sql.SQLException => () }
                  e.printStackTrace()
              } finally {
                // Guarded restore so close() always runs even if the
                // connection is already dead.
                try conn.setAutoCommit(true) catch { case _: java.sql.SQLException => () }
                conn.close()
              }
            }
          }
        } catch {
          case e: Exception =>
            println(s"Error processing partition for $tableName: ${e.getMessage}")
            e.printStackTrace()
        }
      }

      // 1) Spark SQL: overall male/female totals for each micro-batch.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          // Field 2 of each record is the gender flag ('1' = male).
          val genderDf = rdd.map(fields => fields(2)).toDF("gender")
          genderDf.createOrReplaceTempView("gender_view")

          val genderSql =
            """SELECT
              CASE WHEN gender = '1' THEN 'Male' ELSE 'Female' END AS gender_type,
              COUNT(*) AS count
            FROM gender_view
          GROUP BY  gender"""
          val genderCounts = spark.sql(genderSql)

          // Upsert the aggregates into MySQL (counts accumulate across batches).
          safeWriteToMySQL(genderCounts, "gender_counts")

          // Publish a human-readable summary to Kafka.
          val summary = genderCounts
            .collect()
            .map(row => s"${row.getString(0)}: ${row.getLong(1)}")
            .mkString(", ")
          sendResultToKafka("test3", "gender_counts:", summary)
        }
      }

      // 2) Spark SQL: male/female counts per semester.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          // Field 5 = semester, field 2 = gender flag ('1' = male).
          val df = rdd.map(p => (p(5), p(2))).toDF("semester", "gender")
          df.createOrReplaceTempView("semester_gender_view")

          val semesterGenderCounts = spark.sql(
            """SELECT
              semester,
              CASE WHEN gender = '1' THEN 'Male' ELSE 'Female' END AS gender_type,
              COUNT(*) AS count
            FROM semester_gender_view
            GROUP BY semester, gender"""
          )

          // Upsert into MySQL (counts accumulate across batches).
          safeWriteToMySQL(semesterGenderCounts, "semester_gender_counts")

          // Publish a readable summary to Kafka.
          val results = semesterGenderCounts.collect().map(row =>
            s"semester: ${row.getString(0)}, gender_type: ${row.getString(1)}, count: ${row.getLong(2)}"
          ).mkString("; ")
          sendResultToKafka("test3", "semester_gender_counts:", results)
        }
      }

      // 3) Spark SQL: enrolled vs. not-enrolled counts split by gender.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          // Field 7 = status ('A' appears to mean "in school" — mapped to 'Yes'),
          // field 2 = gender flag.
          val df = rdd.map(p => (p(7), p(2))).toDF("status", "gender")
          df.createOrReplaceTempView("status_gender_view")

          val statusGenderCounts = spark.sql(
            """SELECT
              CASE WHEN status = 'A' THEN 'Yes' ELSE 'No' END AS status_type,
              CASE WHEN gender = '1' THEN 'Male' ELSE 'Female' END AS gender_type,
              COUNT(*) AS count
            FROM status_gender_view
            GROUP BY status, gender"""
          )

          // Upsert into MySQL.
          safeWriteToMySQL(statusGenderCounts, "status_gender_counts")

          // Publish a readable summary to Kafka.
          val results = statusGenderCounts.collect().map(row =>
            s"status_type: ${row.getString(0)}, gender_type: ${row.getString(1)}, count: ${row.getLong(2)}"
          ).mkString("; ")
          sendResultToKafka("test3", "status_gender_counts:", results)
        }
      }

      // 4) Spark SQL: male/female counts per class.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          // Field 0 = class name, field 2 = gender flag.
          val df = rdd.map(p => (p(0), p(2))).toDF("className", "gender")
          df.createOrReplaceTempView("class_gender_view")

          val classGenderCounts = spark.sql(
            """SELECT
              className AS class_name,
              CASE WHEN gender = '1' THEN 'Male' ELSE 'Female' END AS gender_type,
              COUNT(*) AS count
            FROM class_gender_view
            GROUP BY className, gender"""
          )

          // Upsert into MySQL.
          safeWriteToMySQL(classGenderCounts, "class_gender_counts")

          // Publish a readable summary to Kafka.
          val results = classGenderCounts.collect().map(row =>
            s"class_name: ${row.getString(0)}, gender_type: ${row.getString(1)}, count: ${row.getLong(2)}"
          ).mkString("; ")
          sendResultToKafka("test3", "class_gender_counts:", results)
        }
      }

      // 5) Spark RDD: male/female counts per (semester, class) pair.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          val counts = rdd.map { parts =>
            val semester = parts(5)
            val className = parts(0)
            val gender = if (parts(2) == "1") "Male" else "Female"
            ((semester, className, gender), 1)
          }.reduceByKey(_ + _) // Int counts

          // Widen Int -> Long so the JDBC writer's row.getLong(3) works.
          val countDF = counts.map { case ((semester, className, gender), count) =>
            (semester, className, gender, count.toLong) // explicit widening to Long
          }.toDF("semester", "class_name", "gender_type", "count")

          safeWriteToMySQL(countDF, "semester_class_gender_counts")

          // Kafka summary; the count column is Long here, hence getLong.
          val results = countDF.collect().map(row =>
            s"semester: ${row.getString(0)}, class_name: ${row.getString(1)}, gender_type: ${row.getString(2)}, count: ${row.getLong(3)}" // getLong matches the widened column
          ).mkString("; ")
          sendResultToKafka("test3", "semester_class_gender_counts:", results)
        }
      }

      // 7) Collaborative-filtering recommendations.
      // NOTE(review): this section collects per-user data to the driver
      // (collect / collectAsMap) and repeatedly recomputes un-cached RDDs via
      // count()/collect() — acceptable only for small batches; consider
      // caching userGpaMatrix and broadcasting the collected maps.
      dataStream.foreachRDD { rdd =>
        if (!rdd.isEmpty()) {
          // ============= a. Build the user -> GPA-list matrix =============
          val userGpaMatrix = rdd.map { parts =>
              val userId = parts(1)
              val gpas = parts(6).split(",").map(_.trim).filter(_.nonEmpty) // split and drop blanks
              (userId, gpas)
            }.filter(_._2.nonEmpty) // drop users with no GPAs
            .groupByKey()
            .mapValues(_.flatten.toList) // merge all GPAs of the same user

          val gpaDF = userGpaMatrix.map { case (userId, gpas) =>
            (userId, gpas.mkString(",")) // store the GPA list as a comma-separated string
          }.toDF("user_id", "gpas")

          // Persist the matrix to MySQL.
          safeWriteToMySQL(gpaDF, "user_gpa_matrix")
          // Publish to Kafka.
          val userGpaResults = userGpaMatrix.collect().map { case (userId, gpas) =>
            s"user_id: $userId, gpas: ${gpas.mkString(", ")}"
          }.mkString("; ")
          sendResultToKafka("test3", "user_gpa_matrix:", userGpaResults)
          println(s"[USER_GPA] Sent ${userGpaMatrix.count()} user-gpa records to Kafka") // progress log

          // ============= b. Build the GPA-GPA co-occurrence matrix =============
          val gpaPairs = rdd.flatMap { parts =>
            val gpas = parts(6).split(",").map(_.trim).filter(_.nonEmpty) // split and drop blanks
            if (gpas.length >= 2) { // pairs only exist with at least 2 GPAs
              gpas.combinations(2).map(pair => (pair.sorted.mkString("-"), 1)) // sort so "3.5-4.0" and "4.0-3.5" are the same pair
            } else {
              Seq.empty[(String, Int)] // otherwise nothing to emit
            }
          }.reduceByKey(_ + _) // count pair occurrences

          // Publish to Kafka (handles the empty case explicitly).
          val gpaGpaResults = if (gpaPairs.count() > 0) {
            gpaPairs.collect().map { case (gpaPair, count) =>
              s"gpa_pair: $gpaPair, count: $count"
            }.mkString("; ")
          } else {
            "No valid gpa pairs found" // fallback message when there is no data
          }
          sendResultToKafka("test3", "gpa_gpa_matrix:", gpaGpaResults)
          println(s"[GPA_PAIR] Sent ${gpaPairs.count()} gpa pairs to Kafka") // progress log

          // ============= c. Item-based CF: per-user recommendation list =============
          // Pairs are pulled to the driver and captured by the closure below.
          val localGpaPairs = gpaPairs.collect() // Array[(String, Int)]
          val userRecommendations = userGpaMatrix.flatMap { case (userId, gpas) =>
            val validGpas = gpas.filter(_.nonEmpty).toSet
            if (validGpas.isEmpty) {
              Seq.empty[(String, String)]
            } else {
              // Pairs that touch at least one GPA this user already has.
              val relatedPairs = localGpaPairs.filter { case (gpaPair, _) =>
                gpaPair.split("-").exists(validGpas.contains)
              }
              // Candidate GPAs: the other side of those pairs.
              val candidateGpas = relatedPairs.flatMap { case (gpaPair, _) =>
                gpaPair.split("-").filter(gpa => !validGpas.contains(gpa))
              }.distinct
              // Rank candidates by summed co-occurrence weight, descending.
              val sortedCandidates = candidateGpas.sortBy { candidateGpa =>
                relatedPairs.filter { case (pair, _) => pair.contains(candidateGpa) }.map(_._2).sum
              }.reverse
              sortedCandidates.take(5).map(gpa => (userId, gpa))
            }
          }.toDF("user_id", "recommended_gpa")

          // Persist to MySQL.
          if (userRecommendations.count() > 0) {
            safeWriteToMySQL(userRecommendations, "user_recommendations")
            println(s"[RECOMMEND] Wrote ${userRecommendations.count()} recommendations to MySQL")
            val recommendationResults = userRecommendations.collect().map(row =>
              s"user_id: ${row.getString(0)}, recommended_gpa: ${row.getString(1)}"
            ).mkString("; ")
            sendResultToKafka("test3", "user_recommendations:", recommendationResults)
          } else {
            println("[RECOMMEND] No recommendations generated")
          }

          // ============= d. User-similarity-based GPA recommendations =============
          // Collect the whole user->GPA map to the driver as Doubles.
          // NOTE(review): .toDouble throws NumberFormatException for non-numeric
          // GPA strings — confirm the input field is always numeric.
          val localUserGpaMap = userGpaMatrix
            .mapValues(_.map(_.toDouble)) // convert to Double for similarity math
            .collectAsMap()

          // Per-user similarity against every other user (driver map captured
          // in the closure; only suitable for small data sets).
          val userSimilarityRDD = userGpaMatrix.map { case (userIdA, gpasA) =>
            // Skip self-comparison; compute cosine similarity to everyone else.
            val similarities = localUserGpaMap.flatMap { case (userIdB, gpasB) =>
              if (userIdA != userIdB && gpasA.nonEmpty && gpasB.nonEmpty) {
                val similarity = cosineSimilarity(gpasA.map(_.toDouble), gpasB)
                Some((userIdB, similarity))
              } else {
                None
              }
            }.toList

            // Keep the top-K most similar users.
            val topKSimilarUsers = similarities.sortBy(-_._2).take(5) // K=5
            (userIdA, topKSimilarUsers)
          }

          // Recommend GPAs collected from similar users, excluding ones the
          // target user already has.
          val userSimilarityRecommendations = userSimilarityRDD.flatMap { case (userId, similarUsers) =>
            // GPAs the target user already has.
            val userGpaSet = localUserGpaMap.getOrElse(userId, List.empty[Double]).toSet

            // Gather similar users' GPAs, tagged with the source similarity.
            val recommendedGpas = similarUsers.flatMap { case (similarUserId, similarity) =>
              localUserGpaMap.getOrElse(similarUserId, List.empty[Double])
                .filterNot(userGpaSet.contains)
                .map(gpa => (gpa, similarity)) // keep the similarity as the weight
            }

            // Weight each candidate by the summed similarity of its sources.
            if (recommendedGpas.nonEmpty) {
              val topRecommendations = recommendedGpas
                .groupBy(_._1) // group by GPA value
                .mapValues(_.map(_._2).sum) // total similarity per GPA
                .toList
                .sortBy(-_._2) // highest weight first
                .take(3) // top 3
                .map(_._1.toString) // back to strings for storage

              Some((userId, topRecommendations.mkString(",")))
            } else {
              None
            }
          }.toDF("user_id", "similar_recommended_gpas")

          // Persist to MySQL.
          if (userSimilarityRecommendations.count() > 0) {
            safeWriteToMySQL(userSimilarityRecommendations, "user_similar_recommendations")
            println(s"[USER_SIMILAR] Wrote ${userSimilarityRecommendations.count()} similarity-based recommendations")

            // Publish to Kafka.
            val similarityResults = userSimilarityRecommendations.collect().map(row =>
              s"user_id: ${row.getString(0)}, similar_recommended_gpas: ${row.getString(1)}"
            ).mkString("; ")
            sendResultToKafka("test3", "user_similar_recommendations:", similarityResults)
          } else {
            println("[USER_SIMILAR] No similarity-based recommendations generated")
          }
        }
      }

      // Helper: cosine similarity between two GPA vectors.
      // `zip` truncates to the shorter vector (same as before), and a
      // zero-magnitude vector yields 0.0 instead of dividing by zero.
      def cosineSimilarity(v1: Seq[Double], v2: Seq[Double]): Double = {
        def magnitude(v: Seq[Double]): Double =
          math.sqrt(v.foldLeft(0.0)((acc, x) => acc + x * x))

        val dot = v1.zip(v2).foldLeft(0.0) { case (acc, (a, b)) => acc + a * b }
        val m1 = magnitude(v1)
        val m2 = magnitude(v2)

        if (m1 == 0 || m2 == 0) 0.0 else dot / (m1 * m2)
      }

      // Start the streaming job and block until it is terminated.
      ssc.start()
      ssc.awaitTermination()
    }
}