package com.niit.mlib

import java.sql.DriverManager
import java.sql.PreparedStatement
import java.sql.Connection
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer
import scala.math.{pow, sqrt}
import org.apache.kafka.common.serialization.StringDeserializer

object SparkMlibFromKafka {

  /**
   * Streaming item-similarity job.
   *
   * Reads tab-separated order records from the Kafka topic "orders" in
   * 120-second micro-batches, computes an item-item similarity score
   * (sum of products of L2-normalized per-user scores), and writes the
   * top-N similar items per item into the MySQL table `task6`.
   *
   * Record layout (assumed from the field indices used below — confirm
   * against the producer): field 0 = user id, field 1 = item id,
   * field 5 = numeric score.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("SparkMlibFromKafka")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(120))

    // Kafka consumer configuration.
    // NOTE(review): auto-commit is disabled but offsets are never committed
    // manually either, so a restarted job re-reads from "earliest" — confirm
    // whether that at-least-once reprocessing is intended.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "spark-mllib-group",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("orders")

    // Direct stream from Kafka.
    val kafkaStream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Per-user cap on how many (item, score) entries participate in the
    // similarity computation, and the number of similar items kept per
    // item in the final output.
    val maxItemsPerUser = 20
    val topn = 5

    // Parse "user \t item \t ... \t score" into (user, (item, score)),
    // then build, per user, the L2-normalized (user, (item, score)) vectors
    // grouped first by user and then by item.
    val processedStream = kafkaStream.map { record: ConsumerRecord[String, String] =>
      val fields = record.value().split("\t")
      println(s"从 Kafka 读取到的数据: ${record.value()}")
      (fields(0), (fields(1), fields(5).toDouble))
    }.groupByKey().flatMap { case (user, itemScores) =>
      // Keep at most `maxItemsPerUser` entries per user and re-key by item.
      println(s"按用户分组后，用户 $user 对应的数据数量: ${itemScores.size}")
      val reKeyed = itemScores.take(maxItemsPerUser).map { case (item, score) =>
        (item, (user, score))
      }
      println(s"处理后，与用户 $user 相关的数据数量: ${reKeyed.size}")
      reKeyed
    }.groupByKey().flatMap { case (item, userScores) =>
      // L2-normalize each item's score vector across users.
      println(s"按物品分组后，物品 $item 对应的数据数量: ${userScores.size}")
      val norm = sqrt(userScores.iterator.map(us => pow(us._2, 2)).sum)
      val normalized = userScores.map { case (user, score) =>
        (user, (item, score / norm))
      }
      println(s"处理后，与物品 $item 相关的数据数量: ${normalized.size}")
      normalized
    }.groupByKey()

    // For every user, emit the pairwise products of the normalized scores of
    // the items that user touched — in both (a, b) and (b, a) orderings so
    // the similarity matrix is symmetric.
    val unpack_rdd = processedStream.flatMap { case (_, itemScores) =>
      val is_arr = itemScores.toArray
      println(s"计算相似度前，数据数量: ${is_arr.length}")
      val pairProducts = for {
        i <- is_arr.indices
        j <- (i + 1) until is_arr.length
        product = is_arr(i)._2 * is_arr(j)._2
        pair <- Seq(
          ((is_arr(i)._1, is_arr(j)._1), product),
          ((is_arr(j)._1, is_arr(i)._1), product)
        )
      } yield pair
      println(s"计算相似度后，相似度数据数量: ${pairProducts.size}")
      pairProducts
    }

    // Sum the pairwise products into one similarity score per (itemA, itemB),
    // then keep the top-N most similar items per item, serialized as
    // "itemA \t itemB1:score1,itemB2:score2," (trailing comma preserved for
    // compatibility with whatever consumes the `task6` table).
    val resultStream = unpack_rdd.groupByKey().map { case (ii_pair, products) =>
      println(s"按相似度分组后，组 $ii_pair 对应的数据数量: ${products.size}")
      (ii_pair._1, (ii_pair._2, products.sum))
    }.groupByKey().map { case (item_a, similarItems) =>
      println(s"按物品分组后，物品 $item_a 对应的数据数量: ${similarItems.size}")
      val ranked = similarItems.toArray.sortWith(_._2 > _._2)
      val serialized = ranked.take(topn).map { case (item, score) =>
        s"$item:${"%1.4f".format(score)},"
      }.mkString
      println(s"最终生成的推荐结果: $item_a\t$serialized")
      item_a + "\t" + serialized
    }

    // Persist each batch into MySQL: one connection per partition (not per
    // record) to amortize the connection cost.
    resultStream.foreachRDD { rdd =>
      rdd.foreachPartition { partitionOfRecords =>
        var connection: Connection = null
        var statement: PreparedStatement = null

        try {
          val url = "jdbc:mysql://localhost:3306/huel2"
          val user = "root"
          val password = "123456"

          connection = DriverManager.getConnection(url, user, password)

          val sql = "INSERT INTO task6 (item, similar_items) VALUES (?, ?)"
          statement = connection.prepareStatement(sql)

          partitionOfRecords.foreach { result =>
            val parts = result.split("\t")
            val item = parts(0)
            // Guard: split("\t") drops a trailing empty field, so a record
            // with an empty similarity list would otherwise throw
            // ArrayIndexOutOfBoundsException on parts(1).
            val similarItems = if (parts.length > 1) parts(1) else ""

            statement.setString(1, item)
            statement.setString(2, similarItems)

            // Insert one row; keep going on per-row failures so a single
            // bad record does not abort the whole partition.
            try {
              val rowsAffected = statement.executeUpdate()
              if (rowsAffected == 0) {
                println(s"插入数据失败，可能存在重复数据: $result")
              }
            } catch {
              case e: Exception =>
                println(s"插入数据时数据库操作出错: ${e.getMessage}")
                e.printStackTrace()
            }
          }
        } catch {
          case e: Exception =>
            println(s"数据库操作出错: ${e.getMessage}")
            e.printStackTrace()
        } finally {
          // Close the statement in its own try so a failure there cannot
          // leak the connection (the original closed both sequentially).
          if (statement != null) {
            try statement.close()
            catch { case e: Exception => e.printStackTrace() }
          }
          if (connection != null) connection.close()
        }
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}