package spark

import java.sql.Connection
import java.sql.DriverManager
import java.sql.PreparedStatement

import scala.util.control.NonFatal

import org.apache.spark.SparkConf
import org.apache.spark.sql.Row
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming job that consumes tab-separated order records from the
 * Kafka topic "orders" and maintains five per-batch statistics, each
 * persisted into its own MySQL table (task1..task5).
 *
 * Record layout (tab-separated; only these indices are read):
 *   field 0 = category, field 1 = product / order name, field 6 = valid flag ("Y"/"N").
 */
object CombinedOrderStats {

  // JDBC settings shared by every sink. Kept in one place so a change to the
  // database only has to be made once (previously duplicated five times).
  private val JdbcUrl = "jdbc:mysql://47.93.166.116:3306/final3?useSSL=false&serverTimezone=UTC"
  private val JdbcUser = "root"
  private val JdbcPassword = "Root123!"

  // Minimum number of tab-separated fields a record must have for the
  // indices used below (0, 1, 6) to be in range.
  private val MinFields = 7

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("CombinedOrderStats").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(8))
    ssc.sparkContext.setLogLevel("WARN")
    val spark = SparkSession.builder.config(conf).getOrCreate()

    val kafkaParams = Map(
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "group.id" -> "niit",
      "auto.offset.reset" -> "earliest"
    )

    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array("orders"), kafkaParams)
    )

    // Parse every record exactly once into (category, orderName, validFlag).
    // Records with too few fields are dropped instead of throwing
    // ArrayIndexOutOfBoundsException and failing the whole batch.
    val parsed = stream
      .map(_.value().split("\t"))
      .filter(_.length >= MinFields)
      .map(fields => (fields(0), fields(1), fields(6)))
    // Five independent output operations consume this DStream; cache it so the
    // Kafka data is fetched once per batch rather than once per job.
    parsed.cache()

    // Feature 1: total counts of valid and invalid orders.
    // The original mapped "Y" -> true and everything else -> false, which is
    // exactly an equality test against "Y".
    val validInvalidCountStream = parsed
      .map { case (_, _, flag) => (flag == "Y", 1L) }
      .reduceByKey(_ + _)
    validInvalidCountStream.print()
    validInvalidCountStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreachPartition { partition: Iterator[(Boolean, Long)] =>
          writeToMySQL(partition, JdbcUrl, JdbcUser, JdbcPassword, "task1") { (_, stmt, data) =>
            val (isValid, count) = data
            stmt.setBoolean(1, isValid)
            stmt.setLong(2, count)
          }
        }
      }
    }

    // Feature 2: valid/invalid order counts per product.
    val productValidCountStream = parsed
      .map { case (_, product, flag) => ((product, flag == "Y"), 1L) }
      .reduceByKey(_ + _)
    productValidCountStream.print()
    productValidCountStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreachPartition { partition: Iterator[((String, Boolean), Long)] =>
          writeToMySQL(partition, JdbcUrl, JdbcUser, JdbcPassword, "task2") { (_, stmt, data) =>
            val ((product, isValid), count) = data
            stmt.setString(1, product)
            stmt.setBoolean(2, isValid)
            stmt.setLong(3, count)
          }
        }
      }
    }

    // Feature 3: order count per category.
    val categoryCountStream = parsed
      .map { case (category, _, _) => (category, 1L) }
      .reduceByKey(_ + _)
    categoryCountStream.print()
    categoryCountStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreachPartition { partition: Iterator[(String, Long)] =>
          writeToMySQL(partition, JdbcUrl, JdbcUser, JdbcPassword, "task3") { (_, stmt, data) =>
            val (category, count) = data
            stmt.setString(1, category)
            stmt.setLong(2, count)
          }
        }
      }
    }

    // Feature 4: valid/invalid order counts per category (via DataFrame).
    import spark.implicits._
    parsed.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        val df = rdd
          .map { case (category, _, flag) => (category, flag) }
          .toDF("category", "isValid")
        // count("*") can never be null, so the original `.na.fill(0)` was a
        // no-op and has been removed.
        val result = df.groupBy("category", "isValid").agg(count("*").alias("count"))
        result.show()
        // Go through .rdd to sidestep the Dataset.foreachPartition overload
        // ambiguity (Scala `Iterator => Unit` vs Java ForeachPartitionFunction).
        result.rdd.foreachPartition { partition =>
          writeToMySQL(partition, JdbcUrl, JdbcUser, JdbcPassword, "task4") { (_, stmt, row) =>
            stmt.setString(1, row.getString(0))
            stmt.setString(2, row.getString(1))
            stmt.setLong(3, row.getLong(2))
          }
        }
      }
    }

    // Feature 5: valid/invalid counts per (category, order) pair. The raw
    // "Y"/"N" flag is kept as a String, matching the task5 table schema.
    val stats = parsed.map(record => (record, 1)).reduceByKey(_ + _)
    stats.print()
    stats.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreachPartition { partition: Iterator[((String, String, String), Int)] =>
          // Materialize the partition so it can be both printed and written.
          val dataList = partition.toList

          dataList.foreach { case ((category, orderName, isValid), count) =>
            println(s"类别: $category, 订单: $orderName, 有效性: $isValid, 数量: $count")
          }

          writeToMySQL(dataList.iterator, JdbcUrl, JdbcUser, JdbcPassword, "task5") { (_, stmt, data) =>
            val ((category, orderName, isValid), count) = data
            stmt.setString(1, category)
            stmt.setString(2, orderName)
            stmt.setString(3, isValid)
            stmt.setInt(4, count)
          }
        }
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Writes one partition of records into a MySQL table, batched and wrapped
   * in a single transaction (commit on success, rollback on failure).
   *
   * @param partition records to persist
   * @param url       JDBC connection URL
   * @param user      database user
   * @param password  database password
   * @param table     target table; must be one of task1..task5
   * @param setParams callback binding one record's values onto the statement
   * @throws IllegalArgumentException if `table` is not a known table name
   */
  def writeToMySQL[T](partition: Iterator[T], url: String, user: String, password: String, table: String)(setParams: (Connection, PreparedStatement, T) => Unit): Unit = {
    // Resolve the SQL BEFORE opening the connection: in the original, the
    // IllegalArgumentException for an unknown table was thrown inside the try
    // and silently swallowed by the catch. The statements are a fixed
    // whitelist — never built from input data — so there is no injection risk.
    val sql = table match {
      case "task1" => "INSERT INTO task1 (is_valid, count) VALUES (?, ?)"
      case "task2" => "INSERT INTO task2 (product, is_valid, count) VALUES (?, ?, ?)"
      case "task3" => "INSERT INTO task3 (category, count) VALUES (?, ?)"
      case "task4" => "INSERT INTO task4 (category, is_valid, count) VALUES (?, ?, ?)"
      case "task5" => "INSERT INTO task5 (category, order_name, is_valid, count) VALUES (?, ?, ?, ?)"
      case _ => throw new IllegalArgumentException(s"Unsupported table: $table")
    }

    var conn: Connection = null
    var stmt: PreparedStatement = null
    try {
      conn = DriverManager.getConnection(url, user, password)
      conn.setAutoCommit(false) // one transaction per partition

      // Single PreparedStatement reused for the whole partition; JDBC
      // batching keeps round trips down on large partitions.
      stmt = conn.prepareStatement(sql)
      val BatchSize = 1000
      var pending = 0

      partition.foreach { data =>
        setParams(conn, stmt, data)
        stmt.addBatch()
        pending += 1
        // Flush a full batch; `pending` is reset, so equality is sufficient.
        if (pending == BatchSize) {
          stmt.executeBatch()
          pending = 0
        }
      }

      // Flush whatever is left over.
      if (pending > 0) {
        stmt.executeBatch()
      }

      conn.commit()
    } catch {
      // NonFatal (not bare Exception): never swallow OOM/interrupts. The
      // write is best-effort — log, roll back, and let the next interval retry.
      case NonFatal(e) =>
        e.printStackTrace()
        if (conn != null) conn.rollback()
    } finally {
      if (stmt != null) try stmt.close() catch { case NonFatal(e) => e.printStackTrace() }
      if (conn != null) try conn.close() catch { case NonFatal(e) => e.printStackTrace() }
    }
  }
}