package spark

// Counts of valid and invalid records per (category, order) combination.
import java.sql.DriverManager
import java.sql.PreparedStatement
import java.sql.Connection
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

object OrderCategoryStatsRDD5 {
  // Real-time processing job based on Spark Streaming.
  // Consumes tab-separated order records from Kafka, counts valid/invalid
  // quantities per (category, order) combination in each micro-batch, prints
  // the results, and persists them into MySQL table `task5`.
  // Author: 孟佳怡
  def main(args: Array[String]): Unit = {
    // Environment initialization: local mode, 8-second micro-batches.
    val conf = new SparkConf().setAppName("OrderCategoryStatsRDD").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(8))
    ssc.sparkContext.setLogLevel("WARN")

    // Kafka ingestion parameters.
    val kafkaParams = Map(
      "bootstrap.servers" -> "192.168.136.128:9092",
      "key.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "value.deserializer" -> "org.apache.kafka.common.serialization.StringDeserializer",
      "group.id" -> "niit",
      "auto.offset.reset" -> "earliest"
    )

    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Array("orders"), kafkaParams)
    )

    // Parse each tab-separated record and map it to ((category, orderName, isValid), 1).
    val mappedStream = stream.map { record =>
      val fields = record.value().split("\t")
      val category = fields(0)  // category: field 1 (index 0)
      val orderName = fields(1) // order name: field 2 (index 1)
      val isValid = fields(6)   // validity flag: field 7 (index 6)
      ((category, orderName, isValid), 1)
    }

    // Aggregate: sum the 1s for identical keys to get, per batch, the count of
    // valid and invalid records for each order within each category.
    val stats = mappedStream.reduceByKey(_ + _)
    stats.print()

    // Output: for each batch RDD, print every result to the console, then
    // batch-insert the results into MySQL table `task5`.
    stats.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // foreachPartition amortizes per-partition setup cost versus
        // per-record handling; here it just formats and prints each record.
        rdd.foreachPartition { partition =>
          partition.foreach { case ((category, orderName, isValid), count) =>
            println(s"类别: $category, 订单: $orderName, 有效性: $isValid, 数量: $count")
          }
        }

        // Persist results into MySQL: one connection and ONE PreparedStatement
        // per partition, rows accumulated with addBatch and flushed in a single
        // executeBatch inside one transaction.
        rdd.foreachPartition { partition =>
          var conn: Connection = null
          var stmt: PreparedStatement = null
          try {
            val url = "jdbc:mysql://localhost:3306/huel2?useSSL=false&serverTimezone=UTC"
            val user = "root"
            val password = "123456"
            conn = DriverManager.getConnection(url, user, password)
            conn.setAutoCommit(false) // commit the whole partition atomically

            // Prepare the statement once per partition. (Previously it was
            // re-prepared and re-closed for every single record, which is
            // wasteful and left the last statement reference dangling on error.)
            val sql = "INSERT INTO task5 (category, order_name, is_valid, count) VALUES (?,?,?,?)"
            stmt = conn.prepareStatement(sql)

            partition.foreach { case ((category, orderName, isValid), count) =>
              stmt.setString(1, category)
              stmt.setString(2, orderName)
              stmt.setString(3, isValid)
              stmt.setLong(4, count.toLong)
              stmt.addBatch()
            }
            stmt.executeBatch() // single round trip for the whole partition
            conn.commit()
          } catch {
            case e: Exception =>
              e.printStackTrace()
              if (conn != null) conn.rollback()
          } finally {
            if (stmt != null) try { stmt.close() } catch { case e: Exception => e.printStackTrace() }
            if (conn != null) try { conn.close() } catch { case e: Exception => e.printStackTrace() }
          }
        }
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}