package ds_industry_2025.ds.Formal_volume2.T3

import org.apache.spark.sql.{SparkSession, functions}
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

import java.util.Properties
/*
    5. From the MySQL table shtd_store.order_info: accumulate order amounts in ascending
    order of id, take the 10 orders whose running total is closest to the target value
    2023060600, sort them by that difference in ascending order, and store the result
    (order_id, diff_money) in the MySQL table shtd_result.order_final_money_amount_diff.
    Then, in the Linux MySQL command line, query all rows of
    shtd_result.order_final_money_amount_diff; paste the SQL statement and a screenshot
    of its output under the corresponding task number in the client-desktop document
    [Release\Task B Results.docx].
 */
object t5 {
  def main(args: Array[String]): Unit = {
    // Reads shtd_store.order_info, computes the running total of final_total_amount
    // in ascending id order, picks the 10 orders whose running total is closest to
    // the target value, sorts them by that difference ascending, and writes
    // (order_id, diff_money) to shtd_result.order_final_money_amount_diff.
    val targetAmount = 2023060600L

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("T5")
      // Fixed key: dynamic-partition *mode* takes "nonstrict";
      // "hive.exec.dynamic.partition" itself is a true/false flag.
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Fixed typo: was "HoodieSparKSessionExtension" (capital K), which would
      // fail to load the Hudi Spark session extension.
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // NOTE(review): credentials are hard-coded; consider externalizing them.
    val conn = new Properties()
    conn.setProperty("user", "root")
    conn.setProperty("password", "123456")
    conn.setProperty("driver", "com.mysql.jdbc.Driver")

    // Single JDBC read (the original read the table twice). No DataFrame-level
    // orderBy is needed: the window below defines its own ordering by id.
    val data = spark.read
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_store?useSSL=false", "order_info", conn)
      .select("id", "final_total_amount")

    // Running-total window: ORDER BY id ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW,
    // i.e. sum from the first row up to and including the current row.
    val runningWindow = Window
      .orderBy("id")
      .rowsBetween(Window.unboundedPreceding, Window.currentRow)

    val result = data
      .withColumn("current_money", functions.sum("final_total_amount").over(runningWindow))
      .withColumn("diff_money", functions.abs(col("current_money") - targetAmount))
      // Column names must match the target table schema (order_id, diff_money).
      .select(col("id").as("order_id"), col("diff_money"))
      .orderBy("diff_money")
      .limit(10)

    result.show()

    // Persist to MySQL as the task statement requires (the original only called show()).
    result.write
      .mode("append")
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_result?useSSL=false",
        "order_final_money_amount_diff", conn)

    spark.close()
  }

}
