package DianShang_2024.ds_server.indicator

import org.apache.spark.sql.catalyst.dsl.expressions.StringToAttributeConversionHelper
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.{current_timestamp, date_format, lit, to_timestamp, window}

object trait10 {
  def main(args: Array[String]): Unit = {
    /*
      Task 10: over the order data created between 2022-04-26 00:00:00 and
      09:59:59, compute — per 5-hour sliding window with a 1-hour slide step —
      the total order money and the total order count. Windows shorter than
      5 hours must not fire, i.e. the first result is the window triggered at
      05:00:00 covering hours 00-04. Results are appended to the ClickHouse
      table shtd_result.slidewindowconsumption and then queried from the
      ClickHouse CLI ordered ascending by time period (top 5 rows).
    */
    //  Build the SparkSession; Hive support is required so the ods_server
    //  tables resolve against the metastore.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第十题")
      .enableHiveSupport()
      .getOrCreate()

    /*
      Source data. create_time is stored as a 'yyyyMMddHHmmss' string, so it
      is first parsed with to_timestamp(time,'yyyyMMddHHmmss') and then
      re-rendered with date_format() as 'yyyy-MM-dd HH:mm:ss'.
    */
    spark.sql(
        """
          |select
          |order_id ,
          |order_sn ,
          |customer_id ,
          |shipping_user ,
          |province ,
          |city ,
          |address ,
          |order_source ,
          |payment_method ,
          |order_money ,
          |district_money ,
          |shipping_money ,
          |payment_money ,
          |shipping_comp_name ,
          |shipping_sn ,
          |date_format(to_timestamp(create_time,'yyyyMMddHHmmss'),'yyyy-MM-dd HH:mm:ss') as create_time ,
          |shipping_time ,
          |pay_time ,
          |receive_time ,
          |order_status ,
          |order_point ,
          |invoice_title ,
          |modified_time,
          |etl_date
          |from ods_server.order_master01
          |where etl_date='20240311'  and length(city) <= 8
          |""".stripMargin)
      .withColumn("dwd_insert_user", lit("user1"))
      // Audit timestamps: current_timestamp() round-tripped through
      // date_format/to_timestamp to truncate to whole-second precision.
      .withColumn(
        "dwd_insert_time",
        to_timestamp(date_format(current_timestamp(), "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss").cast("timestamp")
      )
      .withColumn("dwd_modify_user", lit("user1"))
      .withColumn(
        "dwd_modify_time",
        to_timestamp(date_format(current_timestamp(), "yyyy-MM-dd HH:mm:ss"), "yyyy-MM-dd HH:mm:ss").cast("timestamp")
      )
      .createOrReplaceTempView("temp_table")

    //  Restrict to the required time range.
    //  FIX: the lower bound is inclusive (>=) so orders created exactly at
    //  00:00:00 are kept; the upper bound stays exclusive (< 10:00:00 is
    //  equivalent to "through 09:59:59").
    spark.sql(
      """
        |select
        |*
        |from temp_table
        |where create_time >= '2022-04-26 00:00:00' and create_time < '2022-04-26 10:00:00'
        |""".stripMargin).createOrReplaceTempView("data_time")

    //  Truncate create_time to 'yyyy-MM-dd HH' so each row carries its hour
    //  bucket, which the sliding-window aggregation below groups on.
    spark.sql(
      """
        |select
        |date_format(create_time,'yyyy-MM-dd HH') as create_time,
        |order_money
        |from data_time
        |""".stripMargin).createOrReplaceTempView("temp01")

    spark.sql("select * from temp01 limit 20").show

    /*
      Sliding-window computation:
        1. Inner query (t1): per-hour totals (money sum / order count).
        2. Middle query (t2): over(order by create_time rows 4 preceding) —
           the current hour plus the 4 previous hours = a 5-hour window
           sliding by 1 hour. No PARTITION BY: all hour rows form one ordered
           sequence (acceptable here — at most 10 rows).
        3. Outer query: keep only full 5-hour windows.
      FIX 1: the hour filter originally sat in the same query level as the
      window functions. SQL evaluates WHERE *before* window functions, so the
      frames never saw hours 00-04 and early windows were computed over too
      few rows. The filter now lives in an outer query, applied after the
      window values are materialised.
      FIX 2: the condition is hour >= 4 (not > 4): the row for hour 04 plus
      its 4 preceding rows is exactly the first full window 00:00-04:59,
      triggered at 05:00:00 as the task requires; > 4 dropped that window.
      FIX 3: money_avg is windowed total money / windowed order count; the
      old avg-of-hourly-averages is wrong whenever hours differ in order
      count.
    */
    val result_data = spark.sql(
      """
        |select
        |create_time,
        |money_sum,
        |money_count,
        |money_sum / money_count as money_avg
        |from (
        |  select
        |  create_time,
        |  sum(money_sum) over(order by create_time rows 4 preceding) as money_sum,
        |  sum(money_count) over(order by create_time rows 4 preceding) as money_count
        |  from (
        |    select
        |    create_time,
        |    sum(order_money) as money_sum,
        |    count(order_money) as money_count
        |    from temp01
        |    group by create_time
        |  ) as t1
        |) as t2
        |where hour(create_time) >= 4
        |""".stripMargin)
    result_data.createOrReplaceTempView("result_table")

    spark.sql("select  * from result_table limit 20").show

    //  Append the window results to ClickHouse shtd_result.slidewindowconsumption
    //  over JDBC.
    result_data.write
      .format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110/shtd_result")
      .option("user", "default")
      .option("password", "")
      .option("driver", "com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable", "slidewindowconsumption")
      .mode("append")
      .save()

    //  Release the SparkSession (close is an alias of stop).
    spark.close()
  }
}
