package DianShang_2024.ds_01.indicator

import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Properties
import scala.util.Properties

object indicator02 {
  /**
   * Indicator task #2.
   *
   * Reads the `dwd` layer (Hive) and computes, per province / region / month,
   * the order count and the total order amount, then overwrites the result
   * into the MySQL table `shtd_result.provinceeverymonth`.
   *
   * Original task description (translated): "Based on the dwd-layer tables,
   * compute for each province, each region and each month the number of orders
   * placed and the total order amount, and store the result in the
   * `provinceeverymonth` table of the MySQL database `shtd_result`. Then, in the
   * MySQL CLI on Linux, query the top 5 rows ordered descending by total order
   * count, total order amount and province primary key, and paste the SQL and
   * the result screenshot into the submission document."
   */
  def main(args: Array[String]): Unit = {

    // Set up the SparkSession with Hive support so dwd.* tables resolve.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第二题")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dwd")

    // JDBC connection properties for MySQL.
    // NOTE: `new java.util.Properties()` is fully qualified on purpose — the
    // file also imports `scala.util.Properties` (an object, not a class),
    // which shadows the Java class and made the unqualified `new Properties()`
    // fail to compile.
    // NOTE(review): `com.mysql.jdbc.Driver` is the legacy Connector/J 5.x
    // class name; Connector/J 8.x expects `com.mysql.cj.jdbc.Driver` — confirm
    // which connector jar is on the classpath before changing it.
    val jdbcConf = new java.util.Properties()
    jdbcConf.setProperty("user", "root")
    jdbcConf.setProperty("password", "123456")
    jdbcConf.setProperty("driver", "com.mysql.jdbc.Driver")

    // Aggregate orders joined with their province and region dimensions.
    //   - year(...)/month(...) extract the calendar year/month of create_time.
    //   - The etl_date = '20231017' predicates restrict every table to the
    //     same ETL snapshot, so only rows from that load participate in the join.
    val analyze: DataFrame = spark.sql(
      """
        |select
        |t1.province_id as provinceid,
        |t2.name as provincename,
        |t2.region_id as regionid,
        |t3.region_name as regionname,
        |sum(t1.final_total_amount) as totalconsumpation,
        |count(*) as totalorder,
        |year(t1.create_time) as year,
        |month(t1.create_time) as month
        |from dwd.fact_order_info as t1
        |join dwd.dim_province as t2
        |on t1.province_id=t2.id   and  t1.etl_date='20231017' and t2.etl_date='20231017'
        |join dwd.dim_region as t3
        |on t2.region_id=t3.id and t3.etl_date='20231017'
        |group by  provinceid,provincename,regionid,regionname,year,month
        |""".stripMargin)

    // Persist the aggregate to MySQL, replacing any previous contents of the
    // target table (mode "overwrite" drops/recreates provinceeverymonth).
    analyze
      .write
      .mode("overwrite")
      .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_result?useSSL=false", "provinceeverymonth", jdbcConf)

    // Release the SparkSession and its underlying SparkContext.
    spark.close()
  }

}
