package DianShang_2024.ds_server.indicator

import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

import java.sql.DriverManager
import java.util.Properties

object trait03 {
  def main(args: Array[String]): Unit = {
    /*
          Task 3: From the dwd/dws layer tables, compute each city's monthly average
          order amount and compare it with the monthly average order amount of the
          province the city belongs to ("高"/"低"/"相同" = higher/lower/same). Store
          the result in the cityavgcmpprovince table of the shtd_result database in
          ClickHouse, then (in the ClickHouse CLI) query the top 5 rows ordered by
          city average and province average order amount, both descending.
     */

    //  Build the SparkSession; Hive support is required to read the dwd/dws tables.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("指标计算第三题")
      .enableHiveSupport()
      .getOrCreate()

    spark.sql("use dws")

    // province_table: average order amount per province per month.
    // A column alias defined in a SELECT cannot be referenced by an expression in the
    // same SELECT, hence the subquery wrapping the aggregation.
    spark.sql(
      """
        |select
        |province_name,
        |month,
        |AllMonth_order_province,
        |AllMonth_money_province,
        |cast((AllMonth_money_province / AllMonth_order_province) as int)  as avg_province
        |from(
        |select
        |province as province_name,
        |month(date_format(to_timestamp(create_time,'yyyyMMdd'),'yyyy-MM-dd')) as month,
        |sum(order_money) as AllMonth_money_province,
        |count(*)  as AllMonth_order_province
        |from dwd_server.fact_order_master
        |group by province_name,month
        |) as province_t1
        |""".stripMargin).createOrReplaceTempView("province_table")

    spark.sql("select * from province_table limit 20").show

    //  city_table: average order amount per city per month.
    spark.sql(
      """
        |select
        |city_name,
        |province_name,
        |month,
        |AllMonth_money_city,
        |AllMonth_order_city,
        |cast((AllMonth_money_city / AllMonth_order_city) as int) as avg_city
        |from(
        |select
        |city as city_name,
        |province as province_name,
        |month(date_format(to_timestamp(create_time,'yyyyMMdd'),'yyyy-MM-dd')) as month,
        |sum(order_money) as AllMonth_money_city,
        |count(*) as AllMonth_order_city
        |from dwd_server.fact_order_master
        |group by province_name,city_name,month
        |) as t1_city
        |""".stripMargin).createOrReplaceTempView("city_table")

    spark.sql("select * from city_table limit 20").show

    //  Join each city-month row with the matching province-month row and compare the
    //  two averages. NOTE: the join must match on BOTH province_name AND month —
    //  joining on province_name alone cross-joins every city-month with every month
    //  of its province, duplicating rows and comparing averages of different months.
    val result_data: DataFrame = spark.sql(
      """
        |select
        |t1.province_name as province_name,
        |t1.month as month ,
        |t1.city_name as city_name,
        |t1.avg_city as avg_city,
        |t2.avg_province as avg_province,
        |if(
        |avg_city > avg_province ,'高',if(
        |avg_city < avg_province ,'低','相同'
        |)
        |)as comparison
        |from city_table as t1
        |join province_table as t2
        |on t1.province_name = t2.province_name and t1.month = t2.month
        |""".stripMargin)

    result_data.show()

    //  ClickHouse connection settings.
    val url = "jdbc:clickhouse://192.168.40.110:8123"
    val user = "default"
    val password = ""

    //  List the tables of shtd_result through a plain JDBC connection, closing the
    //  ResultSet/Statement/Connection in a finally block so they are not leaked
    //  if the query throws.
    val connection = DriverManager.getConnection(url, user, password)
    try {
      //  A Statement sends a single SQL string to the database.
      val statement = connection.createStatement()
      try {
        /*
              executeQuery: returns a ResultSet holding the query results.
              execute: returns a Boolean — true if there is a result set, false otherwise.
         */
        val database_name = "shtd_result"
        val sql = s"select name from system.tables where database= '$database_name' "
        //  Run the query and print every table name in the result set.
        val result = statement.executeQuery(sql)
        try {
          while (result.next()) {
            println(result.getString(1))
          }
        } finally {
          result.close()
        }
      } finally {
        statement.close()
      }
    } finally {
      //  Always release the connection, even on failure.
      connection.close()
    }

    //  Write the comparison result into ClickHouse.
    //  NOTE: do NOT use overwrite mode — it drops the remote table first and then
    //  fails because the table no longer exists; append is required here.
    result_data.write
      .format("jdbc")
      .option("url", "jdbc:clickhouse://192.168.40.110:8123/shtd_result")
      .option("user", user)
      .option("password", password)
      .option("driver", "com.clickhouse.jdbc.ClickHouseDriver")
      .option("dbtable", "cityavgcmpprovince")
      .mode("append")
      .save()

    //  Shut down the SparkSession.
    spark.close()
  }

}
