package ds_industry_2025.industry

import org.apache.spark.sql.SparkSession

// todo hudi卷子的工业类型 dwd层的表格准备
// DWD-layer table preparation for the Hudi industry dataset.
// NOTE(review): object name keeps the existing (misspelled) identifier
// `perparation` because external callers/submit scripts reference it.
object hudi_dwd_table_perparation {

  /**
   * Entry point: builds a local SparkSession with Hudi support enabled,
   * (re)creates the `hudi_gy_dwd` database and its five DWD tables, then
   * closes the session.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("test")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      // Kryo is required by Hudi for efficient (de)serialization.
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // Enables Hudi SQL extensions (create table ... using hudi, etc.).
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    /**
     * Drops and recreates the `hudi_gy_dwd` database, then creates the five
     * DWD Hudi tables (all copy-on-write, partitioned by `etldate`).
     *
     * Fix vs. previous version: the hive-sync property key was misspelled
     * (`hive_aync` / `hhms` / missing `.mode`) for four of the five tables,
     * so Hive Metastore sync was silently not configured. All tables now use
     * the correct key `hoodie.datasource.hive_sync.mode = "hms"`.
     */
    def create_table(): Unit = {
      spark.sql("drop database if exists hudi_gy_dwd cascade")
      spark.sql("create database if not exists hudi_gy_dwd ")
      spark.sql("use hudi_gy_dwd")

      // fact_environment_data: environment sensor readings.
      spark.sql("drop table if exists fact_environment_data")
      spark.sql(
        """
          |create table if not exists fact_environment_data(
          |EnvoId String,
          |BaseID String,
          |CO2 String,
          |PM25 String,
          |PM10 String,
          |Temperature String,
          |Humidity String,
          |TVOC String,
          |CH2O String,
          |Smoke String,
          |InPutTime String,
          |dwd_insert_user String,
          |dwd_insert_time timestamp,
          |dwd_modify_user String,
          |dwd_modify_time timestamp
          |)using hudi
          |tblproperties(
          |type="cow",
          |primaryKey="EnvoId",
          |preCombineField="InPutTime",
          |hoodie.datasource.hive_sync.mode="hms"
          |)
          |partitioned by(etldate String)
          |""".stripMargin)
      println("fact_environment_data表格创建完成")

      // fact_change_record: machine state change records.
      spark.sql("drop table if exists fact_change_record")
      spark.sql(
        """
          |create table if not exists fact_change_record(
          |ChangeID int,
          |ChangeMachineID int,
          |ChangeMachineRecordID int,
          |ChangeRecordState string,
          |ChangeStartTime timestamp,
          |ChangeEndTime timestamp,
          |ChangeRecordData string,
          |ChangeHandleState int,
          |dwd_insert_user string,
          |dwd_insert_time timestamp,
          |dwd_modify_user string,
          |dwd_modify_time timestamp
          |)using hudi
          |tblproperties(
          |type="cow",
          |primaryKey="ChangeID,ChangeMachineID",
          |preCombineField="ChangeEndTime",
          |hoodie.datasource.hive_sync.mode="hms"
          |)
          |partitioned by(etldate string)
          |""".stripMargin)

      println("fact_change_record表格创建完成")

      // dim_machine: machine dimension (master data).
      spark.sql("drop table if exists dim_machine")
      spark.sql(
        """
          |create table if not exists dim_machine(
          |BaseMachineID int,
          |MachineFactory int,
          |MachineNo string,
          |MachineName string,
          |MachineIP string,
          |MachinePort int,
          |MachineAddDate timestamp,
          |MachineRemarks string,
          |MachineAddEmpID int,
          |MachineResponsEmpID int,
          |MachineLedgerXml string,
          |ISWS int,
          |dwd_insert_user string,
          |dwd_insert_time timestamp,
          |dwd_modify_user string,
          |dwd_modify_time timestamp
          |)using hudi
          |tblproperties(
          |type="cow",
          |primaryKey="BaseMachineID",
          |preCombineField="MachineAddDate",
          |hoodie.datasource.hive_sync.mode="hms"
          |)
          |partitioned by(etldate string)
          |""".stripMargin)

      println("dim_machine表格创建完成")

      // fact_produce_record: production run records.
      spark.sql("drop table if exists fact_produce_record")
      spark.sql(
        """
          |create table if not exists fact_produce_record(
          |ProduceRecordID int,
          |ProduceMachineID int,
          |ProduceCodeNumber string,
          |ProduceStartWaitTime timestamp,
          |ProduceCodeStartTime timestamp,
          |ProduceCodeEndTime timestamp,
          |ProduceCodeCycleTime int,
          |ProduceEndTime timestamp,
          |ProduceTotalOut int,
          |ProduceInspect int,
          |dwd_insert_user string,
          |dwd_insert_time timestamp,
          |dwd_modify_user string,
          |dwd_modify_time timestamp
          |)using hudi
          |tblproperties(
          |type="cow",
          |primaryKey="ProduceRecordID,ProduceMachineID",
          |preCombineField="ProduceCodeEndTime",
          |hoodie.datasource.hive_sync.mode="hms"
          |)
          |partitioned by(etldate string)
          |""".stripMargin)

      println("fact_produce_record表格创建完成")

      // fact_machine_data: raw machine telemetry records.
      spark.sql("drop table if exists fact_machine_data")
      spark.sql(
        """
          |create table if not exists fact_machine_data(
          |MachineRecordID int,
          |MachineID int,
          |MachineRecordState string,
          |MachineRecordData string,
          |MachineRecordDate timestamp,
          |dwd_insert_user string,
          |dwd_insert_time timestamp,
          |dwd_modify_user string,
          |dwd_modify_time timestamp
          |)using hudi
          |tblproperties(
          |type="cow",
          |primaryKey="MachineRecordID",
          |preCombineField="MachineRecordDate",
          |hoodie.datasource.hive_sync.mode="hms"
          |)
          |partitioned by(etldate string)
          |""".stripMargin)

      println("fact_machine_data表格创建完成")

    }

    // Create all DWD tables.
    create_table()

    spark.close()

  }

}
