package industry_2024.industry_10.clean

import org.apache.spark.sql.SparkSession

/**
 * Drops and (re)creates the Hudi DWD-layer tables used by the data-cleaning
 * stage, inside database `hudi_gy_dwd10`.
 *
 * All five tables share the same shape: business columns + four dwd audit
 * columns, stored `using hudi` as copy-on-write, partitioned by `etldate`,
 * with Hive sync in HMS mode. The per-table differences (name, business
 * columns, primary key, pre-combine field) are passed to [[createHudiTable]].
 */
object table_preparation {

  /**
   * Drops `tableName` if present, then creates it as a Hudi COW table.
   *
   * @param spark           active Hive-enabled SparkSession
   * @param tableName       target table in the current database
   * @param businessColumns table-specific column definitions, one `name type,`
   *                        per line (trailing comma included), already in the
   *                        `|`-margin style expected by stripMargin
   * @param primaryKey      Hudi record key (comma-separated for composites)
   * @param preCombineField column used to deduplicate upserts
   */
  private def createHudiTable(
      spark: SparkSession,
      tableName: String,
      businessColumns: String,
      primaryKey: String,
      preCombineField: String): Unit = {
    spark.sql(s"drop table if exists $tableName")
    spark.sql(
      s"""
         |create table if not exists $tableName(
         |$businessColumns
         |dwd_insert_user String,
         |dwd_insert_time timestamp,
         |dwd_modify_user String,
         |dwd_modify_time timestamp
         |)using hudi
         |tblproperties(
         |type="cow",
         |primaryKey="$primaryKey",
         |preCombineField="$preCombineField",
         |hoodie.datasource.hive_sync.mode="hms"
         |)
         |partitioned by(etldate String)
         |""".stripMargin)
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据清洗表格创建")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // NOTE: the database must be created beforehand from the Hive client;
    // creating it here via spark.sql does not work.
    spark.sql("use hudi_gy_dwd10")

    createHudiTable(spark, "fact_environment_data",
      """EnvoId String,
        |BaseID String,
        |CO2 String,
        |PM25 String,
        |PM10 String,
        |Temperature String,
        |Humidity String,
        |TVOC String,
        |CH2O String,
        |Smoke String,
        |InPutTime String,""".stripMargin,
      primaryKey = "EnvoId",
      preCombineField = "InPutTime")

    createHudiTable(spark, "fact_change_record",
      """ChangeID int,
        |ChangeMachineID int,
        |ChangeMachineRecordID int,
        |ChangeRecordState String,
        |ChangeStartTime timestamp,
        |ChangeEndTime timestamp,
        |ChangeRecordData String,
        |ChangeHandleState int,""".stripMargin,
      primaryKey = "ChangeID,ChangeMachineID",
      preCombineField = "ChangeEndTime")

    createHudiTable(spark, "dim_machine",
      """BaseMachineID int,
        |MachineFactory int,
        |MachineNo String,
        |MachineName String,
        |MachineIP String,
        |MachinePort int,
        |MachineAddDate timestamp,
        |MachineRemarks String,
        |MachineAddEmpID int,
        |MachineResponsEmpID int,
        |MachineLedgerXml String,
        |ISWS int,""".stripMargin,
      primaryKey = "BaseMachineID",
      preCombineField = "MachineAddDate")

    createHudiTable(spark, "fact_produce_record",
      """ProduceRecordID int,
        |ProduceMachineID int,
        |ProduceCodeNumber String,
        |ProduceStartWaitTime timestamp,
        |ProduceCodeStartTime timestamp,
        |ProduceCodeEndTime timestamp,
        |ProduceCodeCycleTime int,
        |ProduceEndTime timestamp,
        |ProduceTotalOut int,
        |ProduceInspect int,""".stripMargin,
      primaryKey = "ProduceRecordID,ProduceMachineID",
      preCombineField = "ProduceCodeEndTime")

    createHudiTable(spark, "fact_machine_data",
      """MachineRecordID int,
        |MachineID int,
        |MachineRecordState String,
        |MachineRecordData String,
        |MachineRecordDate timestamp,""".stripMargin,
      primaryKey = "MachineRecordID",
      preCombineField = "MachineRecordDate")

    // Sanity check: list the tables now present in the database.
    spark.sql("show tables").show

    spark.close()
  }

}
