package ds_industry_2025.industry

import org.apache.spark.sql.SparkSession

//  TODO: prepare the ODS-layer tables (Hive format) for the industry exam dataset
object hive_ods_table_perparation {

  /**
    * Drops and recreates one ODS-layer Hive table partitioned by `etldate`,
    * then logs completion to stdout.
    *
    * @param spark   active Hive-enabled SparkSession
    * @param name    table name inside the current (`ods`) database
    * @param columns column definitions, one `name type` pair per line,
    *                without the surrounding parentheses
    */
  private def recreateTable(spark: SparkSession, name: String, columns: String): Unit = {
    spark.sql(s"drop table if exists $name")
    spark.sql(
      s"""create table if not exists $name(
         |$columns
         |)
         |partitioned by(etldate string)""".stripMargin)
    // Keep the original progress message format: "<table>表创建完成"
    println(s"${name}表创建完成")
  }

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("工业卷子的ods层表格准备hive类型")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      .enableHiveSupport()
      .getOrCreate()

    // Rebuild the ods database from scratch on every run.
    spark.sql("drop database if exists ods cascade")
    spark.sql("create database if not exists ods")
    spark.sql("use ods")

    // Machine state-change records.
    recreateTable(spark, "changerecord",
      """ChangeID int,
        |ChangeMachineID int,
        |ChangeMachineRecordID int,
        |ChangeRecordState string,
        |ChangeStartTime timestamp,
        |ChangeEndTime timestamp,
        |ChangeRecordData string,
        |ChangeHandleState int""".stripMargin)

    // Machine master data.
    recreateTable(spark, "basemachine",
      """BaseMachineID int,
        |MachineFactory int,
        |MachineNo string,
        |MachineName string,
        |MachineIP string,
        |MachinePort int,
        |MachineAddDate timestamp,
        |MachineRemarks string,
        |MachineAddEmpID int,
        |MachineResponsEmpID int,
        |MachineLedgerXml string,
        |ISWS int""".stripMargin)

    // Production run records.
    recreateTable(spark, "producerecord",
      """ProduceRecordID int,
        |ProduceMachineID int,
        |ProduceCodeNumber string,
        |ProduceStartWaitTime timestamp,
        |ProduceCodeStartTime timestamp,
        |ProduceCodeEndTime timestamp,
        |ProduceCodeCycleTime int,
        |ProduceEndTime timestamp,
        |ProduceTotalOut int,
        |ProduceInspect int""".stripMargin)

    // Raw machine telemetry.
    recreateTable(spark, "machinedata",
      """MachineRecordID int,
        |MachineID int,
        |MachineRecordState string,
        |MachineRecordData string,
        |MachineRecordDate timestamp""".stripMargin)

    // Environmental sensor readings.
    recreateTable(spark, "environmentdata",
      """EnvoId string,
        |BaseID string,
        |CO2 string,
        |PM25 string,
        |PM10 string,
        |Temperature string,
        |Humidity string,
        |TVOC string,
        |CH2O string,
        |Smoke string,
        |InPutTime string""".stripMargin)

    spark.close()
  }

}
