package industry_2024.industry_10.extract

import org.apache.hudi.DataSourceWriteOptions.{PARTITIONPATH_FIELD, PRECOMBINE_FIELD, RECORDKEY_FIELD}
import org.apache.hudi.QuickstartUtils.getQuickstartWriteConfigs
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, date_format, lit}

import java.text.SimpleDateFormat
import java.util.{Calendar, Properties}

object extract_count {
  def main(args: Array[String]): Unit = {
    // Spark session with Hive and Hudi support; Kryo serialization is required
    // by Hudi's write path.
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("数据抽取")
      .config("hive.exec.dynamic.partition.mode", "nonstrict")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
      // The two LEGACY settings below are needed when writing timestamp columns
      // (task 3): they rebase date/time values in the Avro files so older
      // Spark/Hive readers interpret them consistently.
      .config("spark.sql.legacy.avro.datetimeRebaseModeInWrite", "LEGACY")
      .config("spark.sql.legacy.avro.datetimeRebaseModeInRead", "LEGACY")
      .enableHiveSupport()
      .getOrCreate()

    // JDBC connection properties for the source MySQL instance.
    val connect = new Properties()
    Seq(
      "user"     -> "root",
      "password" -> "123456",
      "driver"   -> "com.mysql.jdbc.Driver"
    ).foreach { case (key, value) => connect.setProperty(key, value) }

    // Partition value used by every extraction: yesterday's date as yyyyMMdd.
    val yesterday = {
      val cal: Calendar = Calendar.getInstance()
      cal.add(Calendar.DATE, -1)
      new SimpleDateFormat("yyyyMMdd").format(cal.getTime)
    }

    // Task 1: extract a MySQL table into its Hudi (ODS) table unchanged — no
    // column conversions are required, only the `etldate` partition column is
    // appended with yesterday's date.
    def to_hive01(mysql_name: String, hive_name: String, primarykey: String, precombinefield: String): Unit = {
      val targetPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db/${hive_name}"
      val source = spark.read
        .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", mysql_name, connect)
      val partitioned = source.withColumn("etldate", lit(yesterday))
      partitioned.write
        .mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", hive_name)
        .save(targetPath)
    }

    // Task 2: extract ChangeRecord-style data. The two timestamp columns are
    // reformatted to "yyyy-MM-dd HH:mm:ss" strings before writing, then the
    // etldate partition column is added.
    def to_hive02(mysql_name: String, hive_name: String, primarykey: String, precombinefield: String): Unit = {
      val targetPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db/${hive_name}"
      val raw = spark.read
        .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", mysql_name, connect)
      // Normalize each timestamp column to its plain string representation.
      val normalized = Seq("ChangeStartTime", "ChangeEndTime").foldLeft(raw) { (df, c) =>
        df.withColumn(c, date_format(col(c), "yyyy-MM-dd HH:mm:ss"))
      }
      normalized
        .withColumn("etldate", lit(yesterday))
        .write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", hive_name)
        .save(targetPath)
    }

    // Task 3: extract BaseMachine-style data. The single timestamp column
    // MachineAddDate is reformatted to "yyyy-MM-dd HH:mm:ss" (this is the case
    // that needs the LEGACY Avro rebase configs on the session), then the
    // etldate partition column is added.
    def to_hive03(mysql_name: String, hive_name: String, primarykey: String, precombinefield: String): Unit = {
      val targetPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db/${hive_name}"
      val source = spark.read
        .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", mysql_name, connect)
      val prepared = source
        .withColumn("MachineAddDate", date_format(col("MachineAddDate"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("etldate", lit(yesterday))
      prepared.write
        .mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", hive_name)
        .save(targetPath)
    }



    // Task 4: extract ProduceRecord-style data with one column excluded.
    //
    // BUG FIX: the original accepted `drop_name` (the field to exclude, e.g.
    // "ProducePrgCode" at the call site) but never called .drop, so the column
    // was written to Hudi anyway. The .drop(drop_name) below implements the
    // documented behavior. The four production timestamp columns are
    // reformatted to "yyyy-MM-dd HH:mm:ss" strings and the etldate partition
    // column is added, as in the other extraction methods.
    def to_hive04(mysql_name: String, hive_name: String, primarykey: String, precombinefield: String, drop_name: String): Unit = {
      val hdfs_path = s"hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db/${hive_name}"
      spark.read.jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", mysql_name, connect)
        .drop(drop_name) // exclude the requested field (a no-op if the column is absent)
        .withColumn("ProduceStartWaitTime", date_format(col("ProduceStartWaitTime"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("ProduceCodeStartTime", date_format(col("ProduceCodeStartTime"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("ProduceCodeEndTime", date_format(col("ProduceCodeEndTime"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("ProduceEndTime", date_format(col("ProduceEndTime"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("etldate", lit(yesterday))
        .write.mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", hive_name)
        .save(hdfs_path)
    }

    // Task 5: extract MachineData-style data. The MachineRecordDate timestamp
    // column is reformatted to "yyyy-MM-dd HH:mm:ss" and the etldate partition
    // column is added before writing to Hudi.
    def to_hive05(mysql_name: String, hive_name: String, primarykey: String, precombinefield: String): Unit = {
      val targetPath = s"hdfs://192.168.40.110:9000/user/hive/warehouse/hudi_gy_ods10.db/${hive_name}"
      val source = spark.read
        .jdbc("jdbc:mysql://192.168.40.110:3306/shtd_industry?useSSL=false", mysql_name, connect)
      val prepared = source
        .withColumn("MachineRecordDate", date_format(col("MachineRecordDate"), "yyyy-MM-dd HH:mm:ss"))
        .withColumn("etldate", lit(yesterday))
      prepared.write
        .mode("append")
        .format("hudi")
        .options(getQuickstartWriteConfigs)
        .option(RECORDKEY_FIELD.key(), primarykey)
        .option(PRECOMBINE_FIELD.key(), precombinefield)
        .option(PARTITIONPATH_FIELD.key(), "etldate")
        .option("hoodie.table.name", hive_name)
        .save(targetPath)
    }



    // NOTE(review): extractions are run one task at a time; the remaining
    // calls are intentionally kept commented out so each table can be
    // re-extracted individually. Composite record keys are passed as a
    // comma-separated list (e.g. "ChangeID,ChangeMachineID").
//    to_hive01("EnvironmentData","environmentdata","EnvoId","InPutTime")
//      to_hive02("ChangeRecord","changerecord","ChangeID,ChangeMachineID","ChangeEndTime")
//    to_hive03("BaseMachine","basemachine","BaseMachineID","MachineAddDate")
//      to_hive04("ProduceRecord","producerecord","ProduceRecordID,ProduceMachineID","ProduceCodeEndTime","ProducePrgCode")
      to_hive05("MachineData","machinedata","MachineRecordID","MachineRecordDate")




    // Release the Spark session before the JVM exits.
    spark.close()
  }

}
