package com.bigdata.lk

import org.apache.spark.sql.SparkSession

object CustomerExtraction {

  // Partition value used when no CLI argument is supplied; preserved from
  // the original hard-coded literal for backward compatibility.
  private val DefaultEtlDate = "20230414"

  /**
   * Full extraction of the MySQL `CUSTOMER` table into the Hive ODS
   * partitioned table `ods.customer_lk`.
   *
   * All modules in this project perform the same full MySQL-to-Hive
   * extraction pattern; this one handles the customer table.
   *
   * @param args optional: args(0) = etldate partition value
   *             (defaults to "20230414" when omitted)
   */
  def main(args: Array[String]): Unit = {
    // Allow the ETL date to be overridden from the command line; fall back
    // to the original constant so existing zero-arg invocations still work.
    val etlDate = args.headOption.getOrElse(DefaultEtlDate)

    val spark = SparkSession.builder()
      .appName("CustomerExtraction")
      // Location of the Hive warehouse on HDFS.
      .config("spark.sql.warehouse.dir", "hdfs://master:9000/user/hive/warehouse")
      // Enable Hive support so we can write into Hive-managed tables.
      .enableHiveSupport()
      .getOrCreate()

    try {
      // JDBC connection string for the source MySQL database.
      val url = "jdbc:mysql://192.168.23.51/shtd_store?useSSL=false"

      // Read the full CUSTOMER table from MySQL.
      // NOTE(review): credentials are hard-coded; consider moving them to
      // job configuration or a secrets store.
      val jdbcDF = spark.read
        .format("jdbc")
        .options(Map(
          "url" -> url,
          "user" -> "root",
          "password" -> "123456",
          "dbtable" -> "CUSTOMER"
        ))
        .load()

      // Register the source data as a temp view for the INSERT below.
      // (The original created a second identical view via `select *`,
      // assigning the resulting Unit to an unused val; one view suffices.)
      jdbcDF.createOrReplaceTempView("customer_mysql")

      // Switch to the ods database and append the data into the target
      // partition. `insert into` appends, matching the original behavior.
      spark.sql("use ods")
      spark.sql(
        s"""
           |insert into table customer_lk
           |partition(etldate='$etlDate')
           |select
           |*
           |from
           |customer_mysql
           |""".stripMargin)
    } finally {
      // Release cluster resources even if the job fails.
      spark.stop()
    }
  }

}
