package com.shujia.city.dwd

import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, SaveMode, SparkSession}

abstract class SparkTool extends Logging {

  /**
   * Shared entry point for all Spark jobs built on this tool.
   *
   * Validates the command-line arguments, builds a Hive-enabled
   * [[SparkSession]], delegates the job logic to the subclass's `run`
   * method, and always stops the session afterwards.
   *
   * @param args command-line arguments; `args(0)` is the partition value (ds)
   */
  def main(args: Array[String]): Unit = {
    logInfo("Spark job started")

    if (args.isEmpty) {
      logError("Partition argument (ds) was not supplied")
    } else {
      // Partition value, e.g. a date string such as "20240101".
      val ds: String = args.head

      val spark: SparkSession = SparkSession
        .builder()
        // stripSuffix removes only the trailing '$' that Scala appends to
        // companion-object class names (replace("$","") would strip any '$').
        .appName(this.getClass.getSimpleName.stripSuffix("$"))
        .enableHiveSupport()
        .getOrCreate()

      try {
        // Job-specific logic implemented by the concrete subclass.
        this.run(spark, ds)
        logInfo("Spark job finished")
      } finally {
        // Release cluster resources even if run() throws.
        spark.stop()
      }
    }
  }

  /**
   * Job-specific data-processing logic, implemented by each subclass.
   *
   * Typical imports inside an implementation:
   * {{{
   * import spark.implicits._
   * import org.apache.spark.sql.functions._
   * }}}
   *
   * @param spark the active, Hive-enabled SparkSession
   * @param ds    the partition value taken from the command line
   */
  def run(spark: SparkSession, ds: String): Unit

  /**
   * Saves a DataFrame as CSV files under the given path.
   *
   * Note: with the csv format, empty/null field values may be written as
   * a pair of double quotes.
   *
   * @param df       the DataFrame to save
   * @param path     output directory
   * @param sep      field separator (defaults to tab)
   * @param saveMode write mode (defaults to [[SaveMode.Overwrite]])
   */
  def writeToCsv(df: DataFrame, path: String, sep: String = "\t", saveMode: SaveMode = SaveMode.Overwrite): Unit = {
    df
      .write
      .format("csv")
      .mode(saveMode)
      .option("sep", sep)
      .save(path)
  }

  /**
   * Adds a `ds` partition to a Hive table if it does not already exist.
   *
   * NOTE(review): `tableName` and `ds` are interpolated directly into SQL;
   * both are assumed to come from trusted job configuration, not user input.
   *
   * @param spark     the active SparkSession
   * @param tableName fully-qualified Hive table name
   * @param ds        partition value
   */
  def addPartition(spark: SparkSession, tableName: String, ds: String): Unit = {
    spark.sql(
      s"""
         |alter table  $tableName add if not exists partition(ds='$ds')
         |""".stripMargin)
  }

  /**
   * Overwrites one `ds` partition of a Hive table with the DataFrame's rows,
   * using native Hive INSERT OVERWRITE semantics.
   *
   * The DataFrame's column order must match the target table's schema.
   * NOTE(review): the fixed temp-view name "tmp" could collide if two saves
   * run concurrently in the same session — confirm callers are sequential.
   *
   * @param spark     the active SparkSession
   * @param df        data to write; columns must align with the table schema
   * @param tableName fully-qualified Hive table name
   * @param ds        partition value to overwrite
   */
  def saveToTable(spark: SparkSession, df: DataFrame, tableName: String, ds: String): Unit = {
    df.createOrReplaceTempView("tmp")
    spark.sql(
      s"""
         |insert overwrite table $tableName partition(ds='$ds')
         |select * from
         |tmp
         |""".stripMargin)
  }

}
