package com.leal.util

import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

import java.util

/**
 * @title: SparkUtil
 * @projectName bigdata
 * @description: Spark common utils
 * @author leali
 * @date 2022/5/14 17:09
 */
object SparkUtil {

  /**
   * Print a timestamped progress line to stdout.
   *
   * @param info message to log
   */
  def printLog(info: String): Unit = {
    println(DateUtil.getCurrentTime() + " Deal " + info)
  }

  /**
   * Initialise (or reuse) a SparkSession.
   *
   * @param mode       environment key handed to YamlUtil.getEnvInfo; only read when enableHive is true
   * @param master     Spark master URL, e.g. "local[*]" or "spark://node01:7077"
   * @param appName    application name shown in the Spark UI
   * @param enableHive when true, enable Hive support and configure warehouse/metastore from the YAML env
   * @return the active SparkSession
   */
  def initSpark(mode: String = "NODE_HIVE_MAC", master: String = "local[*]", appName: String = " Spark Test", enableHive: Boolean = false): SparkSession = {
    if (!enableHive) {
      initSimpleSparkSession(master, appName)
    }
    else {
      // envInfo is expected to contain "warehouse" and "metastore" entries — TODO confirm against YamlUtil
      val envInfo: util.LinkedHashMap[String, String] = YamlUtil.getEnvInfo(mode)
      SparkSession
        .builder()
        // Fix: honour the appName parameter (was hard-coded to "hive example", silently ignoring the argument)
        .appName(appName)
        .master(master)
        // spark 2+ uses spark.sql.warehouse.dir instead of hive.metastore.warehouse.dir
        .config("spark.sql.warehouse.dir", envInfo.get("warehouse"))
        .config("hive.metastore.uris", envInfo.get("metastore"))
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .config("spark.shuffle.file.buffer", "64K")
        .config("spark.reducer.maxSizeInFlight", "96M")
        .config("spark.executor.heartbeatInterval", "30000ms")
        .enableHiveSupport()
        .getOrCreate()
    }
  }

  /**
   * Stop the given SparkSession if it is non-null.
   *
   * Note: SparkSession.close() is just an alias delegating to stop(), so the
   * previous stop()+close() pair was redundant; a single stop() suffices.
   *
   * @param spark session to shut down; null is tolerated and ignored
   */
  def closeSpark(spark: SparkSession): Unit = {
    if (spark != null) {
      spark.stop()
    }
  }

  /**
   * Create a bare local-mode SparkContext with the given application name.
   *
   * @param appName application name shown in the Spark UI
   * @return a new SparkContext bound to master "local[*]"
   */
  def initSimpleSparkContent(appName: String): SparkContext = {
    new SparkContext(new SparkConf().setAppName(appName).setMaster("local[*]"))
  }

  /**
   * Build a plain (non-Hive) SparkSession.
   *
   * @param master  Spark master URL, e.g. "local[*]" or "spark://node01:7077"
   * @param appName application name shown in the Spark UI
   */
  private def initSimpleSparkSession(master: String = "local[*]", appName: String): SparkSession = {
    SparkSession
      .builder()
      // Fix: honour the appName parameter (was hard-coded to "hive example")
      .appName(appName)
      .master(master)
      .getOrCreate()
  }

  /**
   * Execute a (possibly multi-statement, ';'-separated) SQL script and return
   * the DataFrame produced by the final statement.
   *
   * @param spark SparkSession to run the statements on
   * @param sql   one or more SQL statements separated by ';'
   * @return result of the last non-empty statement
   * @throws IllegalArgumentException if the script contains no executable statement
   */
  def getDataFrameBySql(spark: SparkSession, sql: String): DataFrame = {
    // Robustness fix: trim fragments and drop blanks so trailing/duplicate
    // semicolons no longer send an empty string to spark.sql (a parse error).
    val statements: Array[String] = sql.split(";").map(_.trim).filter(_.nonEmpty)
    require(statements.nonEmpty, "No executable SQL statement found")
    // Run every statement for its side effects; only the last one's result is returned.
    statements.init.foreach(spark.sql)
    spark.sql(statements.last)
  }
}
