package com.study.utils

import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{Column, DataFrame, Row, SparkSession, functions}

object SparkUtils {

  /**
   * Builds (or reuses, via `getOrCreate`) a SparkSession configured for
   * local execution: two local cores, Kryo serialization and the native
   * ORC reader implementation.
   *
   * @return the process-wide SparkSession
   */
  def sparkSessionBuild(): SparkSession = {
    SparkSession.builder()
      .config("spark.master", "local[2]")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.orc.impl", "native")
      .getOrCreate()
  }

//  def sparkSessionBuild(devEnv: Boolean = false): SparkSession = {
//    val builder = SparkSession.builder()
//      .config("spark.port.maxRetries", 30)
//      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//      .config("spark.scheduler.mode", "FAIR")
//      .config("spark.sql.parquet.compression.codec", "snappy")
//      .config("spark.sql.files.ignoreCorruptFiles", "true")
//      .config("spark.sql.hive.convertMetastoreOrc", "true")
//      .config("spark.sql.hive.convertMetastoreParquet", "true")
//      .config("spark.sql.hive.convertInsertingPartitionedTable", "true")
//      .config("spark.rdd.compress", "true")
//      .enableHiveSupport()
//
//    if (devEnv)
//      builder.appName("spark-dev-demo")
//        .master("local[*]")
//
//    builder.getOrCreate()
//  }

  //df.groupBy(expr("length(word)")).count()
  /**
   * Wraps an Array[String] of SQL expressions into an Array[Column].
   *
   * NOTE: the input array is un-escaped IN PLACE (see
   * [[preprocessStringArgArray]]) before the expressions are parsed, so the
   * caller's array is mutated.
   *
   * @param exprs string SQL expressions
   * @return the parsed Column expressions, one per input string
   */
  def generateColumn(exprs: Array[String]): Array[Column] = {
    preprocessStringArgArray(exprs)
    // map replaces the original pre-sized array + index loop; same result,
    // and now consistent with generateExprCols below
    exprs.map(functions.expr)
  }

  /**
   * Splits a comma-separated select clause into individual expressions.
   * The split limit of -1 preserves trailing empty fields.
   *
   * @param exprStr comma-separated select fields
   * @return one entry per field, empty fields included
   */
  def generateSelectExprs(exprStr: String): Array[String] = {
    exprStr.split(",", -1)
  }


  /**
   * Same contract as [[generateColumn]]: un-escapes the array in place and
   * parses each string into a Column.
   *
   * @param exprs string SQL expressions (mutated in place)
   * @return the parsed Column expressions
   */
  def generateExprCols(exprs: Array[String]): Array[Column] = {
    preprocessStringArgArray(exprs)
    exprs.map(expr)
  }


  /**
   * Restores commas that were escaped as "#COMMA" before the source string
   * was split on commas. Mutates the given array IN PLACE.
   *
   * @param args expressions to un-escape, modified in place
   */
  def preprocessStringArgArray(args: Array[String]): Unit = {
    for (i <- args.indices) {
      // replace (literal) instead of replaceAll (regex): the token contains
      // no regex metacharacters, so behavior is identical, but intent — a
      // plain literal substitution — is now explicit
      args(i) = args(i).replace("#COMMA", ",")
    }
  }

  /**
   * Creates an empty DataFrame with the given schema (no rows).
   *
   * @param structType schema of the resulting (empty) DataFrame
   * @param spark      active session used to build the DataFrame
   * @return an empty DataFrame carrying the requested schema
   */
  def dynamicCreateDateFrame(structType: StructType, spark: SparkSession): DataFrame = {
    spark.createDataFrame(spark.sparkContext.emptyRDD[Row], structType)
  }


}
