package hamster.csustef.warehouse.util

import java.io.File

import org.apache.spark.sql.SparkSession

import scala.io.{BufferedSource, Source}

/**
 * Cleans ODS-layer data and lands it into the DWD layer
 * by executing an external SQL script through Spark SQL.
 */
object SparkSqlUtil {

  /**
   * Executes every statement of a semicolon-separated SQL script against a
   * local, Hive-enabled SparkSession, printing each statement and its result.
   *
   * The session is always stopped, even if one of the statements fails.
   *
   * @param filePath path to the SQL script file
   */
  def executeSqlFile(filePath: String): Unit = {

    val session: SparkSession = SparkSession.builder().master("local[*]")
      .appName("SparkExecuteSqlFile")
      .enableHiveSupport() // Hive support is required for the warehouse tables
      .getOrCreate()
    // Spark upper-cases the level internally, so "error" is accepted
    session.sparkContext.setLogLevel("error")

    try {
      val sqls: Array[String] = readSqlFile(filePath)
      println("=================执行开始=================")
      for (sql <- sqls) {
        println("=================sql语句内容=================")
        println(sql + ";")
        println("=================sql语句输出=================")
        session.sql(sql.trim()).show()
      }
      println("=================执行结束=================")
    } finally {
      // Guarantee the session is released even when a statement throws.
      session.stop()
    }
  }

  /**
   * Reads an external SQL script and splits it into individual statements.
   *
   * Fixes over the original version: the underlying source is closed in a
   * `finally` block (it was previously leaked), and blank fragments produced
   * by trailing semicolons or empty lines are filtered out so that
   * `session.sql("")` is never attempted.
   *
   * @param filePath path to the SQL script file
   * @return the non-empty, trimmed SQL statements in file order
   */
  def readSqlFile(filePath: String): Array[String] = {
    val buff: BufferedSource = Source.fromFile(new File(filePath))
    try {
      // Join lines back with '\n' so statements may span multiple lines,
      // then split on ';' (a plain character — no regex escaping needed).
      val sql: String = buff.getLines().mkString("\n")
      sql.split(";").map(_.trim).filter(_.nonEmpty)
    } finally {
      buff.close()
    }
  }
}
