package com.ctbri.manage.quality.scala.original
import org.apache.spark.sql._
import org.apache.spark.sql.types._
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Basic Spark operations shared by data-quality audit jobs: session setup
 * and data access helpers for Excel, JDBC, and Hive sources.
 *
 * @author songyunlong
 * @createTime 2023/6/4 22:58
 */
trait BasicOp {

    /** Logger shared by all operations mixed in from this trait. */
    val logger: Logger = LoggerFactory.getLogger(this.getClass)

    /**
     * Creates (or reuses) a SparkSession with the given app name and master URL.
     *
     * @param appName   application name shown in the Spark UI
     * @param masterSet Spark master URL (e.g. "local[*]", "yarn")
     * @param logLevel  log level applied to the SparkContext (e.g. "WARN")
     * @return a configured SparkSession
     */
    def envSet(appName: String, masterSet: String, logLevel: String): SparkSession = {
        val spark = SparkSession.builder
            .appName(appName)
            .master(masterSet)
            .getOrCreate()
        spark.sparkContext.setLogLevel(logLevel)
        spark
    }

    /**
     * Creates (or reuses) a SparkSession with Hive support enabled.
     *
     * @param appName   application name shown in the Spark UI
     * @param masterSet Spark master URL (e.g. "local[*]", "yarn")
     * @param logLevel  log level applied to the SparkContext
     * @return a Hive-enabled SparkSession
     */
    def envSetHive(appName: String, masterSet: String, logLevel: String): SparkSession = {
        val spark = SparkSession.builder
            .appName(appName)
            // BUG FIX: masterSet was accepted but never applied; without this the
            // session silently fell back to whatever master spark-submit supplied.
            .master(masterSet)
            .enableHiveSupport()
            .getOrCreate()
        spark.sparkContext.setLogLevel(logLevel)
        spark
    }

    /**
     * Reads an Excel file, inferring the schema automatically from the data.
     *
     * @param path  path to the Excel file
     * @param spark active SparkSession
     * @return the file contents as a Dataset of Rows
     */
    def getDataFromExcel(path: String, spark: SparkSession): Dataset[Row] = {
        spark.read
            .format("com.crealytics.spark.excel")
            .option("header", "true")
            .option("treatEmptyValuesAsNulls", "true")
            // infer column types automatically (extra pass over the data)
            .option("inferSchema", "true")
            .load(path)
    }

    /**
     * Reads an Excel file using an explicitly supplied schema (no inference pass).
     *
     * @param path       path to the Excel file
     * @param spark      active SparkSession
     * @param structType schema to apply to the file contents
     * @return the file contents as a Dataset of Rows
     */
    def getDataFromExcel(path: String, spark: SparkSession, structType: StructType): Dataset[Row] = {
        spark.read
            .format("com.crealytics.spark.excel")
            .option("header", "true")
            .option("treatEmptyValuesAsNulls", "true")
            .schema(structType)
            .load(path)
    }

    /**
     * Reads a table (or subquery) over JDBC.
     *
     * @param spark    active SparkSession
     * @param url      JDBC connection URL
     * @param driver   fully-qualified JDBC driver class name
     * @param user     database user
     * @param password database password
     * @param dbtable  table name or parenthesised subquery
     * @return the query result as a Dataset of Rows
     */
    def getDataFromJdbc(spark: SparkSession,
                         url: String, driver: String, user: String, password: String,
                         dbtable: String): Dataset[Row] = {
        spark.read.format("jdbc")
            .option("url", url)
            .option("driver", driver)
            .option("user", user)
            .option("password", password)
            .option("dbtable", dbtable)
            .load()
    }

    /**
     * Reads an entire Hive table via a `select *` query; logs the SQL at WARN.
     *
     * NOTE(review): databaseName/tableName are interpolated directly into SQL —
     * callers must only pass trusted identifiers.
     *
     * @param spark        active SparkSession (must have Hive support)
     * @param databaseName Hive database name
     * @param tableName    Hive table name
     * @return the table contents as a Dataset of Rows
     */
    def getDataFromHive(spark: SparkSession, databaseName: String, tableName: String): Dataset[Row] = {
        val sql = s"select * from $databaseName.$tableName"
        this.logger.warn(s"SqlText is $sql")
        spark.sql(sql)
    }

    /** Total number of rows in the dataset (triggers a Spark action). */
    def getDataTotalRowNum(data: Dataset[Row]): Long = data.count()

    /** Total number of columns in the dataset (metadata only, no action). */
    def getDataTotalColNum(data: Dataset[Row]): Long = data.columns.length

    /**
     * Saves an audit result dataset as a single output file.
     *
     * @param data      dataset to save; repartitioned to 1 to produce a single part file
     * @param path      output path — must be an absolute path and a dedicated
     *                  directory, because "overwrite" mode deletes its contents
     * @param fileStyle output format (e.g. "csv", "parquet")
     */
    def resultSave(data: Dataset[Row], path: String, fileStyle: String): Unit = {
        data.repartition(1)
            .write
            .option("header", "true")
            .mode("overwrite")
            .format(fileStyle)
            .save(path)
    }
}
