package cn.hnzycfc.spark.datagen.youtube.util

import cn.hnzycfc.spark.datagen.youtube.command.DataGenCli
import cn.hnzycfc.spark.datagen.youtube.entity.CommandEntity
import com.typesafe.config.{Config, ConfigFactory, ConfigValue}
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}

import java.util.Map.Entry

object SparkAppHelper {
    private val logger: Logger = Logger.getLogger(SparkAppHelper.getClass)
    // Application defaults bundled on the classpath; the file content is
    // generated per build profile.
    private val config: Config = ConfigFactory.load("application.conf")

    /**
     * Builds (or reuses) the process-wide SparkSession.
     *
     * When the SparkConf carries no settings at all (i.e. nothing was supplied
     * externally, e.g. via spark-submit), every entry from application.conf is
     * copied into the conf — except keys already present as JVM system
     * properties, which take precedence over the bundled defaults.
     *
     * @return the shared SparkSession built from the resulting configuration
     */
    def getSparkSession(): SparkSession = {
        val conf = new SparkConf

        // No settings were supplied externally — fall back to bundled defaults.
        if (conf.getAll.isEmpty) {
            val sysProps = System.getProperties

            logger.info("加载应用默认配置:")
            config.entrySet().forEach { entry =>
                if (!sysProps.containsKey(entry.getKey)) {
                    // render() quotes string values (e.g. "\"local[*]\""); strip
                    // the quotes so Spark sees the raw value.
                    conf.set(entry.getKey, entry.getValue.render().replace("\"", ""))
                    logger.info(s"${entry.getKey}, ${entry.getValue}")
                }
            }
        }

        SparkSession
            .builder()
            .config(conf)
            .getOrCreate()
    }

    /**
     * Writes the DataFrame to cmdEntity.outputLocation in the requested format.
     *
     * Supported fileType values: "orc" (snappy-compressed) and "csv"
     * (\u0001-separated). Any other value is logged as a warning instead of
     * being silently skipped.
     *
     * @param df        the data to persist
     * @param cmdEntity carries the target fileType and outputLocation
     */
    def sinkData(df: DataFrame, cmdEntity: CommandEntity): Unit = {
        cmdEntity.fileType match {
            case "orc" =>
                df.write.option("orc.compress", "snappy").orc(cmdEntity.outputLocation)
            case "csv" =>
                df.write.option("sep", "\001").csv(cmdEntity.outputLocation)
            case other =>
                // Previously an unsupported type produced no output and no
                // diagnostic — surface it so misconfiguration is visible.
                logger.warn(s"Unsupported fileType '$other'; no data written")
        }
    }
}
