package study.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Row, SparkSession}

/**
 * @author zh
 * @date 2023/7/18 15:45
 */
object TestSparkSql02 {

  /**
   * Entry point: connects to the standalone Spark master, runs a sample
   * query against a Hive table, and prints the first rows to stdout.
   *
   * NOTE(review): the original code mixed YARN settings
   * (`setMaster("yarn-cluster")`, `spark.submit.deployMode=cluster`,
   * `spark.yarn.queue=dev`) with a standalone master URL. Because
   * `spark.master` was set *last* to the standalone URL (and again via
   * `builder.master`), the YARN settings were dead configuration and the
   * job actually ran against the standalone cluster. The dead/conflicting
   * settings are removed here; the effective master is unchanged.
   * Driver/executor sizing (memory, cores, dynamic allocation) is best
   * passed via `spark-submit --conf` rather than hard-coded here.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setAppName("TestSQL01")
      // Standalone master — this was the effective value in the original
      // config chain; "yarn-cluster" is a deprecated master form anyway.
      .setMaster("spark://kylin-data00:7077")
      .set("spark.cleaner.referenceTracking", "true")
      .setSparkHome("/home/work/bin/spark")
      // NOTE(review): 50070 is the HDFS NameNode *HTTP* port; warehouse
      // paths normally use the RPC port (8020/9000) — confirm this URI.
      .set("spark.sql.warehouse.dir", "hdfs://kylin-data00:50070/user/hive/warehouse")

    val spark: SparkSession = SparkSession.builder
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    try {
      val dataset: Dataset[Row] = spark.sql("select * from app_40005.ODS_KYLIN_DICT_CSV_BRAND limit 10")
      dataset.show()
    } finally {
      // Always release cluster resources, even if the query fails.
      spark.stop()
    }
  }
}
