package com.example

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import java.net.InetAddress

/**
 * Spark driver entry point: connects to a standalone Spark cluster with Hive
 * support enabled and runs two diagnostic queries against the
 * `userprofile.customer_label` table.
 *
 * NOTE(review): all hosts, ports, and local tool paths below are hard-coded
 * for a specific test environment (Windows dev box + 172.28.40.x cluster);
 * consider externalizing them to configuration before wider use.
 */
object Server {
  def main(args: Array[String]): Unit = {
    // Windows-only workaround so Spark/Hadoop can locate winutils.exe locally.
    System.setProperty("HADOOP_HOME", "D:\\software\\hadoop-3.2.2")
    System.setProperty("hadoop.home.dir", "D:\\software\\hadoop-3.2.2")
    System.setProperty("SPARK_HOME", "D:\\software\\spark-3.1.2-bin-hadoop3.2")

    val conf: SparkConf = new SparkConf()
    // Common configuration shared across environments.
    conf.set("spark.kryoserializer.buffer.max", "1024m")
    conf.set("spark.dynamicAllocation.enabled", "true")
    conf.set("spark.dynamicAllocation.maxExecutors", "30")
    conf.set("spark.shuffle.service.enabled", "true")
    conf.set("spark.sql.shuffle.partitions", "100")
    conf.set("spark.sql.adaptive.join.enabled", "true")
    // 20 MiB broadcast-join threshold (default is 10 MiB).
    conf.set("spark.sql.autoBroadcastJoinThreshold", "20971520")
    conf.set("spark.sql.auto.repartition", "true")
    conf.set("hive.exec.dynamic.partition.mode", "nonstrict")
    conf.set("hive.exec.dynamic.partition", "true")
    conf.set("hive.exec.max.dynamic.partitions", "3000")
    conf.set("spark.debug.maxToStringFields", "100")

    // Test-environment-specific configuration.
    conf.setMaster("spark://172.28.40.172:7077")
    // Advertise this machine's address so executors can reach the driver.
    conf.set("spark.driver.host", InetAddress.getLocalHost.getHostAddress)
    conf.set("spark.hadoop.fs.defaultFS", "hdfs://172.28.40.170:9000")
    conf.set("spark.hive.metastore.uris", "thrift://172.28.40.172:9083")

    val spark: SparkSession =
      SparkSession
        .builder()
        .config(conf)
        .enableHiveSupport()
        .getOrCreate()

    // Ensure the session (and the application's cluster resources) are
    // released even if a query fails; previously the session was never stopped.
    try {
      spark.sql("show databases").show()
      spark.sql("select label_name, count(1) from userprofile.customer_label group by label_name").show()
    } finally {
      spark.stop()
    }
  }
}
