package trace;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;
import org.dom4j.DocumentException;
import trace.config.SparkSaConfig;

// NOTE(review): class name should be UpperCamelCase ("Databases"), but renaming
// would break existing callers and the .java filename contract — left as-is.
public class databases {

    /**
     * Builds (or reuses) a {@link SparkSession} configured to run on YARN with
     * Kryo serialization, talking to the HA HDFS cluster {@code beh001}.
     *
     * <p>Side effects: sets the {@code user.name} and {@code HADOOP_USER_NAME}
     * system properties so HDFS access is performed as the configured user.
     *
     * @return a SparkSession ({@code getOrCreate} returns any existing active session)
     * @throws DocumentException if {@code call-config.xml} cannot be parsed by
     *         {@link SparkSaConfig}
     */
    public static SparkSession getSpark() throws DocumentException {
        String cfgfile = "call-config.xml";
        String clusterName = "beh001"; // logical HDFS nameservice id
        SparkSaConfig config = new SparkSaConfig(cfgfile);

        // Impersonate the HDFS user for all Hadoop client calls.
        System.setProperty("user.name", "hdfs");
        System.setProperty("HADOOP_USER_NAME", config.hdfsUser);

        SparkConf conf = new SparkConf()
                .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                // FIX: key was misspelled "spark.rdd.comperess"; Spark silently
                // ignores unknown keys, so RDD compression was never enabled.
                .set("spark.rdd.compress", "true")
                .setMaster("yarn") // cluster mode; switch to local[*] for local runs
                .setAppName(config.name)
                .registerKryoClasses(new Class[]{String.class});

        // The serializer/compress/appName settings are already carried by `conf`,
        // so they are not repeated on the builder.
        SparkSession spark = SparkSession.builder()
                .config(conf)
                // Don't write _SUCCESS marker files on job completion.
                .config("mapreduce.fileoutputcommitter.marksuccessfuljobs", "false")
                // HA HDFS client settings: nameservice plus both namenode RPC endpoints
                // and the failover proxy provider that picks the active one.
                .config("fs.defaultFS", "hdfs://" + clusterName)
                .config("dfs.nameservices", clusterName)
                .config("dfs.ha.namenodes." + clusterName, "nn1,nn2")
                .config("dfs.namenode.rpc-address." + clusterName + ".nn1", "132.90.131.17:9000")
                .config("dfs.namenode.rpc-address." + clusterName + ".nn2", "132.90.131.19:9000")
                .config("dfs.client.failover.proxy.provider." + clusterName,
                        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
                // Add further configuration here as needed.
                .getOrCreate();
        return spark;
    }
}
