package com.edata.bigdata.util

import org.apache.spark.sql.SparkSession


trait EDataSparkSession extends Serializable {

  // Active SparkSession; concrete implementations must initialise this.
  var SESSION: SparkSession

  // HDFS NameNode entrypoint(s), as a comma-separated host:port list.
  var HDFS_ENTRYPOINT = "localhost:8082,localhost:8082"
  // S3-compatible object storage (e.g. MinIO) connection settings.
  var S3_ENTRYPOINT = "localhost:9000"
  var S3_USER = "admin"
  var S3_PASSWORD = "12345678"
  var S3_BUCKET = "test"
  // PostgreSQL JDBC connection settings.
  var JDBC_PREFIX = "jdbc:postgresql://"
  var JDBC_IP = "localhost"
  var JDBC_PORT = "5432"
  // Placeholder database name; the original value repeated the port number.
  var JDBC_DATABASE = "edata_test"
  var JDBC_USER = "devuser"
  var JDBC_PASSWORD = "admin123"
  // PostgreSQL's batch-rewrite flag; rewriteBatchedStatements is MySQL-only.
  var JDBC_BATCHPARAMS = "reWriteBatchedInserts=true"
  // MongoDB connection settings (no authentication configured by default).
  var MG_IP = "172.168.36.159"
  var MG_PORT = "27017"
  var MG_DATABASE = "edata_test"
  var MG_COLLECTION = "edata_test"
  var MG_USER = ""
  var MG_PASSWORD = ""
  // Kafka producer settings.
  var KF_PRO_BOOTSTRAP = "127.0.0.1:9092"
  var KF_PRO_TOPIC = "spark_kafka"

  // Kafka consumer settings.
  var KF_CSM_BOOTSTRAP = "127.0.0.1:9092"
  var KF_CSM_GROUP_ID = "direct"
  var KF_CSM_TOPIC = "spark_kafka"
  // Nebula Graph connection settings.
  var NEBULA_CONN_META_ADDRESS = "127.0.0.1:9559"
  var NEBULA_CONN_GRAPH_ADDRESS = "127.0.0.1:9669"
  var NEBULA_CONN_GRAPH_USER = "root"
  var NEBULA_CONN_GRAPH_PASSWORD = "123"
  var NEBULA_CONN_MAX_CONN_SIZE = "1"
  var NEBULA_CONN_TIMEOUT = "30"
  var NEBULA_CONN_ENABLE_SSL = "false"
}
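
// A minimal usage sketch (not part of the original trait): a concrete object mixes
// in EDataSparkSession, initialises SESSION, and points Hadoop's s3a connector at
// the configured S3-compatible endpoint. The object name, app name, and master
// setting are illustrative assumptions.
object EDataLocalSession extends EDataSparkSession {

  var SESSION: SparkSession = SparkSession.builder()
    .appName("edata-local")
    .master("local[*]")
    // Route s3a:// access to the configured S3 endpoint with path-style addressing.
    .config("spark.hadoop.fs.s3a.endpoint", S3_ENTRYPOINT)
    .config("spark.hadoop.fs.s3a.access.key", S3_USER)
    .config("spark.hadoop.fs.s3a.secret.key", S3_PASSWORD)
    .config("spark.hadoop.fs.s3a.path.style.access", "true")
    .getOrCreate()

  // Full JDBC URL assembled from the configured parts, e.g.
  // jdbc:postgresql://localhost:5432/edata_test?reWriteBatchedInserts=true
  def jdbcUrl: String =
    s"$JDBC_PREFIX$JDBC_IP:$JDBC_PORT/$JDBC_DATABASE?$JDBC_BATCHPARAMS"
}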
