# Spark installation root. Honor a pre-set SPARK_HOME from the caller's
# environment and fall back to the bundled Spark 2.2 install otherwise.
# (The original assigned unconditionally, which made the emptiness check
# below dead code.)
SPARK_HOME=${SPARK_HOME:-/app/spark/spark2.2}
if [ -z "${SPARK_HOME}" ]; then
    echo "SPARK_HOME environment variable not defined." >&2
    exit 1
fi
# A main class is mandatory; bail out with the usage banner otherwise.
if [ "$#" -lt 1 ]; then
    printf '%s\n' \
        "------------------------------" \
        "USAGE: must with main class.eg . com.roy.sparkDemos.streaming.JavaKafkaWordCount" \
        "------------------------------"
    exit 8
fi
# Application name: used for --name and to locate a stale running instance.
MAIN_NAME="sparkDemos"
# Fully-qualified main class supplied as the first CLI argument.
MAIN_CLASS=$1

# Forward every argument after the main class to the application.
# Generalizes the original, which only forwarded $2..$5 and silently
# dropped anything beyond. The value stays a single space-joined string
# and is expanded unquoted at submit time, so it word-splits into
# separate application arguments exactly as before.
CLASS_PARAMS="${*:2}"
echo "${CLASS_PARAMS}"

PROJECT=sparkDemos
# Run from the script's own directory so the relative jar paths resolve.
cd "$(dirname "$0")" || exit 1
#MAIN_JAR="./lib/$PROJECT-*.jar"
MAIN_JAR="./sparkDemos.jar"
PATH_LIB=./lib

# Build the comma-separated --jars list from every dependency jar under
# ./lib, excluding the project's own jar(s) (the application jar is
# passed separately as $MAIN_JAR). A glob is used instead of parsing
# `ls` output, so paths with spaces are safe and an empty ./lib no
# longer prints errors. This also fixes the original seeding of the
# list with `head -1`, which could let a project jar slip past the
# exclusion filter when it sorted first.
JARS=
for jar in "$PATH_LIB"/*.jar; do
    [ -e "$jar" ] || continue            # no jars at all -> leave JARS empty
    case "$jar" in
        *"$PROJECT"*) continue ;;        # skip the project's own jar(s)
    esac
    if [ -z "$JARS" ]; then
        JARS=$jar
    else
        JARS="$JARS,$jar"
    fi
done
# Kill any already-running YARN application carrying the same name so
# the new submission does not compete with a stale instance.
# NOTE(review): grep matches $MAIN_NAME anywhere in the listing line, so
# an app whose name merely contains "sparkDemos" is also killed — confirm
# this is intended.
appId=$(yarn --config "${SPARK_HOME}/hconf" application -list | grep "$MAIN_NAME" | awk '{print $1}')
for id in $appId
do
   echo "kill app $id"
   yarn --config "${SPARK_HOME}/hconf" application -kill "$id"
done
# Submit the job to YARN in cluster mode, detached via nohup so it
# survives the shell; driver output lands in sparkDemos.out/.err.
# NOTE(review): $JARS, $MAIN_JAR and $CLASS_PARAMS are intentionally
# unquoted — $CLASS_PARAMS must word-split into separate application
# arguments. Comment lines cannot be interleaved below without breaking
# the backslash continuation, so all notes live here:
#  - spark.cleaner.ttl / spark.yarn.*.memoryOverhead are older config
#    names; presumably targeted at the Spark 2.2 install on L1 — verify
#    they are still honored if Spark is upgraded.
#  - backpressure is enabled alongside a 1000 msg/s receiver maxRate cap
#    and a 1250ms block interval for the streaming receivers.
nohup ${SPARK_HOME}/bin/spark-submit \
--name $MAIN_NAME \
--class $MAIN_CLASS \
--master yarn     \
--driver-java-options "-Dappname=$MAIN_NAME" \
--conf "spark.executor.extraJavaOptions=-Dappname=$MAIN_NAME"   \
--conf "spark.cleaner.ttl=3600"   \
--conf "spark.streaming.receiver.maxRate=1000"  \
--conf "spark.streaming.backpressure.enabled=true"  \
--conf "spark.sql.shuffle.partitions=4"   \
--conf "spark.streaming.concurrentJobs=4"   \
--conf "spark.default.parallelism=144"   \
--conf "spark.yarn.driver.memoryOverhead=1024"   \
--conf "spark.yarn.executor.memoryOverhead=2048"   \
--conf "spark.serializer=org.apache.spark.serializer.KryoSerializer"   \
--conf "spark.streaming.blockInterval=1250ms"   \
--conf "spark.scheduler.mode=FAIR"   \
--deploy-mode cluster     \
--driver-memory 2g     \
--num-executors 2 \
--executor-cores 2     \
--executor-memory 2g     \
--queue default     \
--jars $JARS    $MAIN_JAR $CLASS_PARAMS 1>sparkDemos.out 2>sparkDemos.err &
