#!/bin/bash
# Submit the AuditStreamDriver Spark Streaming job to YARN in cluster mode.
# Fire-and-forget: spark.yarn.submit.waitAppCompletion=false means this
# script returns as soon as the application is accepted by YARN.
set -euo pipefail

# HDFS operations performed by the job run as the hdfs superuser.
export HADOOP_USER_NAME=hdfs

readonly SPARK_SUBMIT=/usr/hdp/current/spark2-client/bin/spark-submit
readonly MAIN_CLASS=com.cetc.sdp.kmga.cs.stream.nv.AuditStreamDriver
readonly APP_JAR=/home/bbp/collection_service-1.0.jar

# Fail fast with a clear message instead of a cryptic cluster-side error.
[[ -x "$SPARK_SUBMIT" ]] || { echo "error: spark-submit not found/executable: $SPARK_SUBMIT" >&2; exit 1; }
[[ -f "$APP_JAR" ]] || { echo "error: application jar not found: $APP_JAR" >&2; exit 1; }

# Build the argument list as an array so every option stays a single word
# and there are no fragile backslash line continuations.
submit_args=(
  --class "$MAIN_CLASS"
  --master yarn
  --deploy-mode cluster
  --executor-memory 4g
  --executor-cores 8
  --num-executors 4
  # Throttle Kafka ingestion so batches don't pile up under load.
  --conf spark.streaming.backpressure.enabled=true
  --conf spark.streaming.kafka.maxRatePerPartition=250
  --conf "spark.executor.extraJavaOptions=-XX:+UseConcMarkSweepGC"
  # Retry policy: tolerate transient AM/executor/task failures, with
  # failure counts reset after a 30-minute validity window.
  --conf spark.yarn.maxAppAttempts=4
  --conf spark.locality.wait=100ms
  --conf spark.yarn.am.attemptFailuresValidityInterval=30m
  --conf spark.yarn.max.executor.failures=16
  --conf spark.yarn.executor.failuresValidityInterval=30m
  --conf spark.task.maxFailures=8
  # Finish in-flight batches on shutdown instead of dropping them.
  --conf spark.streaming.stopGracefullyOnShutdown=true
  # Don't block this shell while the app runs on the cluster.
  --conf spark.yarn.submit.waitAppCompletion=false
  --queue SparkStreaming
)

# Application arguments: app name plus "30 60" — presumably batch-interval
# and window seconds; TODO(review): confirm against AuditStreamDriver.
"$SPARK_SUBMIT" "${submit_args[@]}" "$APP_JAR" AuditStreaming 30 60
