

# 第一步创建spark-context
# curl -i -d "" 'http://10.5.24.18:8090/contexts/datascience-context?num-cpu-cores=5&memory-per-node=512M&spark.sql.crossJoin.enabled=true&context-factory=spark.jobserver.context.SessionContextFactory'
# curl -i -d "" 'http://10.5.24.18:8090/contexts/datascience-context?spark.executor.instances=25&spark.executor.memory=2g&spark.executor.cores=2&spark.sql.crossJoin.enabled=true&context-factory=spark.jobserver.context.SessionContextFactory'
# curl -i -d "" 'http://10.5.24.18:8090/contexts/datascience-context?spark.executor.instances=50&spark.executor.memory=2g&spark.executor.cores=2&spark.driver.memory=4g&spark.driver.maxResultSize=16g&spark.driver.cores=16&spark.sql.crossJoin.enabled=true&context-factory=spark.jobserver.context.SessionContextFactory'
# Step 2: build the fat jar, then upload it to the jobserver (see curl below).
# NOTE(review): 'assembly:assembly' normally runs the 'package' phase already;
# listing both goals may be redundant — confirm against the project's pom.xml.
mvn assembly:assembly package
#curl -X POST  http://10.5.24.18:8090/binaries/datascience-algo-1.0.0-SNAPSHOT.jar -H "Content-Type: application/java-archive" --data-binary @/Users/wangyizhong/code/aiworks/datascience-algo/target/datascience-algo-1.0.0-SNAPSHOT-jar-with-dependencies.jar
# 第三步执行算法提交，提交指定spark-context
#curl -d  'input.string=k-Means -s pipeline.wyz_test_1222_samll -k 3 -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -m 20 -t pipeline.solid_kmeans_12221 -uk 57936785531600 -idcol _record_id_ -id 5568' "http://10.5.24.18:8090/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context&sync=true&timeout=100000"
# 删除包
# curl -X DELETE 10.5.24.18:8090/binaries/datascience-algo-1.0.0-SNAPSHOT.jar

#######################################
# Print usage information and terminate the script.
# The original used Python-style triple quotes ("""…"""), which the shell
# parses as empty strings concatenated around the text; a here-doc is the
# idiomatic form.  Exits with status 1 so a bad invocation is detectable.
#######################################
function help() {
	cat <<-'USAGE'
	usage: sh cmd.sh tableName times
		tableName: 测试表名
		times: 执行次数
	USAGE
	exit 1
}

# Require exactly two positional arguments; otherwise print usage and exit.
if (( $# != 2 )); then
	help
fi

# pipeline.wyz_test_1222_samll_2k
# dataset.v_53_1611890301071_m 


# prod
# dataset.p_103_1618383341324_m
# dataset.b_103_1617155111413_m

# Table under test (first CLI argument).
tableName="$1"
# Number of benchmark iterations (second CLI argument).
times="$2"

# Spark jobserver endpoint and the target context to submit jobs into.
HOSTNAME="10.5.24.18:8090"
CONTEXT="datascience-context0"


#######################################
# Echo the current Unix epoch time in seconds.
# Outputs: seconds-since-epoch on stdout.
#######################################
function getTimeStamp() {
	local ts
	ts=$(date +%s)
	echo "$ts"
}

# Wall-clock start for the whole benchmark run.
start=$(getTimeStamp)

# Submit the job ${times} times against ${tableName}, timing each submission.
# Only the prefix-span algorithm is active; the other curl invocations are
# kept commented out so an engineer can switch algorithms by hand.
# Fixed: non-standard 'for (( )) { }' brace body -> do/done; backticks -> $();
# the active curl now uses ${CONTEXT} instead of repeating the literal
# context name (the variable holds the same value).
for (( i = 0; i < times; i++ )); do
	begin=$(getTimeStamp)
	echo "#${i} start=${begin}"
	#curl -d  'input.string=k-Means -s '${tableName}' -k 3 -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -m 20 -t pipeline.solid_kmeans_2104141_t_'${i}' -uk 57936785531600 -idcol _record_id_ -id 5568' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	#curl -d  'input.string=pca -s '${tableName}' -k 2 -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -t pipeline.solid_pca_210414_t_'${i}' -uk 57936785531600 -idcol _record_id_ -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	#curl -d  'input.string=statistics-anomaly -s '${tableName}' -f geos_ttime -t pipeline.statistics_anomaly_210414_t_'${i}' -uk 57936785531600 -idcol _record_id_ -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	#curl -d  'input.string=logistic -s '${tableName}' -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -m 20 -t pipeline.logistic_2104141_t_'${i}' -uk 57936785531600 -idcol _record_id_ -label month -enet 0.5 -tol 0.00001 -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	#curl -d  'input.string=linear -s '${tableName}' -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -m 20 -t pipeline.linear_1222_t_'${i}' -uk 57936785531600 -idcol _record_id_ -label month -enet 0.5 -tol 0.00001 -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	#curl -d  'input.string=fp-growth -s '${tableName}' -f "a1,a2,a3,a4,a5,a6" -t pipeline.fpgrowth_2104141_t_'${i}' -t2 pipeline.fpgrowth2_2104141_'${i}' -uk 57936785531600 -idcol _record_id_ -mins 0.3 -minc 0.6 -minf 3 -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	curl -d 'input.string=prefix-span -s '"${tableName}"' -f "a1,a2,a3,a4,a5,a6" -t pipeline.prefix_span_210414_t_'"${i}"' -uk 57936785531600 -idcol _record_id_ -mins 0.3 -minf 3 -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=${CONTEXT}&sync=true&timeout=100000"
	#curl -d  'input.string=isolation-forest -s '${tableName}' -f "avg_ttime,stdv_ttime,geom_ttime,geos_ttime" -t pipeline.isolation_forest_210414_t_'${i}' -uk 57936785531600 -idcol _record_id_ -tn 100 -ms 20 -c 0.1 -md 20 -id 5567' "http://${HOSTNAME}/jobs?appName=datascience-algo-1.0.0-SNAPSHOT.jar&classPath=org.zjvis.datascience.spark.AlgorithmAdaptor&context=datascience-context0&sync=true&timeout=100000"
	end=$(getTimeStamp)
	during=$((end - begin))
	echo "#${i} end=${end} during=${during}"
done

# Total elapsed time across all iterations (backticks replaced with $()).
finalEnd=$(getTimeStamp)
totalDuring=$((finalEnd - start))

# Average seconds per iteration (integer division).
# NOTE(review): this divides by ${times}; the argument check only enforces
# arity, not a positive value — times=0 would abort here. Confirm callers
# always pass a positive count.
avgTime=$((totalDuring / times))

echo "totalDuring=${totalDuring} avgTime=${avgTime}"




