#!/bin/bash
source config/config.sh

# TODO: copy the startup script I use at work — check how the hosts were configured there for a local launch


# Number of ps and worker tasks to launch (positional args; default 0).
ps_num=${1:-0}
worker_num=${2:-0}

# Example results:
#   workers="localhost:100,localhost:101,localhost:102,localhost:103"
#   ps_hosts="localhost:1,localhost:2"

ip="localhost"
worker_start_index=100  # first worker port
ps_start_index=1        # first ps port

#######################################
# Build a comma-separated "<host>:<port>" list.
# Arguments:
#   $1 - host/ip
#   $2 - first port number
#   $3 - number of entries
# Outputs:
#   writes the list (no trailing comma) to stdout
#######################################
function make_host_list(){
  local host=$1 start=$2 count=$3
  local list="" j
  for ((j = 0; j < count; j++)); do
    list+="${host}:$((start + j)),"
  done
  # ${list%,} drops the trailing comma and, unlike ${list:0:len-1},
  # does not abort with "substring expression < 0" when count == 0.
  printf '%s' "${list%,}"
}

workers=$(make_host_list "${ip}" "${worker_start_index}" "${worker_num}")
echo "workers----------${workers}"

ps_hosts=$(make_host_list "${ip}" "${ps_start_index}" "${ps_num}")
echo "ps_hosts----------${ps_hosts}"






#######################################
# Launch one distributed-TF task in the background via launch.py.
# Arguments:
#   $1 - task type: "ps", "worker" or "chief"
#   $2 - task index within that type
#   $3 - comma-separated worker host list
#   $4 - comma-separated ps host list
# Globals:
#   batch_size (read) - presumably defined by config/config.sh; TODO confirm
# Outputs:
#   task stdout/stderr redirected to ./<task_type>_<index>.log
#######################################
function launch_task(){
  # 'local' keeps these from clobbering the globals of the same name
  # (notably workers/ps_hosts built at the top of the script).
  local task_type=$1
  local index=$2
  local workers=$3
  local ps_hosts=$4

  # Set the TF_CONFIG environment variable for TF distributed training.
  # source scripts/setTFConfig.sh ${ps_hosts} ${workers} ${task_type} ${index}

  # Environment variables consumed by launch.py.
  export ps_hosts=${ps_hosts}
  export worker_hosts=${workers}
  export type=${task_type}
  export index=${index}

  # launch task
  # TODO: the jemalloc .so lives under /root/modelzoo in the modelzoo image; copy it here to use it.
  # python launch.py --output_dir=./result --protocol=star_server --inter=8 --intra=8 --input_layer_partitioner=8 --dense_layer_partitioner=16 --batch_size=128
  nohup python launch.py --output_dir=./result --protocol=star_server --inter=8 --intra=8 --input_layer_partitioner=8 --dense_layer_partitioner=16 --batch_size=${batch_size} > "${task_type}_${index}.log" 2>&1 &
  # LD_PRELOAD=./lib/libjemalloc.so.2.5.1 python train.py

  echo "-------------------------"
}




#######################################
# Launch all parameter-server tasks.
# Arguments:
#   $1 - number of ps tasks
# Globals:
#   workers, ps_hosts (read) - host lists built at the top of the script
#######################################
function launch_ps(){
  # 'local' prevents clobbering the global ps_num / loop index.
  local num=$1 i
  for ((i = 0; i < num; i++)); do
    launch_task "ps" "$i" "${workers}" "${ps_hosts}"
  done
}

#######################################
# Launch all worker tasks.
# Arguments:
#   $1 - number of worker tasks
# Globals:
#   workers, ps_hosts (read) - host lists built at the top of the script
#######################################
function launch_worker(){
  local num=$1 i
  for ((i = 0; i < num; i++)); do
    launch_task "worker" "$i" "${workers}" "${ps_hosts}"
  done
}





# Kill leftover training processes from a previous run.
# Guard: if TF_SCRIPT (presumably set by config/config.sh — TODO confirm) is
# empty, the grep pattern degenerates and the pipeline could feed unrelated
# PIDs into 'kill -9', wiping out arbitrary processes — so skip cleanup instead.
if [[ -n "${TF_SCRIPT}" ]]; then
  # xargs -r: do not invoke kill at all when no PIDs matched.
  ps -ef | grep -- "${TF_SCRIPT}" | grep -v grep | awk '{print $2}' | xargs -r kill -9
else
  echo "TF_SCRIPT is not set; skipping cleanup of old processes" >&2
fi


# launch_task "chief" 0 ${workers} ${ps_hosts}
# launch_task "worker" 0 ${workers} ${ps_hosts}
launch_ps "${ps_num}"
launch_worker "${worker_num}"
