#!/bin/bash
#
# Driver script for training / evaluating a slim image classifier on the
# prostate patch dataset. Only the single-slide evaluation step is active;
# the training and whole-testset evaluation commands are kept below,
# commented out, for reference.
#
# Usage:
#   cd slim/scripts
#   ./run_pretrain.sh
set -euo pipefail

# -------------- Model -------------
MODEL_NAME=resnet_v2_50

# -------------- Pre-trained checkpoint path -------------
PRETRAINED_CHECKPOINT_DIR=/home/ramsley/workspace/pretrained_ckpt/${MODEL_NAME}

# -------------- Training output (checkpoint) path -------------
TRAIN_DIR=/home/ramsley/workspace/prostate/ckpt4test

# -------------- Evaluation log path -------------
# NOTE(review): defined but not referenced by the active command below.
eval_log_dir=/home/ramsley/workspace/prostate/eval

# -------------- Training data -------------
# DATASET_DIR=/home/ramsley/DataSet/prostate/trainingData/level-1-balance/patch-based-classification/tf-records
DATASET_DIR=/home/ramsley/DataSet/prostate/1-3/trainingData/patch-based-classification/tf-records
# DATASET_DIR=/home/ramsley/DataSet/prostate/single/
#
# -------------- Testing data -------------
# TEST_DATASET_DIR=/home/ramsley/DataSet/prostate/testingData/level-1-balance/patch-based-classification/tf-records
TEST_DATASET_DIR=/home/ramsley/DataSet/prostate/1-3/testingData/patch-based-classification/tf-records/

# The python entry points live one directory above this script's expected
# working directory (assumes the script is launched from slim/scripts/).
cd .. || exit 1

# -------------- Training -------------
# max_number_of_epochs: number of epochs to run
# optimizer: Adam works well — fast and stable convergence
# learning_rate: with a pretrained model use at most 0.001 (or lower); when
#     training from scratch the upper bound can be relaxed to 0.1;
#     with Adam the upper bound can be raised to 0.1, and a slightly larger
#     learning rate may give better results
# num_clones: number of GPUs to use
# learning_rate_decay_factor: for plain SGD, decay the learning rate by 50%
#     each step; for Adam this parameter is effectively unused
# learning_rate_decay_type: exponential decay is the usual choice
# num_epochs_per_decay: for plain SGD, the epoch period between learning-rate
#     decays; for Adam this parameter is (apparently) effectively unused
# weight_decay: regularization coefficient
# num_examples: total number of evaluation samples, must be given up front;
#     when count_num_examples=True the counted value overrides num_examples
# count_num_examples: whether to count the total number of test samples
# checkpoint_exclude_scope: selectively skip loading some checkpoint variables
# trainable_scopes: variables to train; e.g.
#     --trainable_scopes=resnet_v2_50/logits,resnet_v2_50/block3,resnet_v2_50/block4
#     if unspecified, all variables are updated
# python train_image_classifier.py \
# 	--train_dir="${TRAIN_DIR}/${MODEL_NAME}" \
# 	--dataset_dir="${DATASET_DIR}" \
# 	--model_name="${MODEL_NAME}" \
# 	--max_number_of_epochs=9 \
# 	--batch_size=64 \
# 	--optimizer=adam \
# 	--learning_rate=0.1 \
# 	--num_clones=4 \
# 	--learning_rate_decay_factor=0.5 \
# 	--learning_rate_decay_type=exponential \
# 	--num_epochs_per_decay=3.0 \
# 	--weight_decay=0.00004 \
# 	--num_examples=389915 \
# 	--count_num_examples=False \
# 	--checkpoint_path="${PRETRAINED_CHECKPOINT_DIR}/${MODEL_NAME}.ckpt" \
# 	--checkpoint_exclude_scopes="${MODEL_NAME}/logits"

# -------------- Whole-testset evaluation -------------
# train_log_path: path to train_log, read by the evaluator
# is_train_data: train/validation
# eval_log_dir: directory where the eval log is written
# num_examples: total number of evaluation samples, must be given up front;
#     when count_num_examples=True the counted value overrides num_examples
# count_num_examples: whether to count the total number of test samples
# python eval_image_classifier.py \
# 	--train_log_path="${TRAIN_DIR}/${MODEL_NAME}/train_log.txt" \
# 	--checkpoint_dir="${TRAIN_DIR}/${MODEL_NAME}" \
# 	--dataset_dir="${TEST_DATASET_DIR}" \
# 	--is_train_data=validation \
# 	--eval_log_dir="${TRAIN_DIR}/${MODEL_NAME}/eval" \
# 	--batch_size=12 \
# 	--num_examples=146188 \
# 	--count_num_examples=False \
# 	--model_name="${MODEL_NAME}"


# -------------- One-slide evaluation -------------
# train_log_path: path to train_log, read by the evaluator
# is_train_data: train/validation
# eval_log_dir: directory where the eval log is written
# write_eval_log: whether to record the evaluation log
# single_test_list: specific slides to test, given as A,B,...
python single_eval_image_classifier.py \
	--train_log_path="${TRAIN_DIR}/${MODEL_NAME}/train_log.txt" \
	--checkpoint_dir="${TRAIN_DIR}/${MODEL_NAME}" \
	--dataset_dir="${TEST_DATASET_DIR}" \
	--is_train_data=validation \
	--eval_log_dir="${TRAIN_DIR}/${MODEL_NAME}/eval" \
	--write_eval_log=True \
	--batch_size=256 \
	--single_test_list=None \
	--model_name="${MODEL_NAME}"