'''
We train the simple policy network, the complicated policy network, and the
value network separately.
The simple network is trained only on existing game records.
The complicated network is first trained on existing game records, then
refined through self-play.
The value network is trained with the same strategy as the complicated network.
'''
import sys
sys.path.append('../')
from utility.hdf5 import HDF5
from utility.keras_modal import DenseModel



if __name__ == "__main__":
    # Exactly one argument is required: which network to train
    # ('simple', 'c_p', or 'c_v').
    if len(sys.argv) != 2:
        print("err,no arguments")
        # Bug fix: the original fell through after printing and crashed with
        # IndexError at sys.argv[1]; abort with a non-zero status instead.
        sys.exit(1)

    # Paths for training data and for saving each network's weights.
    train_files = './train_files/game_recorders.h5'
    ai_weights_s = "./weights/ai_weights_s.h5"   # simple policy network
    ai_weights_c = "./weights/ai_weights_c.h5"   # complicated policy network
    ai_weights_v = "./weights/ai_weights_v.h5"   # value network

    # Open recorded games read-only; yeilds_alphago_data is passed as a
    # data generator into the model (NOTE(review): HDF5 is a project class —
    # presumably it handles closing the file itself; verify).
    games = HDF5(train_files, mode='r')

    if sys.argv[1] == 'simple':
        # Simple policy network: trained on existing game records only.
        model = DenseModel(boardSize=9, dataSize=1024 * 10, model='alpha_simple',
                           isAlphaGo=True, dataG_s=games.yeilds_alphago_data)
        model.p_d_compile()
        model.model_fit(batch_size=128, epochs=1, earlystop=0, checkpoint=False)
        model.model_save_weights(ai_weights_s)
    elif sys.argv[1] == 'c_p':
        # Complicated policy network.
        model = DenseModel(boardSize=9, dataSize=1024 * 10, model='alpha_complicate',
                           isAlphaGo=True, dataG_c=games.yeilds_alphago_data)
        model.p_d_compile()
        model.model_fit(batch_size=128, epochs=1, earlystop=0, checkpoint=False)
        model.model_save_weights(ai_weights_c)
    elif sys.argv[1] == 'c_v':
        # Value (evaluation) network: uses its own compile routine.
        model = DenseModel(boardSize=9, dataSize=1024 * 10, model='alpha_value',
                           isAlphaGo=True, dataG_v=games.yeilds_alphago_data)
        model.compile_alpha_go_value()
        model.model_fit(batch_size=128, epochs=1, earlystop=0, checkpoint=False)
        model.model_save_weights(ai_weights_v)
    else:
        print("err,arguments err")
        # Unrecognized mode: exit non-zero so callers/scripts can detect failure.
        sys.exit(1)
