#encoding: utf-8
import config
import sys
import models.text_capsnet as capsnet
import models.image_capsnet as image_capsnet
import data_load
import numpy as np
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# os.environ["CUDA_VISIBLE_DEVICES"] = ""

import tensorflow as tf
from keras.backend.tensorflow_backend import set_session


# Fix the random seeds so repeated runs reproduce the same training results.
np.random.seed(2019)
tf.set_random_seed(2019)

# Configure the shared Keras/TF session: cap GPU usage at 90% of device
# memory and let the allocation grow on demand instead of grabbing it all.
# NOTE(review): this session is created at import time, before __main__
# sets CUDA_VISIBLE_DEVICES — confirm the later device mask takes effect.
session_config = tf.ConfigProto()
session_config.gpu_options.per_process_gpu_memory_fraction = 0.9
session_config.gpu_options.allow_growth = True
set_session(tf.Session(config=session_config))
if __name__ == '__main__':

    args = config.get_args()

    # Ensure the checkpoint directory exists. try/except avoids the
    # check-then-create race of `if not exists: makedirs` (another process
    # could create the directory between the two calls).
    try:
        os.makedirs(args.save_ckpt)
    except OSError:
        # Re-raise only if the path is genuinely unusable; an already
        # existing directory is fine.
        if not os.path.isdir(args.save_ckpt):
            raise
    print(args.w2c)

    # Pin GPU selection for this process.
    # NOTE(review): a tf.Session is already created at module import time
    # above, so setting CUDA_VISIBLE_DEVICES here may be too late to take
    # effect — confirm device masking works as intended.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id

    if args.dataset not in ("mnist", "SougoCS"):
        # Generic text-classification datasets.
        seq_len, num_classes, vocab_size, x_train, y_train, x_test, y_test, w2v, word_idx = \
            data_load.preprocessing(args.data_path, args.dataset)

        print("Number of Train {}, Validation {}".format(
            len(x_train), len(x_test)))

        text_capsnet = capsnet.TextCapsnet(args,
                                           seq_len=seq_len,
                                           num_classes=num_classes,
                                           vocab_size=vocab_size,
                                           x_train=x_train,
                                           y_train=y_train,
                                           x_test=x_test,
                                           y_test=y_test,
                                           pretrain_vec=w2v)

        # training
        text_capsnet.train()
    elif args.dataset == "SougoCS":
        # SougoCS: hard-coded local dataset and pretrained-embedding paths.
        args.data_path = '/home/stu/Documents/dataset/sougo/'
        print("预训练文件夹")
        sougo_pre = "/home/stu/Documents/dataset/sougo/sougoCA_full/extart_w2c_cw2c/"
        # Embedding archives: plain word2vec vs. character-enhanced word2vec
        # (the cw archive is also reused for the optional second channel).
        w2c_npz = sougo_pre + 'sougoCA_200d.npz'
        cw2c_npz = sougo_pre + "sougoCA_cw200d.npz"

        seq_len, num_classes, vocab_size, x_train, y_train, x_dev, y_dev, x_test, y_test, word_idx \
            = data_load.preprocessing_sougo(args.data_path, args.dataset)

        # Pick which pretrained embedding matrix to load.
        if args.w2c == 1:
            print("使用w2c")
            pre_train = np.load(w2c_npz)["embeddings"].astype('float32')
        else:
            print("使用cw2c")
            pre_train = np.load(cw2c_npz)["embeddings"].astype('float32')
        if args.pre_train == 1:
            print('预训练词向量形状', pre_train.shape)
        else:
            print("不使用预训练词向量")
            # NOTE(review): the string "None" (not the None object) is passed
            # downstream — presumably TextCapsnet checks for this sentinel;
            # confirm before changing it to a real None.
            pre_train = "None"

        # Optional second (character-enhanced) embedding channel.
        if args.dual_channel == 1:
            pre_train_cw = np.load(cw2c_npz)["embeddings"].astype('float32')
            print("使用双通道词向量")
            print(type(pre_train_cw), pre_train_cw.shape)
        else:
            pre_train_cw = None
            print("不使用双通道词向量")  # fixed typo: 不适用 -> 不使用

        print('类别数量', num_classes)
        print('w2c', args.w2c, type(args.w2c))
        print('batch_size', args.batch_size)
        print(args.gpu_id)

        text_capsnet = capsnet.TextCapsnet(args,
                                           seq_len=seq_len,
                                           vocab_size=vocab_size,
                                           x_train=x_train,
                                           y_train=y_train,
                                           x_test=x_test,
                                           y_test=y_test,
                                           pretrain_vec=pre_train,
                                           num_classes=num_classes,
                                           pretrain_cw=pre_train_cw
                                        )
        text_capsnet.train()
    else:
        # MNIST: image capsule network with static routing.
        static_capsnet = image_capsnet.MnistExperiment(args, routing='static')
        static_capsnet.train()
