# coding:utf-8
'''
模型训练
@author:wangyi
'''
from model import Model
from data_helper import process
import tensorflow as tf
import random
from configs import config
import os
# Generate shuffled mini-batches for training.
def generate_batch(x, y, batch_size):
    """Yield aligned (x_batch, y_batch) pairs of up to `batch_size` samples.

    The samples are shuffled with a single shared permutation so that
    x and y stay paired. The final partial batch is also yielded
    (the previous loop bound truncated it even though the `min(...)`
    end computation shows tail inclusion was intended), so callers that
    average per-sample metrics see every sample.

    Note: x and y must support fancy indexing with an index list
    (e.g. numpy arrays); plain Python lists would raise TypeError here.
    """
    # Shuffle both arrays with the same permutation to keep pairs aligned.
    index = list(range(len(x)))
    random.shuffle(index)
    x = x[index]
    y = y[index]
    # Step by batch_size; the last slice may be shorter than batch_size.
    for start in range(0, len(x), batch_size):
        end = min(start + batch_size, len(x))
        yield x[start:end], y[start:end]

# Build the feed dict for one training/eval step.
def get_feed_dict(model, x_batch, y_batch, keep_prob):
    """Map the model's input, label and dropout placeholders to concrete
    batch values and return the resulting feed dict."""
    feed = dict()
    feed[model.input_x] = x_batch
    feed[model.input_y] = y_batch
    feed[model.keep_drop_prob] = keep_prob
    return feed

# Compute dataset-level loss and accuracy.
def get_acc_loss(model, sess, x, y, batch_size):
    """Evaluate `model` on (x, y) in mini-batches with dropout disabled
    (keep_prob=1.0) and return (mean_loss, mean_accuracy) weighted per sample.

    Fix: the averages are taken over the number of samples actually
    evaluated instead of len(x)/len(y) — the batch generator may drop a
    trailing partial batch, and dividing by len(x) then under-reports
    both metrics. Also returns (0.0, 0.0) instead of raising
    ZeroDivisionError when no batch is produced.
    """
    total_loss, total_acc = 0.0, 0.0
    seen = 0  # number of samples actually pushed through the model
    for x_batch, y_batch in generate_batch(x, y, batch_size):
        feed_dict = get_feed_dict(model, x_batch, y_batch, 1.0)
        loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)
        # Weight per-batch means by batch size so uneven batches average correctly.
        total_loss += loss * len(x_batch)
        total_acc += acc * len(x_batch)
        seen += len(x_batch)
    if seen == 0:
        # Dataset empty or smaller than one batch: nothing was evaluated.
        return 0.0, 0.0
    return total_loss / seen, total_acc / seen

def train():
    """Train the model, periodically evaluating on the validation set and
    saving a checkpoint whenever validation accuracy reaches a new best.

    All hyper-parameters and file paths come from the module-level
    `config` dict; data preparation is delegated to `data_helper.process`.
    """
    model = Model(config['model_name'], config['embedding_size'], config['seq_length'],
                  config['filter_sizes'], config['filter_nums'], config['num_class'],
                  config['vocab_size'], config['inital_learning_rate'], config['l2_lamda'],
                  config['decay_steps'], config['decay_rate'])
    word_to_id = process.create_word_label(config['train_file_path'], config['char_or_word'],
                                           config['vocab_file_path'], config['vocab_size'])
    label_to_id = process.get_label_id(config['label_file_path'])
    # Training set
    x_train, y_train = process.get_input_x_y_from_data(config['train_file_path'], config['char_or_word'],
                                                       word_to_id, label_to_id, config['seq_length'])
    # Validation set
    x_val, y_val = process.get_input_x_y_from_data(config['val_file_path'], config['char_or_word'],
                                                   word_to_id, label_to_id, config['seq_length'])

    # Let TensorFlow grow GPU memory on demand instead of reserving it all.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    # Fix: tf.initialize_all_variables() has been deprecated since TF 0.12;
    # tf.global_variables_initializer() is the supported equivalent.
    sess.run(tf.global_variables_initializer())
    # Model checkpointing
    saver = tf.train.Saver(tf.global_variables())
    # Make sure the checkpoint directory exists.
    if not os.path.exists(config['save_dir']):
        os.makedirs(config['save_dir'])

    # Fix: the original computed the global step as epoch*len(batches)+i,
    # but `batches` was a (x_batch, y_batch) tuple, so len(batches) was
    # always 2. Use the real number of batches per epoch instead.
    num_batches = max(1, len(x_train) // config['batch_size'])
    best_val_acc = 0.0
    best_step = 0
    for epoch in range(config['epoch']):
        data_batch = generate_batch(x_train, y_train, config['batch_size'])
        for i, (x_batch, y_batch) in enumerate(data_batch):
            feed_dict = get_feed_dict(model, x_batch, y_batch, config['keep_drop_prob'])
            sess.run(model.optim, feed_dict=feed_dict)
            if i % config['per_batch_print'] == 0:
                # Disable dropout while measuring metrics.
                feed_dict[model.keep_drop_prob] = 1.0
                # Training-set metrics on the current batch
                train_loss = sess.run(model.loss, feed_dict=feed_dict)
                train_acc = sess.run(model.acc, feed_dict=feed_dict)
                train_acc = round(train_acc * 100, 2)  # accuracy as a percentage, 2 decimals
                # Validation-set metrics
                val_loss, val_acc = get_acc_loss(model, sess, x_val, y_val, config['batch_size'])
                val_acc = round(val_acc * 100, 2)
                print('epoch:', epoch + 1, ',batch:', i, ',train_loss:', train_loss,
                      ',train_acc:', str(train_acc) + '%',
                      ',val_loss:', val_loss, ',val_acc:', str(val_acc) + '%')
                global_step = epoch * num_batches + i
                # NOTE(review): assigning model.decay_steps only mutates the
                # Python attribute; if the graph captured decay_steps at build
                # time this has no effect on the learning-rate schedule —
                # confirm against Model's implementation.
                if global_step - best_step > config['improved_step']:
                    model.decay_steps = global_step + 1
                # Save a checkpoint whenever validation accuracy improves.
                if val_acc > best_val_acc:
                    best_val_acc = val_acc
                    best_step = global_step
                    print('√')
                    saver.save(sess, save_path=os.path.join(config['save_dir'], config['checkpoint_path']))
    print('training is finished')

# Script entry point: run training when executed directly.
if __name__ == '__main__':
    train()
