# coding:utf-8
import random
import numpy as np
import pickle
import sys
import tensorflow as tf
import pandas as pd
sys.path.append('../model')
from cnn import CNN

def batch_iter(x, y, batch_size):
    """Yield successive (x_batch, y_batch) mini-batches over x and y.

    The final batch may be smaller than `batch_size`. Shuffling is
    intentionally disabled (left commented out, as in the original).

    BUGFIX: the batch count used to be `(len(x)//batch_size)+1`, which
    yielded a trailing zero-length batch whenever len(x) was an exact
    multiple of batch_size (and one empty batch for empty input).
    Ceil division avoids that.
    """
    indexs = [i for i in range(len(x))]
    #random.shuffle(indexs)
    x = np.asarray(x)[indexs]
    y = np.asarray(y)[indexs]
    # Ceil division: covers a partial final batch, never an empty one.
    batch_nums = (len(x) + batch_size - 1) // batch_size
    for batch_num in range(batch_nums):
        start = batch_num * batch_size
        end = min((batch_num + 1) * batch_size, len(x))
        yield x[start:end], y[start:end]

def feed_dict(model, x, y):
    """Build the feed dict for one batch.

    NOTE(review): relies on the module-level global `feature_dim`
    (defined under `__main__`) for the pad placeholder's last axis —
    confirm it always matches the model's construction-time value.
    The pad tensor is all zeros with shape (batch, 1, feature_dim).
    """
    pad_block = np.zeros(shape=[len(x), 1, feature_dim])
    return {
        model.input_x: x,
        model.input_y: y,
        model.pad: pad_block,
    }


def train(model, train_datas, val_datas, y_train, y_val, batch_size, epoch):
    """Train `model` for `epoch` passes over the training data.

    Every 10th batch prints training and validation loss. After each
    epoch, runs prediction on the module-level global `test_datas` and
    overwrites ../datasets/cnn_result.csv with the predictions.

    FIX: the session used to be created with `tf.Session()` and never
    closed (`with sess.as_default()` does not release it); using the
    session itself as a context manager guarantees cleanup.
    """
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epoch):
            train_batch = batch_iter(train_datas, y_train, batch_size)

            for i, (x_batch, y_batch) in enumerate(train_batch):
                feed_data = feed_dict(model, x_batch, y_batch)
                sess.run(model.op, feed_dict=feed_data)
                if i % 10 == 0:
                    val_feed = feed_dict(model, val_datas, y_val)
                    print('epoch=', e, 'batch=', i,
                          'loss:', sess.run(model.loss_v, feed_dict=feed_data),
                          'val_loss:', sess.run(model.loss_v, feed_dict=val_feed))

            # NOTE(review): the inference feed omits model.pad, unlike the
            # training feed — confirm the graph does not need it at predict
            # time. `test_datas` is a module-level global set under __main__.
            test_result = pd.DataFrame(
                sess.run(model.y_preds, feed_dict={model.input_x: test_datas}),
                columns=['predict'])
            test_result.to_csv('../datasets/cnn_result.csv', index=True, encoding='utf-8')

if __name__ == '__main__':
    # Hyperparameters for the CNN model (sequence length, per-step feature
    # width, convolution filter widths/count, optimizer learning rate).
    max_seq_length = 8
    feature_dim = 6          # also read as a global by feed_dict()
    filter_sizes = [1, 2, 3, 4, 5, 6, 7, 8]
    filter_nums = 3
    learning_rate = 1e-3
    data_path = '../datasets/seq_train_val_data_1.pkl'
    batch_size = 64
    epoch = 200
    model = CNN(filter_sizes, feature_dim, filter_nums, max_seq_length, learning_rate)

    # FIX: use `with` so the pickle files are closed (the previous
    # `pickle.load(open(...))` form leaked the file handles).
    # NOTE(review): pickle is only safe on trusted local files.
    with open(data_path, 'rb') as f:
        train_datas, val_datas, y_train, y_val = pickle.load(f)
    # `test_datas` is read as a module-level global by train().
    with open('../datasets/test_feature_seq_1.pkl', 'rb') as f:
        test_datas = pickle.load(f)
    print('data loaded')
    train(model, train_datas, val_datas, y_train, y_val, batch_size, epoch)

