# coding=utf-8
"""
description:this file helps to load raw file and gennerate batch x,y
authore:nancy
date:4/5/2017
"""

from numpy import *
import numpy as np

# import cPickle as pkl
import readembed
from tensorflow.contrib import learn
import Position_Feature

# File paths.
# NOTE(review): hard-coded local Windows paths — override via set_dataset_path()
# or edit these constants when deploying elsewhere.

# Pre-trained binary word-embedding file (wiki corpus vectors).
embedFile = "E:/学习/王/data/wikivec1.bin"
#filename = "/usr/local/workplace/relation_classify/data/word_trainlable.txt"
#testfile = "/usr/local/workplace/relation_classify/data/word_testlable.txt"
#dataset_path = '/usr/local/workplace/relation_classify/data/word_trainlable.txt'  # the .pkl format is a Python serialization format for persistent storage

# Root directory of the train/test data files (used only for logging in load_data).
dataset_path = 'E:/学习/王/data/'

# dataset_path='/usr/local/workplace/relation_classify/data/subj0.pkl'  # the .pkl format is a Python serialization format for persistent storage
def set_dataset_path(path):
    """Override the module-level ``dataset_path`` used by load_data.

    Bug fix: the original assigned to a function-local variable, so the
    module-level ``dataset_path`` was never actually changed.
    """
    global dataset_path
    dataset_path = path


def set_vocab_processor(max_len):
    """Build a tf.contrib VocabularyProcessor that caps documents at max_len tokens."""
    return learn.preprocessing.VocabularyProcessor(max_len)

def load_ecoch():
    """Unimplemented stub (name is likely a typo for 'load_epoch').

    The original body was ``f = open()``, which always raises TypeError
    because open() requires a file argument.  Raise NotImplementedError
    explicitly so the broken stub fails with a clear message instead.
    """
    raise NotImplementedError("load_ecoch() is not implemented")
def load_data(max_len, hidden_neural_size, trainfile, testfile, valid_portion=0.1, sort_by_len=True,
              class_num=17):
    """Load train/test text and build padded, masked numpy datasets.

    Sentences are mapped to embedding-vocabulary indices via
    readembed.matchvocab, per-word position features are added via
    Position_Feature, and every split is zero-padded/truncated to max_len.

    Args:
        max_len: maximum sentence length (shorter are padded, longer truncated).
        hidden_neural_size: hidden size; per-word labels are tiled into
            (max_len, hidden_neural_size * 2) matrices to match the RNN output.
        trainfile: training file path passed to readembed.matchvocab.
        testfile: test file path passed to readembed.matchvocab.
        valid_portion: fraction of the training data held out for validation.
            NOTE(review): the validation split is computed but currently not
            returned (see the return statement below).
        sort_by_len: when True, sort each split by sentence length.
        class_num: number of relation classes (one-hot label width).

    Returns:
        (train_set, test_set), each a 7-tuple
        (x, y, z, z_label, d, mask_x, pos).
    """
    # Fixed: the original called print('... %s', dataset_path), which printed
    # a tuple instead of the formatted message.
    print('load data from %s' % dataset_path)

    # matchvocab presumably returns (index sequences, labels, per-word tags)
    # -- confirm against readembed.
    train_set_x, train_set_y, train_set_z = readembed.matchvocab(trainfile, embedFile)
    test_set_x, test_set_y, test_set_z = readembed.matchvocab(testfile, embedFile)

    def process_data(label):
        # Parse string-encoded label literals (e.g. "[0, 1, ...]") into a
        # numpy array.  ast.literal_eval replaces the original eval(): same
        # result for literal strings, but it cannot execute arbitrary code.
        import ast
        return np.array([ast.literal_eval(temp) for temp in label])

    train_set_z = process_data(train_set_z)
    train_set_y = process_data(train_set_y)
    test_set_y = process_data(test_set_y)
    test_set_z = process_data(test_set_z)

    def get_posvec(set_z):
        # For each sample collect the relative position of every word with
        # respect to the two entities, e.g. [(-1, -9), (0, -8), (1, -7) ...].
        set_d = []
        e_dis = []
        set_pos = []
        for sample in set_z:  # renamed from `str`, which shadowed the builtin
            d, pos_e1, pos_e2 = Position_Feature.position_feature(sample)
            set_d.append(d)
            set_pos.append((pos_e1, pos_e2))
            e_dis.append(Position_Feature.entity_distance(sample))
        return set_d, e_dis, set_pos

    train_set_d, _, train_set_pos = get_posvec(train_set_z)
    test_set_d, _, test_set_pos = get_posvec(test_set_z)

    # Shuffle the training indices, then hold out the tail valid_portion of
    # the shuffled order as the validation split.
    n_samples = len(train_set_x)
    print('*****************************')
    print(n_samples)
    sidx = np.random.permutation(n_samples)

    n_train = int(np.round(n_samples * (1. - valid_portion)))
    print(valid_portion, n_train)

    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    valid_set_z = [train_set_z[s] for s in sidx[n_train:]]
    # (Removed a dead valid_set tuple that was built here with the wrong z
    #  component -- it was unconditionally overwritten further down.)

    def len_argsort(seq):
        # Indices that order `seq` by element length, shortest first.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))

    if sort_by_len:
        # Sort every split so sentences appear from shortest to longest.
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        test_set_z = [test_set_z[i] for i in sorted_index]
        test_set_d = [test_set_d[i] for i in sorted_index]
        test_set_pos = [test_set_pos[i] for i in sorted_index]

        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]
        valid_set_z = [valid_set_z[i] for i in sorted_index]

        sorted_index = len_argsort(train_set_x)
        train_set_x = [train_set_x[i] for i in sorted_index]
        train_set_y = [train_set_y[i] for i in sorted_index]
        train_set_z = [train_set_z[i] for i in sorted_index]
        train_set_d = [train_set_d[i] for i in sorted_index]
        train_set_pos = [train_set_pos[i] for i in sorted_index]

    train_set = (train_set_x, train_set_y, train_set_z, train_set_d, train_set_pos)
    valid_set = (valid_set_x, valid_set_y, valid_set_z)
    test_set = (test_set_x, test_set_y, test_set_z, test_set_d, test_set_pos)

    # Pre-allocate the zero-filled padded arrays for every split.
    new_train_set_x = np.zeros([len(train_set[0]), max_len])
    new_train_set_y = np.zeros([len(train_set[0]), class_num])
    new_train_set_z = np.zeros([len(train_set[0]), max_len])
    new_train_set_d = np.zeros([len(train_set[0]), max_len * 2])

    # NOTE(review): the validation buffers below are allocated but unused
    # while the valid-set padding call remains disabled.
    new_valid_set_x = np.zeros([len(valid_set[0]), max_len])
    new_valid_set_y = np.zeros([len(valid_set[0]), class_num])
    new_valid_set_z = np.zeros([len(valid_set[0]), max_len])

    new_test_set_x = np.zeros([len(test_set[0]), max_len])
    new_test_set_y = np.zeros([len(test_set[0]), class_num])
    new_test_set_z = np.zeros([len(test_set[0]), max_len])
    new_test_set_d = np.zeros([len(test_set[0]), max_len * 2])

    # Masks marking real (non-padding) positions, shaped (max_len, n_samples).
    mask_train_x = np.zeros([max_len, len(train_set[0])])
    mask_test_x = np.zeros([max_len, len(test_set[0])])
    mask_valid_x = np.zeros([max_len, len(valid_set[0])])

    def padding_and_generate_mask(x, y, z, d, new_x, new_y, new_z, new_d, new_mask_x,
                                  basic, entity, relation, pos=None):
        # Zero-pad (or truncate) every sample to max_len and fill the mask.
        # NOTE(review): the basic/entity/relation flags are kept for
        # signature compatibility but intentionally ignored -- the original
        # code overwrote their selection and always returned the full
        # 7-tuple, so the dead branches have been removed.
        for i, (xi, yi, zi, di) in enumerate(zip(x, y, z, d)):
            if len(xi) <= max_len:
                new_x[i, 0:len(xi)] = xi
                new_mask_x[0:len(xi), i] = 1
                new_y[i, 0:len(yi)] = yi  # one-hot class-label row
                new_z[i, 0:len(zi)] = zi
                new_d[i, 0:len(di)] = di
            else:
                # Sentence exceeds max_len: truncate it.
                new_x[i] = xi[0:max_len]
                new_mask_x[:, i] = 1
                new_y[i, 0:len(yi)] = yi
                new_z[i] = zi[0:max_len]
                new_d[i] = di[0:max_len * 2]

        # Keep the flat per-word labels, then tile each label column into a
        # (max_len, hidden_neural_size * 2) matrix to match the RNN output.
        z_label = new_z
        s = np.mat(ones((1, hidden_neural_size * 2)))
        z_arr = []
        for zi in new_z:
            z_arr.append(np.mat(zi).T * s)
        new_z = np.array(z_arr)
        return (new_x, new_y, new_z, z_label, new_d, new_mask_x, pos)

    # All padded splits now share identical shapes, which eases batching.
    train_set = padding_and_generate_mask(train_set[0], train_set[1], train_set[2], train_set[3],
                                          new_train_set_x, new_train_set_y, new_train_set_z, new_train_set_d, mask_train_x,
                                          basic=False, entity=False, relation=True, pos=train_set_pos)

    test_set = padding_and_generate_mask(test_set[0], test_set[1], test_set[2], test_set[3],
                                         new_test_set_x, new_test_set_y, new_test_set_z, new_test_set_d, mask_test_x,
                                         basic=False, entity=False, relation=True, pos=test_set_pos)

    return train_set, test_set


# return batch dataset
def batch_iter(data, batch_size):
    """Yield fixed-size batches from a padded dataset.

    Args:
        data: 7-tuple (x, y, z, z_label, d, mask_x, pos) as produced by
            load_data.  mask_x is accepted for tuple compatibility but is
            neither batched nor yielded (same as the original behavior; the
            original computed mask slices and then discarded them).
        batch_size: number of samples per yielded batch.

    Yields:
        (x, y, z, z_label, d, pos) tuples of exactly batch_size samples; the
        final partial batch is topped up by wrapping around to the start of
        the dataset.
    """
    x, y, z, z_label, d, mask_x, pos = data
    x = np.array(x)  # materialize all samples as arrays for fast slicing
    y = np.array(y)
    z = np.array(z)
    d = np.array(d)
    data_size = len(x)
    # Floor division replaces int(float-division); identical for positive ints.
    num_batches_per_epoch = (data_size - 1) // batch_size
    for batch_index in range(num_batches_per_epoch + 1):
        start_index = batch_index * batch_size
        end_index = min((batch_index + 1) * batch_size, data_size)

        return_x = x[start_index:end_index]
        return_y = y[start_index:end_index]
        return_z = z[start_index:end_index]
        return_z_label = z_label[start_index:end_index]
        return_d = d[start_index:end_index]
        return_pos = pos[start_index:end_index]
        if len(return_x) < batch_size:
            # Pad the last batch with samples from the beginning of the data
            # so every yielded batch has exactly batch_size rows.
            need = batch_size - (end_index - start_index)
            return_x = np.concatenate((return_x, x[:need]), axis=0)
            return_y = np.concatenate((return_y, y[:need]), axis=0)
            return_z = np.concatenate((return_z, z[:need]), axis=0)
            return_z_label = np.concatenate((return_z_label, z_label[:need]), axis=0)
            return_d = np.concatenate((return_d, d[:need]), axis=0)
            return_pos = np.concatenate((return_pos, pos[:need]), axis=0)
        yield (return_x, return_y, return_z, return_z_label, return_d, return_pos)

