# -*- coding: utf-8 -*-
# @Time    : 2019/5/30 10:23
# @Author  : DrMa
from tqdm import tqdm
import numpy as np

def load_data_CAIL(train_file, test_file, valid_file, word2id,
                   max_doc_len=400,
                   sos_label_id=107, eos_label_id=106):
    """Load the CAIL train/test/valid files and build (fact, label) pairs.

    Each input line is expected to look like
    ``fact-tokens<4 spaces>label-tokens`` where both parts are
    space-separated id strings.

    :param train_file: path of the training file
    :param test_file: path of the test file
    :param valid_file: path of the validation file
    :param word2id: vocabulary dict; ``word2id['BLANK']`` is the padding id
    :param max_doc_len: facts are truncated/padded to this many tokens
    :param sos_label_id: id prepended to every label sequence
    :param eos_label_id: id appended to every label sequence
    :return: ``xs_train, ys_train, xs_test, ys_test, xs_valid, ys_valid``.
        Label lists in ``ys_*`` have varying lengths; each batch must pad
        them itself (see ``batch_iter_CAIL``).
    """
    def process_data(file, max_doc_len, word2id):
        xs = []
        ys = []  # each y looks like [sos_label_id, zm1, zm2, ..., eos_label_id]
        # 'r' is enough (nothing is written back); `with` guarantees the
        # handle is closed even if a parse error is raised below.
        with open(file, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        for line in tqdm(lines):
            parts = line.strip('\n').split('    ')  # fact / labels, 4-space separated
            fact = parts[0].split(' ')  # e.g. ['1', '34', '56', ...]
            len_fact = len(fact)
            if len_fact > max_doc_len:
                fact_trunc = fact[:max_doc_len]
            else:
                # NOTE(review): pads with word2id['BLANK'] (likely an int)
                # while fact tokens stay strings — downstream must accept both.
                fact_trunc = fact + [word2id['BLANK']] * (max_doc_len - len_fact)
            xs.append(fact_trunc)
            zms = [sos_label_id] + parts[1].split() + [eos_label_id]
            ys.append(zms)
        return xs, ys

    xs_train, ys_train = process_data(train_file, max_doc_len, word2id)
    xs_test, ys_test = process_data(test_file, max_doc_len, word2id)
    xs_valid, ys_valid = process_data(valid_file, max_doc_len, word2id)

    return xs_train, ys_train, xs_test, ys_test, xs_valid, ys_valid

def batch_iter_CAIL(xs, ys,
                    batch_size, num_epochs,
                    eos_label_id=106, sentence_num_of_doc=40, shuffle=True):
    """Batch generator for CAIL data.

    :param xs: list of fixed-length fact token lists (from ``load_data_CAIL``)
    :param ys: list of variable-length label id lists
    :param batch_size: nominal batch size; the last batch of an epoch may be smaller
    :param num_epochs: number of passes over the data
    :param eos_label_id: id used to pad every label list up to the batch maximum
    :param sentence_num_of_doc: constant copied into ``num_sentence`` for each sample
    :param shuffle: whether to reshuffle the data at the start of each epoch
    :yield: ``(batch, label_len, num_sentence)`` where ``batch`` is an
        (batch, 2) object array of [fact, padded_labels] rows, ``label_len``
        holds the un-padded label length minus one, and ``num_sentence`` is
        ``sentence_num_of_doc`` repeated per sample.
    """
    data_size = len(xs)
    # Build an explicit (n, 2) object array: ragged label lists would make
    # np.asarray raise "inhomogeneous shape" on modern numpy, and this keeps
    # every cell a plain python list.
    data = np.empty((data_size, 2), dtype=object)
    for i, (x, y) in enumerate(zip(xs, ys)):
        data[i, 0] = x
        data[i, 1] = y
    # ceil so the trailing partial batch is yielded; round() could either
    # drop samples or index past the end of the data.
    num_batches_per_epoch = int(np.ceil(data_size / batch_size))
    for epoch in range(num_epochs):
        if shuffle:
            shuffled_data = data[np.random.permutation(data_size)]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            # shallow copy: padding below must never leak back into `data`
            # (the original extended the caller's label lists in place,
            # corrupting label lengths from the second epoch onward).
            trunc_batch = shuffled_data[start_index:end_index].copy()
            cur_size = end_index - start_index  # < batch_size only on the last batch
            max_label_len = max(len(trunc_batch[i][1]) for i in range(cur_size))
            label_len = []
            # was range(batch_size): IndexError whenever the final batch is short
            for i in range(cur_size):
                y_id = list(trunc_batch[i][1])  # copy before padding
                label_len.append(len(y_id) - 1)  # real label length of this sample
                if len(y_id) < max_label_len:
                    y_id.extend([eos_label_id] * (max_label_len - len(y_id)))
                trunc_batch[i][1] = y_id
            label_len = np.asarray(label_len, dtype=np.int32)
            # sized by the actual batch, not the nominal batch_size
            num_sentence = np.asarray([sentence_num_of_doc] * cur_size, dtype=np.int32)
            yield trunc_batch, label_len, num_sentence

def load_data_CJO(train_file, test_file, valid_file, word2id=None,
                  max_doc_len=300, max_law_len=500,
                  sos_label_id=132, eos_label_id=131):
    """Load the CJO train/test/valid files and build (fact, law, label) triples.

    Each file holds blank-line-separated 3-line records:
    line 0 = fact tokens, line 1 = law tokens, line 2 = charge label ids,
    all space-separated.

    :param train_file: path of the training file
    :param test_file: path of the test file
    :param valid_file: path of the validation file
    :param word2id: vocabulary dict; ``word2id['BLANK']`` is the padding id.
        NOTE(review): the default ``None`` crashes as soon as padding is
        needed — callers apparently always pass a real dict; confirm.
    :param max_doc_len: facts are truncated/padded to this many tokens
    :param max_law_len: law texts are truncated/padded to this many tokens
    :param sos_label_id: id prepended to every label sequence
    :param eos_label_id: id appended to every label sequence
    :return: fact/law/label lists for train, test, valid (9 values).
        Label lists have varying lengths; pad per batch (see ``batch_iter_CJO``).
    """
    def process_data(file, max_doc_len, max_law_len, word2id):
        def pad_or_trunc(tokens, max_len):
            # Truncate long sequences; pad short ones with the BLANK id.
            if len(tokens) > max_len:
                return tokens[:max_len]
            return tokens + [word2id['BLANK']] * (max_len - len(tokens))

        xs_fact = []
        xs_law = []
        ys_lbid = []  # each y looks like [sos_label_id, ..., eos_label_id]
        # 'r' is enough (nothing is written back); `with` guarantees the
        # handle is closed. strip('\n') == the original strip('\n\n'):
        # strip() takes a character set, not a substring.
        with open(file, 'r', encoding='utf-8') as f:
            samples = f.read().strip('\n').split('\n\n')
        for sample in tqdm(samples):
            sample_list = sample.split('\n')
            fact = sample_list[0].split(' ')
            law = sample_list[1].split(' ')
            zm_list = sample_list[2].split(' ')

            xs_fact.append(pad_or_trunc(fact, max_doc_len))
            xs_law.append(pad_or_trunc(law, max_law_len))
            ys_lbid.append([sos_label_id] + zm_list + [eos_label_id])
        return xs_fact, xs_law, ys_lbid

    xs_fact_train, xs_law_train, ys_lbid_train = process_data(train_file, max_doc_len, max_law_len, word2id)
    xs_fact_test, xs_law_test, ys_lbid_test = process_data(test_file, max_doc_len, max_law_len, word2id)
    xs_fact_valid, xs_law_valid, ys_lbid_valid = process_data(valid_file, max_doc_len, max_law_len, word2id)

    return xs_fact_train, xs_law_train, ys_lbid_train,\
           xs_fact_test, xs_law_test, ys_lbid_test,\
           xs_fact_valid, xs_law_valid, ys_lbid_valid

def batch_iter_CJO(xs_fact, xs_law, ys_lbid,
                   batch_size, num_epochs,
                   eos_label_id=131, sentence_num_of_doc=40, sentence_num_of_law=20,
                   shuffle=True):
    """Batch generator for CJO data (mirrors ``batch_iter_CAIL`` with a law column).

    :param xs_fact: list of fixed-length fact token lists
    :param xs_law: list of fixed-length law token lists
    :param ys_lbid: list of variable-length label id lists
    :param batch_size: nominal batch size; the last batch of an epoch may be smaller
    :param num_epochs: number of passes over the data
    :param eos_label_id: id used to pad every label list up to the batch maximum
    :param sentence_num_of_doc: constant copied into ``num_sentence_fact`` per sample
    :param sentence_num_of_law: constant copied into ``num_sentence_law`` per sample
    :param shuffle: whether to reshuffle the data at the start of each epoch
    :yield: ``(batch, label_len, num_sentence_fact, num_sentence_law)`` where
        ``batch`` is an (batch, 3) object array of [fact, law, padded_labels]
        rows and ``label_len`` is the un-padded label length minus one.
    """
    data_size = len(xs_fact)
    # Explicit (n, 3) object array: ragged label lists would make np.asarray
    # raise "inhomogeneous shape" on modern numpy.
    data = np.empty((data_size, 3), dtype=object)
    for i, (fact, law, lbid) in enumerate(zip(xs_fact, xs_law, ys_lbid)):
        data[i, 0] = fact
        data[i, 1] = law
        data[i, 2] = lbid
    # ceil covers every sample; the original round() plus range(n-1) silently
    # dropped the last batch of every epoch.
    num_batches_per_epoch = int(np.ceil(data_size / batch_size))
    for epoch in range(num_epochs):
        if shuffle:
            shuffled_data = data[np.random.permutation(data_size)]
        else:
            shuffled_data = data
        for batch_num in range(num_batches_per_epoch):
            start_index = batch_num * batch_size
            end_index = min((batch_num + 1) * batch_size, data_size)
            # shallow copy: padding below must never leak back into `data`
            # (extending the caller's lists in place corrupts label lengths
            # from the second epoch onward).
            trunc_batch = shuffled_data[start_index:end_index].copy()
            cur_size = end_index - start_index  # < batch_size only on the last batch
            # columns of trunc_batch: 0 = fact, 1 = law, 2 = label ids
            max_label_len = max(len(trunc_batch[i][2]) for i in range(cur_size))
            label_len = []
            for i in range(cur_size):
                y_id = list(trunc_batch[i][2])  # copy before padding
                label_len.append(len(y_id) - 1)  # real label length of this sample
                if len(y_id) < max_label_len:
                    y_id.extend([eos_label_id] * (max_label_len - len(y_id)))
                trunc_batch[i][2] = y_id
            label_len = np.asarray(label_len, dtype=np.int32)
            # sized by the actual batch, not the nominal batch_size
            num_sentence_fact = np.asarray([sentence_num_of_doc] * cur_size, dtype=np.int32)
            num_sentence_law = np.asarray([sentence_num_of_law] * cur_size, dtype=np.int32)
            yield trunc_batch, label_len, num_sentence_fact, num_sentence_law








