# -*- coding: utf-8 -*-
'''
Created on 2017年5月24日

@author: ZhuJiahui506
'''

import numpy as np

def get_embedding_dict(embedding_filename):
    '''
    Build a dictionary mapping each word to its line index in the embedding file.

    :param embedding_filename: path to the word-embedding text file; each line
        is expected to start with the word, followed by its vector components
    :return: dict of {word (str): 0-based line index (int)}
    '''
    embedding_dict = dict()
    with open(embedding_filename, 'r', encoding='utf-8') as f:
        # enumerate replaces the manual word_index counter; the index equals
        # the word's line position, which get_batch_embeddings relies on.
        for word_index, each_line in enumerate(f):
            split_line = each_line.strip().split()
            embedding_dict[split_line[0]] = word_index

    return embedding_dict


def get_tag_embeddings(all_tags, class_num, tag_list=('-1', '0', '1')):
    '''
    Convert class-label strings to one-hot vectors.

    :param all_tags: labels to encode (1d str list)
    :param class_num: number of classes (width of each one-hot vector)
    :param tag_list: ordered vocabulary of possible labels; a label's position
        here is its hot column. Default changed from a list to a tuple to
        avoid the shared mutable-default-argument pitfall (read-only use,
        so callers see identical behavior).
    :return: one-hot matrix (numpy 2d array), shape [len(all_tags), class_num]
    :raises ValueError: if a tag in all_tags is not present in tag_list
    '''
    tag_embeddings = np.zeros((len(all_tags), class_num), dtype=float)
    for i, tag in enumerate(all_tags):
        tag_embeddings[i, tag_list.index(tag)] = 1.0

    return tag_embeddings


def get_batch_embeddings(embedding_filename, batch_texts, embedding_dict, padding_size, embedding_size):
    '''
    Vectorize a batch of texts, representing each word by its embedding vector.

    Out-of-vocabulary words are skipped entirely (they consume no padding
    slot); rows beyond a text's last in-vocabulary word stay zero.

    :param embedding_filename: word-embedding text file; line order must match
        the indices stored in embedding_dict (see get_embedding_dict)
    :param batch_texts: batch of tokenized texts (str 2d list)
    :param embedding_dict: {word: line index} dictionary for the same file
    :param padding_size: fixed length each text is padded/truncated to
    :param embedding_size: dimensionality of each word vector
    :return: numpy 3d array, shape [batch_size, padding_size, embedding_size]
    '''
    batch_size = len(batch_texts)

    # Collect the line indices of every in-vocabulary word in the batch.
    word_sequence = []
    for each_text in batch_texts:
        for each_word in each_text:
            word_id = embedding_dict.get(each_word)
            if word_id is not None:
                word_sequence.append(word_id)

    # Distinct indices, ascending, so one sequential pass over the file
    # encounters them in order.
    word_unique_ids = sorted(set(word_sequence))

    # Read the needed vectors in a single pass. Guard against an empty id
    # list: previously an all-OOV batch crashed with IndexError on
    # word_unique_ids[0].
    embedding_table_dict = dict()  # {line index: (float 1d list)}
    if word_unique_ids:
        table_start_index = 0
        with open(embedding_filename, 'r', encoding='utf-8') as f:
            for each_line in f:
                split_line = each_line.strip().split()
                if embedding_dict.get(split_line[0]) == word_unique_ids[table_start_index]:
                    embedding_table_dict[word_unique_ids[table_start_index]] = [float(x) for x in split_line[1:]]
                    table_start_index += 1

                if table_start_index >= len(word_unique_ids):
                    break  # all required vectors loaded; stop reading the file

    # Assemble the padded batch tensor from the per-batch vector table.
    batch_outputs = np.zeros((batch_size, padding_size, embedding_size), dtype=float)
    for i, each_text in enumerate(batch_texts):
        inner_start_index = 0
        for each_word in each_text:
            word_id = embedding_dict.get(each_word)
            if word_id is None:
                continue  # OOV: leave the row for the next known word
            batch_outputs[i, inner_start_index] = embedding_table_dict[word_id]
            inner_start_index += 1
            if inner_start_index >= padding_size:
                break  # text truncated at padding_size known words

    return batch_outputs


def generate_batch(all_train_texts, all_train_tags, batch_size, padding_size, embedding_size, embedding_filename):
    '''
    Sample one random training batch: a text batch and its matching tag batch.

    :param all_train_texts: all tokenized training texts (2d str list)
    :param all_train_tags: one-hot tag matrix aligned with all_train_texts
        (numpy 2d array)
    :param batch_size: number of examples per batch
    :param padding_size: fixed length each text is padded/truncated to
    :param embedding_size: dimensionality of each word vector
    :param embedding_filename: word-embedding text file
    :return:
            batch_embeddings: vectorized texts (numpy 3d array),
                shape [batch_size, padding_size, embedding_size]
            all_train_tags[selected_index, :]: matching one-hot tags
                (numpy 2d array), shape [batch_size, class_num]
    '''

    # Draw batch_size distinct random indices; sorting keeps the selected
    # examples in their original relative order.
    all_index = np.arange(len(all_train_texts))
    np.random.shuffle(all_index)
    selected_index = sorted(all_index[:batch_size])

    batch_texts = [all_train_texts[idx] for idx in selected_index]

    # NOTE(review): the word-index dictionary is rebuilt from the embedding
    # file on every batch; callers could cache it if this becomes a hotspot.
    embedding_dict = get_embedding_dict(embedding_filename)
    batch_embeddings = get_batch_embeddings(embedding_filename, batch_texts, embedding_dict, padding_size, embedding_size)

    return batch_embeddings, all_train_tags[selected_index, :]


if __name__ == '__main__':
    # Module is meant to be imported; no standalone entry-point behavior.
    pass