# -*- coding: utf-8 -*-
'''
Created on May 24, 2017

@author: ZhuJiahui506
'''

import time
import os
import numpy as np
from main.data_preparation import get_embedding_dict, get_batch_embeddings, generate_batch,\
    get_tag_embeddings
from file_utils.file_reader import read_to_1d_list, read_to_2d_list

padding_size = 250  # fixed token length per text: longer texts are truncated, shorter ones padded
embedding_size = 200  # dimensionality of each word embedding vector
batch_size = 128  # number of samples drawn per training batch

def test_get_embedding_dict():
    """Smoke-test get_embedding_dict by printing its first 21 entries.

    Reads the word-embedding file from dataset/ under the parent of the
    current working directory and reports total elapsed time.
    """
    # time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the documented replacement for elapsed-time measurement.
    start = time.perf_counter()
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    embedding_dict = get_embedding_dict(read_filename)

    # Show only a small sample (21 keys) so a huge vocabulary stays readable.
    for key_count, each in enumerate(embedding_dict):
        print(each + ': ', embedding_dict[each])
        if key_count >= 20:
            break
    print('Total time %f seconds' % (time.perf_counter() - start))


def test_get_tag_embeddings():
    """Smoke-test get_tag_embeddings on a tiny hand-written tag list."""
    sample_tags = ['0', '0', '1', '-1', '1', '-1']
    print(get_tag_embeddings(sample_tags, 3))


def test_get_batch_embeddings():
    """Smoke-test get_batch_embeddings on the first 100 training texts.

    Saves the embedding matrix of sample #4 to
    dataset/batch_embeddings_sample.txt for manual inspection and reports
    total elapsed time.
    """
    # time.clock() was removed in Python 3.8; perf_counter() replaces it.
    start = time.perf_counter()
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + 'dataset/train_segment_texts.txt'
    write_filename1 = root_directory + 'dataset/batch_embeddings_sample.txt'
    embedding_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    all_train_texts = read_to_2d_list(read_filename1, ' ')

    embedding_dict = get_embedding_dict(embedding_filename)
    batch_texts = all_train_texts[:100]

    # BUG FIX: the original passed embedding_dict twice and never used the
    # module-level embedding_size constant, while the parallel
    # generate_batch(..., padding_size, embedding_size, ...) call in
    # test_generate_batch takes embedding_size in this position.
    # TODO(review): confirm against get_batch_embeddings' actual signature.
    batch_embeddings = get_batch_embeddings(embedding_filename, batch_texts, embedding_dict, padding_size, embedding_size)
    print(batch_embeddings.shape)
    np.savetxt(write_filename1, batch_embeddings[4])
    print('Total time %f seconds' % (time.perf_counter() - start))


def test_generate_batch():
    """Smoke-test generate_batch on the full training texts and tags.

    Prints the shape of the generated batch embeddings and the batch tags,
    and reports total elapsed time.
    """
    # time.clock() was removed in Python 3.8; perf_counter() replaces it.
    start = time.perf_counter()
    now_directory = os.getcwd()
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + 'dataset/train_segment_texts.txt'
    read_filename2 = root_directory + 'dataset/train_segment_tags.txt'
    embedding_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    all_train_texts = read_to_2d_list(read_filename1, ' ')
    all_train_tags = read_to_1d_list(read_filename2)

    # 3 = number of distinct tag classes ('-1', '0', '1' as seen in
    # test_get_tag_embeddings's sample data).
    tag_embeddings = get_tag_embeddings(all_train_tags, 3)

    batch_embeddings, batch_tags = generate_batch(all_train_texts, tag_embeddings, batch_size, padding_size, embedding_size, embedding_filename)
    print(batch_embeddings.shape)
    print(batch_tags)
    print('Total time %f seconds' % (time.perf_counter() - start))

if __name__ == '__main__':
    # Enable individual smoke tests by uncommenting the relevant line.
    # test_get_embedding_dict()
    # test_get_tag_embeddings()
    # test_get_batch_embeddings()
    test_generate_batch()