# -*- coding: utf-8 -*-
'''
Created on 2017年5月23日

@author: ZhuJiahui506
'''

import time
import os
from main.data_preparation import get_tag_embeddings, generate_batch
from main.main_process import sentiment_classify_train, sentiment_classify_test,\
    sentiment_classify_predict
from file_utils.file_reader import read_to_1d_list, read_to_2d_list

# Hyper-parameters shared by all entry points below.
padding_size = 250  # fixed length (word count) per text: longer texts are truncated, shorter ones padded
embedding_size = 200  # word-vector dimensionality
batch_size = 100  # number of samples per generated batch

def test_sentiment_classify_train():
    """Train the sentiment classifier on the segmented training corpus.

    Loads the segmented texts and their tags from ``<parent>/dataset/``,
    builds tag embeddings for 3 classes (presumably one-hot — confirm in
    ``data_preparation``), and runs ``sentiment_classify_train`` with the
    pretrained zhwiki word-embedding file. Prints elapsed wall time.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for timing a code section.
    start = time.perf_counter()
    now_directory = os.getcwd()
    # Data lives in the parent directory of the current working directory.
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + 'dataset/train_segment_texts.txt'
    read_filename2 = root_directory + 'dataset/train_segment_tags.txt'
    embedding_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    all_train_texts = read_to_2d_list(read_filename1, ' ')  # texts pre-split on spaces
    all_train_tags = read_to_1d_list(read_filename2)
    tag_embeddings = get_tag_embeddings(all_train_tags, 3)  # 3 = number of sentiment classes

    sentiment_classify_train(all_train_texts, tag_embeddings, embedding_filename)
    print('Total time %f seconds' % (time.perf_counter() - start))


def test_sentiment_classify_test():
    """Evaluate the trained sentiment classifier and print its accuracy.

    NOTE(review): this reads the *train* files, not a held-out test set —
    likely intentional for a smoke test, but verify.
    """
    # time.clock() was removed in Python 3.8; use perf_counter() instead.
    start = time.perf_counter()
    now_directory = os.getcwd()
    # Data lives in the parent directory of the current working directory.
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + 'dataset/train_segment_texts.txt'
    read_filename2 = root_directory + 'dataset/train_segment_tags.txt'
    embedding_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    all_test_texts = read_to_2d_list(read_filename1, ' ')  # texts pre-split on spaces
    all_test_tags = read_to_1d_list(read_filename2)
    tag_embeddings = get_tag_embeddings(all_test_tags, 3)  # 3 = number of sentiment classes

    test_x_batch, test_y_batch = generate_batch(all_test_texts, tag_embeddings, batch_size, padding_size, embedding_size, embedding_filename)
    this_accuracy = sentiment_classify_test(test_x_batch, test_y_batch)
    print("Testing Accuracy=" + "{:.5f}".format(this_accuracy))

    print('Total time %f seconds' % (time.perf_counter() - start))


def test_sentiment_classify_predict():
    """Run the classifier in prediction mode and print the class indices.

    Builds one batch from the (training) corpus and prints whatever
    ``sentiment_classify_predict`` returns — presumably predicted class
    indices; confirm against ``main_process``.
    """
    # time.clock() was removed in Python 3.8; use perf_counter() instead.
    start = time.perf_counter()
    now_directory = os.getcwd()
    # Data lives in the parent directory of the current working directory.
    root_directory = os.path.dirname(now_directory) + '/'
    read_filename1 = root_directory + 'dataset/train_segment_texts.txt'
    read_filename2 = root_directory + 'dataset/train_segment_tags.txt'
    embedding_filename = root_directory + 'dataset/zhwiki_word_embedding.txt'

    all_test_texts = read_to_2d_list(read_filename1, ' ')  # texts pre-split on spaces
    all_test_tags = read_to_1d_list(read_filename2)
    tag_embeddings = get_tag_embeddings(all_test_tags, 3)  # 3 = number of sentiment classes

    test_x_batch, test_y_batch = generate_batch(all_test_texts, tag_embeddings, batch_size, padding_size, embedding_size, embedding_filename)
    class_index = sentiment_classify_predict(test_x_batch, test_y_batch)
    print(class_index)

    print('Total time %f seconds' % (time.perf_counter() - start))


if __name__ == '__main__':
    # Uncomment exactly one entry point below to choose the stage to run;
    # prediction is the current default.
    # test_sentiment_classify_train()
    # test_sentiment_classify_test()
    test_sentiment_classify_predict()