
import tensorflow as tf
import re
import numpy as np
import random

# Processing tokens: auxiliary tokens added to the vocabulary.
# <GO> separates a question from its reply.
# <PAD> pads questions/replies out to a uniform length.
_PAD = b"_PAD"  # padding token, used to pad sequences to equal length
_GO = b"_GO"    # <start>
_EOS = b"_EOS"  # <end>, end of sentence
_UNK = b"_UNK"      # stands in for words never seen before (unknown token)
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]

# Fixed indices of the auxiliary tokens: _START_VOCAB is always prepended
# to the vocabulary list, so these ids match its positions.
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3

# Regular expressions used to tokenize.
# _WORD_SPLIT = re.compile("([.,!?\"':;)(])")
_DIGIT_RE = re.compile(br"\d")
documents_nums = 10662  # presumably total documents in rt.neg + rt.pos — TODO confirm

stopwords = []  # module-level stopword list, filled by read_stopwords()

def read_stopwords(path='../data/Foxstoplist.txt'):
    """Load stopwords (one per line) into the module-level ``stopwords`` list.

    Args:
        path: stopword file location. Defaults to the original hard-coded
            path so existing callers are unaffected.
    """
    # Iterate the file lazily instead of materializing readlines(), and
    # pin the encoding so results do not depend on the platform locale.
    with open(path, 'r', encoding='utf-8') as reader:
        for line in reader:
            stopwords.append(line.strip())

# Pre-compiled tokenizer patterns, hoisted out of the per-line loop.
# _TOKEN_SPLIT_RE splits on the five delimiter characters: space ' - [ ]
_TOKEN_SPLIT_RE = re.compile(r" |\'|-|\[|\]")
# A kept token must start with an ASCII letter.
_WORD_START_RE = re.compile(r'^[a-zA-Z]')

def get_features_from_files(path, stop_words=None):
    """Read one document per line from *path* and tokenize it.

    Each line is split on the five delimiters " ' - [ ]"; tokens that
    start with an ASCII letter and are not stopwords are kept.

    Args:
        path: text file containing one document per line.
        stop_words: optional collection of words to drop. Defaults to the
            module-level ``stopwords`` list (backward compatible).

    Returns:
        list[list[str]]: one token list per input line.
    """
    if stop_words is None:
        stop_words = stopwords
    # Set membership is O(1); the original scanned a list per token.
    stop_set = set(stop_words)
    features = []
    with open(path, 'r', encoding='utf-8') as reader:
        for line in reader:
            tokens = _TOKEN_SPLIT_RE.split(line.strip())
            words = [w for w in tokens
                     if w not in stop_set and _WORD_START_RE.match(w)]
            features.append(words)
    return features

def load_data_and_labels():
    """Load the sentiment corpus: negative reviews first, then positive.

    Reads the stopword list, tokenizes both review files, and builds a
    parallel label array (0 = negative, 1 = positive; not one-hot yet).

    Returns:
        (features, labels): features is a list of token lists, labels a
        1-D numpy array aligned with features.
    """
    read_stopwords()
    neg_features = get_features_from_files('../data/sentiments/rt.neg')
    pos_features = get_features_from_files('../data/sentiments/rt.pos')
    features = neg_features + pos_features

    # Labels: plain 0/1 for now instead of one-hot [1,0]/[0,1].
    negative_labels = [0 for _ in neg_features]
    positive_labels = [1 for _ in pos_features]
    labels = np.concatenate([negative_labels, positive_labels], 0)

    # BUG FIX: the original printed np.sum(labels) (the POSITIVE count)
    # for both NEG and POS; report the real negative count instead.
    print("Total: %i, NEG: %i, POS: %i"
          % (len(labels), len(negative_labels), np.sum(labels)))

    return features, labels

def create_vocabulary(features, max_vocabulary_size=10000):
    """Build a vocabulary from tokenized documents, ordered by document frequency.

    Counts, for each word, the number of documents it appears in, then
    drops words seen in more than 60% of all documents or in fewer than
    3 documents. (The original comment claimed 70% / 4; the code's actual
    thresholds of 0.6 / 3 are kept.) Survivors are sorted by document
    frequency, most frequent first, after the auxiliary _START_VOCAB tokens.

    Args:
        features: list of token lists, one per document.
        max_vocabulary_size: cap on total vocabulary length, auxiliary
            tokens included. BUG FIX: the original accepted this parameter
            but never used it; it is now enforced by truncation.

    Returns:
        (vocabulary_list, word_vocabulary, index_vocabulary): the ordered
        word list, a word -> index dict, and an index -> word dict.
    """
    # Document frequency: set() counts each word at most once per document.
    doc_freq = {}
    for sentence in features:
        for word in set(sentence):
            doc_freq[word] = doc_freq.get(word, 0) + 1

    # Keep words with 3 <= document frequency <= 60% of all documents.
    max_df = documents_nums * 0.6
    kept = {w: c for w, c in doc_freq.items() if 3 <= c <= max_df}

    # Most frequent first; sorted() is stable, so ties keep insertion order
    # exactly as the original reverse sort did.
    vocabulary_list = _START_VOCAB + sorted(kept, key=kept.get, reverse=True)
    if len(vocabulary_list) > max_vocabulary_size:
        vocabulary_list = vocabulary_list[:max_vocabulary_size]

    word_vocabulary = {word: idx for idx, word in enumerate(vocabulary_list)}
    index_vocabulary = dict(enumerate(vocabulary_list))  # index -> word
    return vocabulary_list, word_vocabulary, index_vocabulary

# 数据填充, 将单词转换成数字索引
def data_padding_and_to_ids(features, word_vocabulary):
	# 得到样本中最长的句子, 将其他句子都填充成max_len长度;
	max_len = max(len(sentence) for sentence in features)
	print(max_len)
	words_as_ids = []
	for sentence in features:
		# 返回单词在词汇表中的value(or索引), 如果单词不在词汇表, 返回UNK_ID=3
		ids = [word_vocabulary.get(word, UNK_ID) for word in sentence]
		words_as_ids.append(ids + [PAD_ID] * (max_len-len(ids)))      # 用PAD_ID来填充数据
	return words_as_ids

# 分割训练集和测试集
def split_train_and_test(features, labels):
	# print(features[0], labels[0])
	# print(features[100], labels[100])

	datas = [*zip(features, labels)]

	random.shuffle(datas)
	# datas = datas[:-62]
	trains = datas[:-5000]
	tests = datas[-200:]
	train_features, train_labels = zip(*trains)
	tests_features, tests_labels = zip(*tests)
	return list(train_features), list(train_labels), list(tests_features), list(tests_labels)


if __name__ == '__main__':
    # Smoke-test the preprocessing pipeline end to end.
    features, labels = load_data_and_labels()
    create_vocabulary(features)
    split_train_and_test(features, labels)