import  os
import re
import io
import requests
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile

# Start a computation-graph session and set the RNN model parameters
sess = tf.Session()
# Number of training epochs
epochs = 20
# Mini-batch size
batch_size = 250
# Messages are truncated/padded to this many tokens
max_sequence_length = 25
# Number of hidden units in the RNN cell
rnn_size = 10
# Dimension of the learned word embeddings
embedding_size = 10
# Only keep words that occur more than this many times
min_word_frequence = 10
learning_rate = 0.0005
# Fed at run time: keep probability for dropout (e.g. <1.0 train, 1.0 eval)
dropout_keep_prob = tf.placeholder(tf.float32)

# ----------------------------------------- Fetch the SMS spam data set
# Downloads the UCI SMS Spam Collection on first run and caches it to
# temp/text_data.txt; later runs read the cached file instead.
data_dir = 'temp'
data_file = 'text_data.txt'
if not os.path.isfile(os.path.join(data_dir, data_file)):
    # Make sure the cache directory exists before writing into it
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
    r = requests.get(zip_url)
    z = ZipFile(io.BytesIO(r.content))
    # Read the main file inside the zip archive
    file = z.read('SMSSpamCollection')
    # Normalize: decode, drop non-ASCII characters, split into lines
    text_data = file.decode()
    text_data = text_data.encode('ascii', errors='ignore')
    text_data = text_data.decode().split('\n')
    # Cache the data to disk so later runs skip the download.
    # BUG FIX: write each line ('text'), not the whole list ('text_data') —
    # the original wrote the list repr once per line, corrupting the cache.
    with open(os.path.join(data_dir, data_file), 'w') as file_conn:
        for text in text_data:
            file_conn.write('{}\n'.format(text))
else:
    # Cached file exists: read it back line by line
    text_data = []
    with open(os.path.join(data_dir, data_file), 'r') as file_conn:
        for row in file_conn:
            text_data.append(row)
    text_data = text_data[:-1]  # drop the trailing empty line
# Each line is '<label>\t<message>'; split into parallel label/text lists
text_data = [x.split('\t') for x in text_data if len(x) >= 1]
[text_data_target, text_data_train] = [list(x) for x in zip(*text_data)]

# ------- Clean the text data: remove special characters, lower-case, tokenize on spaces
def clean_text(text_string):
    """Clean a raw SMS message for tokenization.

    Removes punctuation, underscores and digits, collapses any run of
    whitespace into a single space, and lower-cases the result.

    Args:
        text_string: raw message text.

    Returns:
        The cleaned, lower-cased string with space-separated words.
    """
    text_string = re.sub(r'([^\s\w]|_|[0-9])+', '', text_string)
    # BUG FIX: join with a space (not a comma) so words stay
    # space-delimited, as the comment above and the downstream
    # vocabulary tokenizer expect.
    text_string = ' '.join(text_string.split())
    text_string = text_string.lower()
    return text_string

# Apply the cleaning pass to every message
text_data_train = list(map(clean_text, text_data_train))

# ========= Process text: map each message to a fixed-length list of word indices =========
# BUG FIX: the keyword argument is 'min_frequency' — the original
# 'min_frequence=' raises TypeError at runtime.
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
    max_sequence_length, min_frequency=min_word_frequence)
# NOTE: fit_transform builds the vocabulary over the whole corpus in one
# pass, which can be slow on large data sets.
text_processed = np.array(list(vocab_processor.fit_transform(text_data_train)))