import tensorflow as tf
import numpy as np
import word2vct
import os
import readline  # imported for its side effect: keeps input() from breaking on line editing / CJK input
import sys


# Location where the word-to-vector vocabulary is persisted between runs.
WORDVCT_PATH = './wordvct.json'

# Maximum vocabulary size for the converter; must match the input
# dimension of the Embedding layer defined further down.
MAX_CONVERTER_SIZE = 1024

converter = word2vct.Converter(MAX_CONVERTER_SIZE)

print('词句解析引擎\nTensorflow: %s\nNumpy: %s' % (tf.__version__, np.__version__))

# Load the negative and positive training comments, one comment per line.
# NOTE: keep both files at the same number of lines when editing the
# training set — pad with blank lines rather than leaving them uneven.
# Context managers fix the original's resource leak: ng_file/po_file
# were opened and never closed.
with open('./train_data/negative.txt', 'r') as ng_file:
    ng_comment = ng_file.read().splitlines()
with open('./train_data/positive.txt', 'r') as po_file:
    po_comment = po_file.read().splitlines()

# Restore a previously saved vocabulary (if any) so word ids stay
# stable across training runs.
if os.path.isfile(WORDVCT_PATH):
    with open(WORDVCT_PATH, 'r') as f:
        converter.loads(f.read())

# Vectorize the text: converter.s2v maps each comment string to a
# sequence of integer word ids.
ng_comment_list = [converter.s2v(line) for line in ng_comment]
po_comment_list = [converter.s2v(line) for line in po_comment]

# Fixed sequence length that every comment is padded/truncated to.
VCT_SIZE = 128

# Normalize every id sequence to exactly VCT_SIZE entries, padding with
# zeros at the tail ('post') so the model sees uniform-length input.
_pad = tf.keras.preprocessing.sequence.pad_sequences
ng_train = _pad(ng_comment_list, maxlen=VCT_SIZE, dtype='int64', value=0, padding='post')
po_train = _pad(po_comment_list, maxlen=VCT_SIZE, dtype='int64', value=0, padding='post')

# Build the classifier: embed word ids, average the embeddings over the
# sequence, then a small dense head ending in a sigmoid for binary
# (positive/negative) sentiment.
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(MAX_CONVERTER_SIZE, 16),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(128, activation='tanh'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])

model.summary()

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Label convention: negative comment -> 0, positive comment -> 1.
# Stack the padded sequences into a single training matrix (positives
# first, then negatives) and build the matching label vector.
train_collection_x = np.concatenate([po_train, ng_train])
train_collection_y = np.array([1] * len(po_train) + [0] * len(ng_train))

# Ask the user how many epochs to train for (default: 100).
# More epochs can raise training accuracy but also increases the risk
# of overfitting.
print('训练次数调整:\n增加训练次数可以提高准确率，但也会导致过拟合发生。')
raw_epochs = input('请输入训练批次(留空默认100次):').strip()

if raw_epochs:
    try:
        epochs = int(raw_epochs)
    except ValueError:
        # Original crashed with an uncaught ValueError on non-numeric
        # input; fall back to the default instead.
        print('输入无效，使用默认100次。')
        epochs = 100
else:
    epochs = 100

model.fit(train_collection_x,
          train_collection_y,
          epochs=epochs,
          batch_size=128,
          verbose=1)

# Persist the (possibly extended) vocabulary so the next run reuses the
# same word ids.
with open(WORDVCT_PATH, 'w') as f:
    f.write(converter.dumps())

# Save the trained model to the 'saved_model' directory.
model.save('saved_model')

print('模型组建完成，请执行model_recall.py进行测试')
