# coding=utf8

from __future__ import print_function

import tensorflow as tf
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
import numpy as np

import practice.data.getles2data as pdg

# Load the lexicon (vocabulary) produced by the data-preparation step.
lex = pdg.getlex()

n_input_layer = len(lex)  # input layer: one unit per lexicon word (bag-of-words)

# NOTE(review): on Python 3 this is a range object, which zip() below
# consumes fine; it is never indexed elsewhere in this file.
index = range(len(lex))

# word -> position in the input feature vector
lexdict = dict(zip(lex,index))

n_layer_1 = 512  # hidden layer 1 size
n_layer_2 = 512  # hidden layer 2 size (any layer between input and output)

n_output_layer = 3  # output layer: 3 sentiment classes

def neural_network( x , layer_1_w_b , layer_2_w_b, layer_output_w_b ):
    """Build a 2-hidden-layer feed-forward network and return its logits.

    Each ``*_w_b`` argument is a dict with keys ``'w_'`` (weight matrix)
    and ``'b_'`` (bias vector). Hidden layers use ReLU; the output layer
    is left as raw logits (no softmax applied here).
    """
    # First hidden layer: ReLU(x @ W1 + b1)
    hidden1 = tf.nn.relu(
        tf.add(tf.matmul(x, layer_1_w_b['w_']), layer_1_w_b['b_']))

    # Second hidden layer: ReLU(h1 @ W2 + b2)
    hidden2 = tf.nn.relu(
        tf.add(tf.matmul(hidden1, layer_2_w_b['w_']), layer_2_w_b['b_']))

    # Output logits: h2 @ Wo + bo (softmax is applied by the loss/caller)
    logits = tf.add(tf.matmul(hidden2, layer_output_w_b['w_']),
                    layer_output_w_b['b_'])
    return logits
    #return tf_vista.nn.softmax(layer_output)


# Weights and biases for the first hidden layer (input -> 512)
layer_1_w_b = {'w_': tf.Variable(tf.random_normal([n_input_layer, n_layer_1])),
               'b_': tf.Variable(tf.random_normal([n_layer_1]))}
# Weights and biases for the second hidden layer (512 -> 512)
layer_2_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_1, n_layer_2])),
               'b_': tf.Variable(tf.random_normal([n_layer_2]))}
# Weights and biases for the output layer (512 -> 3 classes)
layer_output_w_b = {'w_': tf.Variable(tf.random_normal([n_layer_2, n_output_layer])),
                    'b_': tf.Variable(tf.random_normal([n_output_layer]))}

# Placeholders: x is a batch of bag-of-words vectors, y_ the one-hot labels.
x = tf.placeholder("float", shape=[None, n_input_layer])
y_ = tf.placeholder("float", shape=[None, n_output_layer ])

# Dropout keep-probability placeholder (currently unused; dropout is
# commented out inside neural_network).
p_keep_hidden = tf.placeholder('float')

# Logits op for the whole graph, reused by prediction() below.
py_x = neural_network( x ,layer_1_w_b , layer_2_w_b, layer_output_w_b )

# Directory holding the trained checkpoint restored in prediction().
modeldir = '/Users/vista/PycharmProjects/data/model/'

def prediction(tweet_text):
    """Classify the sentiment of ``tweet_text``.

    Tokenizes and lemmatizes the text, builds a bag-of-words vector over
    the global lexicon, restores the trained checkpoint and runs the
    network. Returns a length-1 numpy array holding the argmax class
    index (0-2) for the single input row.
    """
    lemmatizer = WordNetLemmatizer()

    words = word_tokenize(tweet_text.lower())
    words = [lemmatizer.lemmatize(word) for word in words]

    # BUG FIX: dict.has_key() was removed in Python 3 and raised
    # AttributeError here; the `in` operator works on both 2 and 3.
    # Words not in the lexicon are simply ignored.
    feature = [lexdict[word] for word in words if word in lexdict]

    # Binary bag-of-words vector: one row, one column per lexicon word.
    feavet = np.zeros([1, n_input_layer])
    for idx in feature:
        feavet[0][idx] = 1

    with tf.Session() as session:
        session.run(tf.global_variables_initializer())

        # Restore the trained weights over the freshly-initialized variables.
        saver = tf.train.Saver()
        saver.restore(session, modeldir + 'les2.model.ckpt')

        # Evaluate the logits for this one row and take the argmax class.
        return session.run(tf.argmax(py_x.eval(feed_dict={x: feavet}), 1))


# Smoke-test the classifier on a handful of sample sentences.
for sample in (
    "I am very happe",
    "I am very sad",
    "so bad",
    "can not see",
    "nice to see",
    "well",
    "bad",
    "feel so bad",
    "say nothing",
    "i wan't cry",
):
    print(prediction(sample))