# -*- coding:utf-8 -*-
#! /bin/python
import os
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF

def get_session(gpu_fraction=0.98):
    """Create a TF session that caps GPU memory at `gpu_fraction` of the
    device total (0.98 by default) instead of letting TF grab it all.

    If the OMP_NUM_THREADS environment variable is set, it also limits
    intra-op parallelism to that many threads.
    """
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)

    if num_threads:
        # Fix: os.environ values are strings; ConfigProto expects an int.
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options,
            intra_op_parallelism_threads=int(num_threads)))
    return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))


#KTF.set_session(get_session())
#KTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'gpu': 0})))

from keras.models import Sequential
from keras.layers import LSTM, Dense,Dropout,Input,Embedding, Flatten,Activation
import numpy as np
import os
# Move the working directory two levels up so relative paths used below
# ('Data/poetry.txt', 'output/...') resolve from the project root; print
# the resulting cwd so a wrong launch directory is visible immediately.
os.chdir("../..")
print(os.getcwd())


def extract_poetry_line_from_data(poetry_file='Data/poetry.txt'):
    """Read a poetry corpus and return a list of cleaned poem bodies.

    Each input line is expected to look like ``title:content``. A line is
    skipped when it:
      * does not split into exactly (title, content) on ':',
      * contains annotation characters ('_', '(', '（', '《', '['),
      * has a content length outside [5, 79] after spaces are removed.

    Each surviving poem is wrapped as ``[content]`` so the model can learn
    explicit start/end markers.

    :param poetry_file: path to a UTF-8 text file, one poem per line
    :return: list of '[...]'-wrapped poem strings
    """
    poetrys = []
    skip_chars = ('_', '(', '（', '《', '[')
    with open(poetry_file, "r", encoding='utf-8') as f:
        for line in f:
            # Fix: catch only the ValueError raised by a malformed split,
            # instead of the original bare `except Exception: pass` that
            # silently swallowed every error in the loop body.
            try:
                title, content = line.strip().split(':')
            except ValueError:
                continue
            content = content.replace(' ', '')
            if any(ch in content for ch in skip_chars):
                continue
            if not 5 <= len(content) <= 79:
                continue
            poetrys.append('[' + content + ']')
    return poetrys

# ---- Build the training corpus --------------------------------------------
# Keep only the first 1000 poems so training stays manageable.
poetrys = extract_poetry_line_from_data()
poetrys = poetrys[:1000]

# Vocabulary: every distinct character seen in the (bracket-wrapped) poems.
word_set = set()
for poem in poetrys:
    word_set |= set(poem)
word_list = list(word_set)
word_set_len = len(word_list)

# char -> integer id; enumeration order matches word_list, so
# word_list[word_map[ch]] == ch.
word_map = {ch: idx for idx, ch in enumerate(word_list)}

# Encode every poem as a stream of (char_id, position) pairs.
# '[' resets the position counter to 1 and ']' to 0; the trailing
# increment then advances it for the following character.
inputs = []
for poem in poetrys:
    pos = 1
    for ch in poem:
        if ch == ']':
            pos = 0
        elif ch == '[':
            pos = 1
        inputs.append((word_map[ch], pos))
        pos += 1

inputs_len = len(inputs)

# Targets are the inputs shifted left by one; the final target is the
# end-of-poem marker ']'.
outputs = inputs[1:]
outputs.append((word_map[']'], 0))

# One-hot encode the target character ids.
outputs_z = []
for char_id, _pos in outputs:
    row = np.zeros(word_set_len, 'float')
    row[char_id] = 1
    outputs_z.append(row)

inputs = np.array(inputs)
outputs_z = np.array(outputs_z)

# ---- Network definition ----------------------------------------------------
# Each sample is a (char_id, position) pair, so the Embedding sees a
# sequence of length 2. Three stacked LSTMs feed a softmax over the
# vocabulary that predicts the next character.
model = Sequential()
model.add(Embedding(input_dim=word_set_len,
                    output_dim=word_set_len,
                    input_length=2))
model.add(LSTM(word_set_len, return_sequences=True))
model.add(LSTM(word_set_len, return_sequences=True))
model.add(LSTM(word_set_len, dropout=0.2, return_sequences=False))
model.add(Activation('sigmoid'))
model.add(Dense(word_set_len, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

# Weights checkpoint shared by train(), do_it() and do_it2().
model_path = "output/poetry.keras.model.6_layouts_lstm"

def train(new_one=False):
    """Train the model indefinitely, checkpointing after every 4 epochs.

    :param new_one: when False (default), resume from the weights saved at
        ``model_path`` if that file exists; when True, train from scratch.

    NOTE: this function never returns — it alternates fit/save forever;
    interrupt the process to stop training.
    """
    # Resume once, before the loop: the original reloaded the file it had
    # just written on every iteration, which is redundant disk I/O.
    if not new_one and os.path.exists(model_path):
        model.load_weights(model_path)
    while True:
        model.fit(inputs, outputs_z, batch_size=300, epochs=4)
        model.save_weights(model_path)

def do_it():
    """Generate one poem, seeded by the '[' start-of-poem marker.

    Repeatedly feeds the current (char_id, position) pair to the model and
    greedily takes the argmax character, stopping at ']' or after 50 steps.
    Loads the latest checkpoint from ``model_path`` first, if present.
    """
    if os.path.exists(model_path):
        model.load_weights(model_path)

    word = '['
    for i in range(0, 50):
        word_id = np.array([(word_map[word], i)])
        ret = model.predict(word_id, 1)
        index = np.array(ret).argmax()
        word = word_list[index]
        if word == ']':
            break
        # Fix: the original printed word.encode("utf-8"), which under
        # Python 3 emits the bytes repr (b'...'); print the str itself.
        print(word, end=' ')


def do_it2(heads):
    """Generate an acrostic poem: one line seeded per character of `heads`.

    For each head character, greedily continues with the model's argmax
    prediction until ']' or 50 characters, printing as it goes.

    :param heads: iterable of single characters; each must be in word_map
        (a character missing from the training vocabulary raises KeyError).
    """
    if os.path.exists(model_path):
        model.load_weights(model_path)

    for word in heads:
        for i in range(1, 50):
            # Fix: the original printed word.encode("utf-8"), which under
            # Python 3 emits the bytes repr (b'...'); print the str itself.
            print(word, end=' ')
            word_id = np.array([(word_map[word], i)])
            ret = model.predict(word_id)
            index = np.array(ret).argmax()
            word = word_list[index]
            if word == ']':
                break
        print("")

# NOTE(review): train() runs `while True` with no break, so it never
# returns — the generation calls below are unreachable as written.
# Confirm whether train() should stop after a fixed number of passes,
# or whether generation is meant to run in a separate invocation.
train()
do_it2(u"一二三四五他法反对和图画梅花赋健康")
do_it()
print("done")

