# coding=utf-8

import sys
import time
import tensorflow as tf
import numpy as np

from Data import Data
from Config import Config as config

class Poetry():
    """Character-level RNN language model for poetry (TensorFlow 1.x graph
    API, Python 2).

    The graph is assembled once in __init__ via init_network(); train()
    fits the model on batches from `data`, and test() restores a saved
    checkpoint and samples one poem character by character.

    NOTE(review): rnn() and loss() rebind the attributes `self.rnn` and
    `self.loss` from bound methods to plain dicts the first time they run,
    so each can only ever be called once. init_network() calls each exactly
    once, which is why this works — but it is fragile.
    """
    def __init__(self,data,config,isTrain=True):
        # `data` is the corpus wrapper; this class relies on its GetNext,
        # GetWordsLen, GetWordFromNum and GetNumFromWord methods.
        self.data = data

        self.config = config 
        # "network_type" picks the recurrent cell: "rnn", "gru" or "lstm".
        self.model = self.config["network_type"]
       
        self.isTrain = isTrain
        if not self.isTrain:
            # Generation feeds one character at a time, so force batch 1.
            self.config["batch_size"] = 1

        self.init_network()

    def init_network(self):
        """Assemble the full graph: placeholders, RNN, loss/train op, saver."""
        self.net = {}
        self.net["session"] = tf.Session()
        # Both placeholders are [batch_size, time]; time is left dynamic.
        self.net["input"] = tf.placeholder(dtype=tf.int32,shape=[self.config["batch_size"],None],name="input2")
        self.net["output"] = tf.placeholder(dtype=tf.int32,shape=[self.config["batch_size"],None],name="output")

        self.net["rnn"] = {}
        self.net["rnn"]["output"],self.net["rnn"]["state"],self.net["rnn"]["softmax_output"],self.net["rnn"]["cell"],self.net["rnn"]["init_state"] = self.rnn(self.net["input"])
        self.net["train_op"] = self.loss(self.net["rnn"]["softmax_output"])
        # By this point loss() has replaced the method `self.loss` with the
        # dict it built, so this stores that dict (not the method).
        self.net["loss"] = self.loss
        self.net["saver"] = tf.train.Saver()
        if self.isTrain: self.pre_train() 

    def rnn(self,input_data):
        """Build the recurrent part of the graph.

        input_data: int32 character ids, shape [batch_size, time].

        Returns (output, state, softmax_output, cell, init_state), where
        output is reshaped to [batch*time, rnn_size] and softmax_output to
        [batch*time, vocab+1].

        NOTE(review): rebinds `self.rnn` from this method to a dict on
        entry — see class docstring.
        """
        with tf.name_scope("rnn") as scope:
            # Map the config string to a cell constructor. Uses the old
            # tf.contrib.rnn.core_rnn_cell path (TF ~1.0/1.1). Silently
            # returns None for unknown model names.
            def cell_fun(model,rnn_size):
                if model == "rnn":
                    return tf.contrib.rnn.core_rnn_cell.BasicRNNCell(rnn_size)
                elif model == "gru":
                    return tf.contrib.rnn.core_rnn_cell.GRUCell(rnn_size)
                elif model == "lstm":
                    return tf.contrib.rnn.core_rnn_cell.BasicLSTMCell(rnn_size,state_is_tuple=True)
            
            self.rnn = {"input":input_data}
            self.rnn["config"] = self.config["rnn"]
            with tf.name_scope("rnn_init") as scope:
                self.rnn["cell"] = cell_fun(self.model,self.rnn["config"]["rnn_size"])
                # NOTE(review): [cell] * num_layers stacks the SAME cell
                # object for every layer; newer TF versions require a
                # distinct cell instance per layer.
                self.rnn["cell"] = tf.contrib.rnn.core_rnn_cell.MultiRNNCell([self.rnn["cell"]] * self.rnn["config"]["num_layers"],state_is_tuple = True)
                self.rnn["init_state"] = self.rnn["cell"].zero_state(self.config["batch_size"],tf.float32)
    
            with tf.variable_scope("rnnlm"):
                # word embed: vocabulary size is GetWordsLen()+1 (one extra
                # slot, presumably for padding/unknown — confirm in Data).
                self.rnn["embedding"] = tf.get_variable("embedding",[self.data.GetWordsLen()+1,self.rnn["config"]["rnn_size"]])
                self.rnn["input_"] = tf.nn.embedding_lookup(self.rnn["embedding"],self.rnn["input"]) 
    
            with tf.name_scope("rnnnw") as scope:
                outputs = []
                self.rnn["state"] = self.rnn["init_state"]
                with tf.variable_scope("rnnnw"):
                    # dynamic_rnn unrolls over the (dynamic) time dimension;
                    # `outputs` is [batch, time, rnn_size].
                    outputs,self.rnn["state"] = tf.nn.dynamic_rnn(self.rnn["cell"],self.rnn["input_"],initial_state=self.rnn["init_state"])
                # Flatten batch and time into one axis for the projection.
                self.rnn["output"] = tf.reshape(tf.concat(outputs,1),shape=[-1,self.rnn["config"]["rnn_size"]])
            
            with tf.name_scope("softmax") as scope:
                # Output projection back onto the vocabulary.
                self.rnn["softmax_w"] = tf.get_variable("softmax_w",[self.rnn["config"]["rnn_size"],self.data.GetWordsLen()+1]) 
                self.rnn["softmax_b"] = tf.get_variable("softmax_b",[self.data.GetWordsLen()+1])
                self.rnn["softmax_output"] = tf.nn.softmax(tf.matmul(self.rnn["output"],self.rnn["softmax_w"]) + self.rnn["softmax_b"])
    
            return self.rnn["output"],self.rnn["state"],self.rnn["softmax_output"],self.rnn["cell"],self.rnn["init_state"]

    def loss(self,output):
        """Build the sequence loss and the Adam train op; returns the train op.

        NOTE(review): `output` is the *softmaxed* probabilities from rnn(),
        but tf.contrib.seq2seq.sequence_loss expects raw logits — as written
        this applies softmax twice; confirm whether that is intended.
        NOTE(review): rebinds `self.loss` from this method to a dict — see
        class docstring.
        """
        with tf.name_scope("loss") as scope:
            self.loss = {}
            logits = tf.reshape(output,shape=[self.config["batch_size"],-1,self.data.GetWordsLen()+1])
            targets = tf.reshape(self.net["output"],shape=[self.config["batch_size"],-1])
            # Uniform weight 1.0 for every target position.
            self.loss["loss"] = tf.contrib.seq2seq.sequence_loss(logits,targets,tf.ones_like(targets,dtype=tf.float32))
            self.loss["loss"] = tf.reduce_mean(self.loss["loss"])
            # Learning rate lives in a variable so train() can decay it
            # with tf.assign each epoch.
            self.loss["learning_rate"] = tf.Variable(self.config["learning_rate"],trainable=False)
            self.loss["optimizer"] = tf.train.AdamOptimizer(self.loss["learning_rate"])
            self.loss["train_op"] = self.loss["optimizer"].minimize(self.loss["loss"]) 
            return self.loss["train_op"]

    def pre_train(self):
        """Initialize all graph variables (training mode only)."""
        self.net["session"].run(tf.global_variables_initializer())
        # recovery

    def train(self):
        """Run `epoch_num` passes over the corpus, decaying the learning
        rate each epoch and checkpointing to saver/train_<epoch>.model."""
        sess = self.net["session"]
        for epoch in range(self.config["epoch_num"]):
            # Multiplicative LR decay, applied before every epoch
            # (including the first one).
            sess.run(tf.assign(self.loss["learning_rate"],self.loss["learning_rate"]*self.config["learning_rate_decay"])) 
            overflow = False
            while overflow == False:
                # GetNext signals end-of-epoch through `overflow`.
                x_batch,y_batch,overflow = self.data.GetNext(self.config["batch_size"])
                train_loss,_,_ = sess.run([self.loss["loss"],self.net["rnn"]["state"],self.loss["train_op"]],feed_dict={self.net["input"]:x_batch,self.net["output"]:y_batch}) 
            # NOTE(review): train_loss is unbound here if the corpus yielded
            # no batch at all.
            if epoch%1 == 0:
                print "time:%f,epoch:%d,loss:%f" % (time.time(),epoch,train_loss)
            # save the model
            self.net["saver"].save(sess,"saver/train_%d.model" % epoch)

    def test(self,file_name,init_word="["):
        """Restore checkpoint `file_name` and sample characters until ']'.

        init_word: seed character(s); '[' and ']' presumably delimit poems
        in the training data — confirm against the Data module.
        Returns the generated poem as a string (without the delimiters).
        """
        def to_word(weights):
            # Draw one vocabulary index with probability proportional to
            # `weights`, then map it back to a character.
            # NOTE(review): np.cumsum flattens `weights`; if it has several
            # rows (multi-character init_word) the draw spans ALL time
            # steps, not just the last one — confirm intended.
            t = np.cumsum(weights)
            s = np.sum(weights)
            sample = int(np.searchsorted(t, np.random.rand(1)*s))
            return self.data.GetWordFromNum(sample)

        sess = self.net["session"]
        self.net["saver"].restore(sess,file_name)

        # compute
        state_ = sess.run(self.net["rnn"]["cell"].zero_state(1,tf.float32))
        poem = ''

        x_ = np.array([list(map(self.data.GetNumFromWord,init_word))])
        # NOTE(review): this first run does not feed init_state, so the
        # graph's default zero state is used and the `state_` computed
        # above is only consumed from the second step onward.
        [softmax_output_, state_] = sess.run([self.net["rnn"]["softmax_output"],self.net["rnn"]["state"]],feed_dict = {self.net["input"]:x_})
        #print "x_:",x_,"word:",word
        #print "x_:",repr(x_),"word:",repr(word)
        word = to_word(softmax_output_)
        while word != ']':
        #for i in range(100):
            poem += word
            #x_ = np.zeros((1,1))
            #x_[0,0] = self.data.GetNumFromWord(word)
            x_ = np.array([list(map(self.data.GetNumFromWord,word))])
            #print "x_:",x_,"word:",word
            #print "x_:",repr(x_),"word:",repr(word)
            # Feed the previous RNN state back in so generation is stateful.
            [softmax_output_, state_] = sess.run([self.net["rnn"]["softmax_output"],self.net["rnn"]["state"]],feed_dict = {self.net["input"]:x_, self.net["rnn"]["init_state"]:state_})
            word = to_word(softmax_output_)
        # NOTE(review): the line below is indented with a tab while the rest
        # of the file uses spaces — tolerated by Python 2, TabError in Py3.
	return poem
  
        
         

usage = 'Usage:\n\tpython rnn.py train -- to train the rnn network.\n\tpython rnn.py test model_filename -- to create a poem'
if __name__ == "__main__":
    if len(sys.argv) <= 1:
        print usage
        sys.exit()
    data = Data()
    if sys.argv[1] == "train":
        poetry = Poetry(data=data,config=config)
        poetry.train()
    elif sys.argv[1] == "test":
        poetry = Poetry(data=data,config=config,isTrain=False)
        if len(sys.argv) > 3:
            print poetry.test(sys.argv[2],init_word=sys.argv[3].decode("utf-8"))
        else:
            print poetry.test(sys.argv[2]) 
    else:
	print usage
sys.exit()
