#!/usr/bin/env python
#-*- coding:utf-8 -*-
import sys
import os
import numpy as np
import scipy
from scipy.special import expit

## x*y  pointwise multiply              ###
## np.multiply(x, y) pointwise multiply ###
## np.dot(x, y)  matrix multiply        ###
def sigmoid(x):
  """Element-wise logistic sigmoid 1 / (1 + exp(-x)), via scipy's expit."""
  y = expit(x)
  return y

def sigmoid_grad(x):
  """Derivative of the logistic sigmoid at x: sig(x) * (1 - sig(x))."""
  sig = expit(x)
  return sig * (1 - sig)

def tanh(x):
  """Element-wise hyperbolic tangent (thin wrapper over numpy)."""
  result = np.tanh(x)
  return result

def tanh_grad(x):
  """Derivative of tanh at x: 1 - tanh(x)^2."""
  th = np.tanh(x)
  return 1 - th**2

def Sample(h, s, n, start, idx_to_char, char_to_idx):
  """Sample n characters from the trained LSTM, seeded with `start`.

  h, s        -- initial hidden state and cell state, each shape (d, 1)
  n           -- number of characters to generate after the seed
  start       -- seed character; must be a key of char_to_idx
  idx_to_char -- index -> character mapping (also defines vocab size)
  char_to_idx -- character -> index mapping
  Returns the list [start, c1, ..., cn] of characters.
  Reads the global LSTM parameters; does not modify them.
  """
  global w_c, u_c, b_c
  global w_i, u_i, b_i
  global w_f, u_f, b_f
  global w_o, u_o, b_o
  global w_p, b_p

  vs = len(idx_to_char)

  # one-hot encode the seed character as the first input
  x = np.zeros((vs, 1))
  x[char_to_idx[start]] = 1

  r = [start]
  for t in range(n):
    c = tanh(np.dot(w_c, x) + np.dot(u_c, h) + b_c)     # candidate cell state
    i = sigmoid(np.dot(w_i, x) + np.dot(u_i, h) + b_i)  # input gate
    f = sigmoid(np.dot(w_f, x) + np.dot(u_f, h) + b_f)  # forget gate
    o = sigmoid(np.dot(w_o, x) + np.dot(u_o, h) + b_o)  # output gate
    ### update S and H ###
    s = np.multiply(c, i) + np.multiply(s, f)
    h = np.multiply(o, tanh(s))
    # softmax over the vocabulary; subtract the max logit so np.exp
    # cannot overflow (shift leaves the probabilities unchanged)
    logits = np.dot(w_p, h) + b_p
    exp_l  = np.exp(logits - np.max(logits))
    prob   = exp_l / np.sum(exp_l)
    ### draw the next character and one-hot encode it as the next input ###
    ix    = np.random.choice(vs, p = prob.ravel())
    x     = np.zeros((vs, 1))
    x[ix] = 1.0
    r.append(idx_to_char[ix])
  return r

def GetLoss(X, Y, h_prev, s_prev):
  """One forward + backward (BPTT) pass over a sequence of one-hot vectors.

  X, Y    -- lists of one-hot column vectors (inputs and next-char targets)
  h_prev  -- hidden state carried in from the previous batch, shape (d, 1)
  s_prev  -- cell state carried in from the previous batch, shape (d, 1)
  Returns the 14 parameter gradients (summed over all timesteps), the final
  hidden state H[T-1], the final cell state S[T-1], and the total
  cross-entropy loss of the sequence.
  Reads the global LSTM parameters; does not modify them.
  """
  global w_c, u_c, b_c;
  global w_i, u_i, b_i;
  global w_f, u_f, b_f;
  global w_o, u_o, b_o;
  global w_p, b_p;

  T = len(X)  
  # per-timestep activations, keyed by t; index -1 holds the incoming state
  H = {}
  S = {}
  H[-1] = np.copy(h_prev)
  S[-1] = np.copy(s_prev)

  # pre-activations (needed later for the gate derivatives)
  c_hat = {}
  i_hat = {}
  f_hat = {}
  o_hat = {}

  # gate activations
  c = {}
  i = {}
  f = {}
  o = {}

  logits = {}
  prob = {}

  # forward 
  # 0 ~ (T-1)
  loss = 0.0
  for t in range(T):
    ## new state
    c_hat[t] = np.dot(w_c, X[t]) + np.dot(u_c, H[t - 1]) + b_c
    c[t] =    tanh(c_hat[t])
    ## input gate
    i_hat[t] = np.dot(w_i, X[t]) + np.dot(u_i, H[t - 1]) + b_i
    i[t] = sigmoid(i_hat[t])
    ## forget gate
    f_hat[t] = np.dot(w_f, X[t]) + np.dot(u_f, H[t - 1]) + b_f
    f[t] = sigmoid(f_hat[t])
    ## output gate
    o_hat[t] = np.dot(w_o, X[t]) + np.dot(u_o, H[t - 1]) + b_o
    o[t] = sigmoid(o_hat[t])
    ## update state and H ###
    S[t] = np.multiply(c[t], i[t]) + np.multiply(S[t-1], f[t])
    H[t] = np.multiply(o[t], tanh(S[t]))
    
    ## prob[t] just for gradient caculation ##
    # NOTE(review): softmax is unshifted here — np.exp can overflow for
    # large logits; Sample() has the same computation.
    logits[t] = np.dot(w_p, H[t]) + b_p
    exp_sum = np.sum(np.exp(logits[t]))
    prob[t] = np.exp(logits[t]) / exp_sum
    # cross-entropy: Y[t] is one-hot, so this picks -log(prob of the target)
    loss += np.dot(Y[t].T, -np.log(prob[t]))

  ### gradient caculated by one sample ####
  # h_grad / s_grad carry the recurrent gradient from timestep t+1 into t
  h_grad = np.zeros_like(h_prev)
  s_grad = np.zeros_like(s_prev)

  w_c_grad = np.zeros_like(w_c)
  u_c_grad = np.zeros_like(u_c)
  b_c_grad = np.zeros_like(b_c)

  w_i_grad = np.zeros_like(w_i)
  u_i_grad = np.zeros_like(u_i)
  b_i_grad = np.zeros_like(b_i)

  w_f_grad = np.zeros_like(w_f)
  u_f_grad = np.zeros_like(u_f)
  b_f_grad = np.zeros_like(b_f)

  w_o_grad = np.zeros_like(w_o)
  u_o_grad = np.zeros_like(u_o)
  b_o_grad = np.zeros_like(b_o)

  w_p_grad = np.zeros_like(w_p)
  b_p_grad = np.zeros_like(b_p)

  # (T-1) -> 0
  for t in reversed(xrange(T)):
    # d(loss)/d(logits) for softmax + cross-entropy is simply prob - y
    logits_grad = np.copy(prob[t]) - Y[t]
    w_p_grad     += np.dot(logits_grad, H[t].T)
    b_p_grad     += logits_grad
    
    ### h_grad = (grad from loss +  grad from next o/f/i)
    ### s_grad = (grad from h    +  grad from next s)
    h_grad       += np.dot(w_p.T, logits_grad)
    s_grad       += np.multiply(h_grad, np.multiply(o[t], tanh_grad(S[t])))

    c_grad        = np.multiply(s_grad, i[t])
    i_grad        = np.multiply(s_grad, c[t])
    f_grad        = np.multiply(s_grad, S[t-1])
    o_grad        = np.multiply(h_grad, tanh(S[t]))

    # back through each gate's nonlinearity, then into its weights
    c_hat_grad    = np.multiply(c_grad, tanh_grad(c_hat[t]))
    w_c_grad     += np.dot(c_hat_grad, X[t].T)
    u_c_grad     += np.dot(c_hat_grad, H[t-1].T)
    b_c_grad     += c_hat_grad

    i_hat_grad    = np.multiply(i_grad, sigmoid_grad(i_hat[t]))
    w_i_grad     += np.dot(i_hat_grad, X[t].T)
    u_i_grad     += np.dot(i_hat_grad, H[t-1].T)    
    b_i_grad     += i_hat_grad

    f_hat_grad    = np.multiply(f_grad, sigmoid_grad(f_hat[t]))
    w_f_grad     += np.dot(f_hat_grad, X[t].T)
    u_f_grad     += np.dot(f_hat_grad, H[t-1].T)
    b_f_grad     += f_hat_grad
    
    o_hat_grad    = np.multiply(o_grad, sigmoid_grad(o_hat[t]))
    w_o_grad     += np.dot(o_hat_grad, X[t].T)
    u_o_grad     += np.dot(o_hat_grad, H[t-1].T)
    b_o_grad     += o_hat_grad

    ### pass S_grad to prev stage, never used again ###
    ### pass h_grad to prev state, never used again ###
    # overwrite (not accumulate): these become the incoming recurrent
    # gradients for timestep t-1
    s_grad        = np.multiply(s_grad, f[t])
    h_grad        = np.dot(u_c.T, c_hat_grad) + \
                    np.dot(u_o.T, o_hat_grad) + \
                    np.dot(u_i.T, i_hat_grad) + \
                    np.dot(u_f.T, f_hat_grad)

  return (w_c_grad, u_c_grad, b_c_grad,
          w_i_grad, u_i_grad, b_i_grad,
          w_f_grad, u_f_grad, b_f_grad,
          w_o_grad, u_o_grad, b_o_grad,
          w_p_grad, b_p_grad,
          H[T-1], S[T-1], loss)

def GenBatchText(data, seq_len):
  """Yield consecutive (x, y) training pairs of length seq_len from `data`.

  y is x shifted right by one position (next-character prediction targets).
  Any trailing remainder shorter than seq_len is dropped.
  """
  # floor division: `/` would yield a float batch count under Python 3
  b_cnt = (len(data) - 1) // seq_len
  x_off = 0
  for b_off in range(b_cnt):
    y_off = x_off + 1
    x = data[x_off : x_off + seq_len]
    y = data[y_off : y_off + seq_len]
    x_off += seq_len
    yield (x, y)


if __name__ == "__main__":
  # Train a character-level LSTM on input.txt and periodically print samples.
  data = open('input.txt', 'r').read()
  chars = list(set(data))
  K = len(chars)  # vocabulary size
  print "total data len: %d, uniq char len: %d" % (len(data), K)
  char_to_idx = { ch: i for i, ch in enumerate(chars)}
  idx_to_char = { i: ch for i, ch in enumerate(chars)}

  # hyper-parameters
  d = 64               # hidden-state size
  seq_len = 42         # BPTT truncation length
  sample_sen_len = 42  # length of sampled text
  lr = 0.01            # Adagrad learning rate
  num_epoch = 50000

  ##### parameters need to be trained #####
  # w_* : input->gate weights (d, K); u_* : recurrent weights (d, d);
  # b_* : biases (d, 1); m_* : Adagrad accumulators (squared-gradient sums)
  w_c = np.random.randn(d, K)*0.01
  m_w_c = np.zeros_like(w_c)
  u_c = np.random.randn(d, d)*0.01
  m_u_c = np.zeros_like(u_c)
  b_c = np.zeros((d, 1))
  m_b_c = np.zeros_like(b_c)

  w_i = np.random.randn(d, K)*0.01
  m_w_i = np.zeros_like(w_i)
  u_i = np.random.randn(d, d)*0.01
  m_u_i = np.zeros_like(u_i)
  b_i = np.zeros((d, 1))
  m_b_i = np.zeros_like(b_i)

  w_f = np.random.randn(d, K)*0.01
  m_w_f = np.zeros_like(w_f)
  u_f = np.random.randn(d, d)*0.01
  m_u_f = np.zeros_like(u_f)
  b_f = np.zeros((d, 1))
  m_b_f = np.zeros_like(b_f)

  w_o = np.random.randn(d, K)*0.01
  m_w_o = np.zeros_like(w_o)
  u_o = np.random.randn(d, d)*0.01
  m_u_o = np.zeros_like(u_o)
  b_o = np.zeros((d, 1))
  m_b_o = np.zeros_like(b_o)

  # projection from hidden state to vocabulary logits
  w_p = np.random.randn(K, d)*0.01
  m_w_p = np.zeros_like(w_p)
  b_p = np.zeros((K, 1))
  m_b_p = np.zeros_like(b_p)
  
  smooth_loss = -np.log(1.0/K)*seq_len # loss at iteration 0
  for epoch in range(num_epoch):
    iter = 0
    # hidden/cell state reset once per epoch, then carried across batches
    h_prev = np.zeros((d, 1))
    s_prev = np.zeros((d, 1))

    for x, y in GenBatchText(data, seq_len):
      # uncomment to reset state per batch instead of carrying it over
      #h_prev = np.zeros((d, 1))
      #s_prev = np.zeros((d, 1))

      iter += 1
      # one-hot encode input and target characters as (K, 1) columns
      org = np.zeros((K, 1))
      vx = []
      for cx in (x):
        vx.append(org.copy())
        vx[-1][char_to_idx[cx]] = 1.0

      vy = []
      for cy in (y):
        vy.append(org.copy())
        vy[-1][char_to_idx[cy]] = 1.0

      ### train once ####
      # keep the pre-batch state so the gradient check can replay the batch
      h_old = h_prev
      s_old = s_prev
      (w_c_grad, u_c_grad, b_c_grad,
       w_i_grad, u_i_grad, b_i_grad,
       w_f_grad, u_f_grad, b_f_grad,
       w_o_grad, u_o_grad, b_o_grad,
       w_p_grad, b_p_grad,
       h_prev,   s_prev,   loss     ) = GetLoss(vx, vy, h_prev, s_prev)
      smooth_loss = smooth_loss * 0.999 + loss * 0.001
      # these lists alias the parameter arrays, so the in-place update
      # below mutates the module-level globals that GetLoss/Sample read
      w = [     w_c,      u_c,      b_c,
                w_i,      u_i,      b_i,
                w_f,      u_f,      b_f, 
                w_o,      u_o,      b_o,
                w_p,      b_p]
      g = [w_c_grad, u_c_grad, b_c_grad,
           w_i_grad, u_i_grad, b_i_grad,
           w_f_grad, u_f_grad, b_f_grad,
           w_o_grad, u_o_grad, b_o_grad,
           w_p_grad, b_p_grad]
      m = [   m_w_c,     m_u_c,    m_b_c,
              m_w_i,     m_u_i,    m_b_i,
              m_w_f,     m_u_f,    m_b_f,
              m_w_o,     m_u_o,    m_b_o,
              m_w_p,     m_b_p]

      ### do gradient check ####
      # disabled by default; flips each checked parameter by +/- delta and
      # compares the numeric gradient against the analytic one
      if False:
        num_checks, delta = 5, 1e-6
        from random import uniform
        for param, grad, name in zip(w, g, ['wc', 'uc', 'bc', 'wi', 'ui', 'bi', 'wf', 'uf', 'bf', 'wo', 'uo', 'bo', 'wp', 'bp']):
          s0 = param.shape
          s1 = grad.shape
          assert s0 == s1, 'Error dims match'
          print "------------> %s -------------> " % name
          for i in xrange(num_checks):
            ri = int(uniform(0, param.size))
            old_val = param.flat[ri]
            param.flat[ri] = old_val + delta
            _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,cg0 = GetLoss(vx, vy, h_old, s_old)
            param.flat[ri] = old_val - delta
            _,_,_,_,_,_,_,_,_,_,_,_,_,_,_,_,cg1 = GetLoss(vx, vy, h_old, s_old)
            param.flat[ri] = old_val
            grad_analytic = grad.flat[ri]
            grad_numerical = (cg0 - cg1) / ( 2 * delta )
            if abs(grad_numerical + grad_analytic) < 1e-6:
              continue
            rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)
            print '(%f %f) => %e' % (grad_numerical, grad_analytic, rel_error)
      
      ## do update ##
      # Adagrad: accumulate squared gradients, scale step by 1/sqrt(sum)
      for param, grad, mem in zip(w, g, m):
        mem += grad*grad
        param += -lr*grad/np.sqrt(mem + 1e-8)

      if iter % 5000 == 0:
        print "[epoch_%d]: iter %d: %f" % (epoch, iter, smooth_loss)
        r = Sample(h_prev, s_prev, sample_sen_len, idx_to_char[np.random.randint(0, K)], idx_to_char, char_to_idx)
        print "############ \n" + "".join(r) + "\n############"

    # periodic sample from a zero state with a random seed character
    if epoch % 100 == 0:
      print "[epoch_%d]: iter %d: %f" % (epoch, iter, smooth_loss)
      start_symbol = idx_to_char[np.random.randint(0, K)]
      r = Sample(np.zeros((d, 1)), np.zeros((d, 1)), sample_sen_len, start_symbol, idx_to_char, char_to_idx)
      print "="*sample_sen_len
      print "".join(r)
      print "="*sample_sen_len