#!/usr/bin/env python

"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License

todo:
-1. Paper Titles
0. Chinese Characters (Bitmap, Wenquanyi <- because it is small)
1. new year
2. English (Deutsch, Chinese, ...) word creator (linguistics)
"""

"""
def check(s):
	import getpass, sys
	if getpass.getuser() == s:
		sys.path.append('/home/aistudio/external-libraries')
check('aistudio')
"""


import os

def has_GPU():
	"""Return True when an NVIDIA GPU device node is visible under /dev.

	Heuristic used to pick the array backend (cupy vs numpy): on
	non-Windows systems, look for a /dev entry containing 'nvidia'
	(e.g. /dev/nvidia0). Returns False on Windows, and False instead
	of raising when /dev cannot be listed (e.g. minimal containers).
	"""
	if os.name == 'nt':
		return False
	try:
		# Check each entry individually: joining all names into one string
		# (as ''.join(...) would) could falsely match 'nvidia' spanning
		# the boundary between two adjacent entry names.
		return any('nvidia' in entry for entry in os.listdir('/dev'))
	except OSError:
		return False

# Select the array backend once at import time. Both branches bind the
# module name `np`, so the rest of the file is backend-agnostic.
GPU = has_GPU()
if GPU:
	# print('GPU detected.')
	import cupy as np  # NumPy-compatible API backed by CUDA
	# import jax.numpy as np
	# import minpy.numpy as np
else:
	import numpy as np

from abbrs import read_file, write_file, get_time_str, get_yyyymmdd_time_str, json_dump, load_json, pack_dict, load_helper

# Corpus name and the model/log directories derived from it; both
# directories live next to (one level above) the working directory.
CORPUS_NAME = 'ba'
MODEL_DIR = f'../{CORPUS_NAME}-model'
LOG_DIR = f'../{CORPUS_NAME}-log'

def MODEL_DIR_PLUS(x):
	"""Path of file *x* inside the model directory."""
	return os.path.join(MODEL_DIR, x)

def LOG_DIR_PLUS(x):
	"""Path of file *x* inside the log directory."""
	return os.path.join(LOG_DIR, x)

# How often (in iterations) to sample text / append a log entry.
# On GPU, 100 iterations take ~2 s, so log 120x less frequently.
LOG_INTERVAL = 100 * 120 if GPU else 100
# Checkpoint less often than logging: every 6 log periods on GPU,
# every 30 on CPU.
SAVE_INTERVAL = LOG_INTERVAL * (6 if GPU else 30)

class parameters:
	"""Model weights, Adagrad accumulators and training-progress counters,
	with JSON (de)serialization to the files under MODEL_DIR.
	"""

	# Array-valued attributes: weights (Wxh, Whh, Why), biases (bh, by),
	# their Adagrad memories (m-prefixed) and the carried hidden state hprev.
	ARGLIST = 'Wxh Whh Why bh by mWxh mWhh mWhy mbh mby hprev'
	# Scalar training state: iteration counter n, data pointer p,
	# exponential moving average of the loss.
	MISCARG_LIST = 'n p smooth_loss'

	MODEL_JSON_FILENAME = MODEL_DIR_PLUS('model.json')
	MODEL_MISC_FILENAME = MODEL_DIR_PLUS('misc.json')

	def __init__(self):
		# hyperparameters
		self.hidden_size = 100 # size of hidden layer of neurons
		self.seq_length = 25 # number of steps to unroll the RNN for
		self.learning_rate = 1e-1

	@staticmethod
	def arg_filename(i):
		"""Per-array .npy path (for the alternative np.save/np.load scheme)."""
		return MODEL_DIR_PLUS('{}.npy'.format(i))

	def load(self):
		"""Restore all arrays and counters from the JSON checkpoint files.

		Raises whatever load_json raises when a checkpoint is missing.
		"""
		arg = load_json(parameters.MODEL_JSON_FILENAME)
		load_helper(self, parameters.ARGLIST,
			lambda i: np.array(arg[i]))
		model_misc = load_json(parameters.MODEL_MISC_FILENAME)
		load_helper(self, parameters.MISCARG_LIST, lambda i: model_misc[i])

	def save(self):
		"""Write arrays (as nested lists) and counters to the JSON checkpoints."""
		arg = { i: self.__dict__[i].tolist() for i in parameters.ARGLIST.split() }
		json_dump(parameters.MODEL_JSON_FILENAME, arg)

		misc_arg = pack_dict(self, parameters.MISCARG_LIST)
		json_dump(parameters.MODEL_MISC_FILENAME, misc_arg)

class model:
	"""Char-level vanilla RNN over a corpus loaded from MODEL_DIR/text.json.

	Owns the corpus metadata, a `parameters` instance (weights + training
	state restored from disk in __init__), and a per-run log buffer that is
	periodically dumped to a timestamped JSON file in LOG_DIR.
	"""
	# attributes populated from text.json: the corpus string, its length,
	# vocabulary size, and the char<->index lookup tables
	arglist = 'data data_size vocab_size char_to_ix ix_to_char'
	
	def __init__(self):
		text = load_json(MODEL_DIR_PLUS('text.json'))
		load_helper(self, model.arglist, lambda i: text[i])
		self.parameters = parameters()

		# one timestamped log file per run; sampled text and loss snapshots
		# accumulate in self.output until save_progress() dumps them
		self.output_filename = LOG_DIR_PLUS(f'{get_yyyymmdd_time_str()}.json')
		self.output = [ ]

		self.parameters.load()

	def sample(self, seed_ix, n):
		"""Yield n characters sampled from the model, one per step.

		Starts from the persisted hidden state (parameters.hprev, which is
		NOT modified here — `h` is rebound, never written in place) and a
		one-hot input for seed_ix.
		"""
		x = np.zeros((self.vocab_size, 1))
		x[seed_ix] = 1
		h = self.parameters.hprev
		for t in range(n):
			# vanilla RNN step, then softmax over the vocabulary
			h = np.tanh(np.dot(self.parameters.Wxh, x) + np.dot(self.parameters.Whh, h) + self.parameters.bh)
			y = np.dot(self.parameters.Why, h) + self.parameters.by
			p = np.exp(y) / np.sum(np.exp(y))
			
			if GPU:
				# sample on the host: .get() copies the cupy probability
				# vector to a numpy array for numpy's random.choice
				import numpy as np2
				ix = np2.random.choice(list(range(self.vocab_size)), p=p.ravel().get())
			else:
				ix = np.random.choice(list(range(self.vocab_size)), p=p.ravel())

			x = np.zeros((self.vocab_size, 1))
			x[ix] = 1
			# ix_to_char came from JSON, so its keys are strings, not ints
			yield self.ix_to_char[str(ix)]

	def lossFun(self, inputs, targets):
		"""Forward and backward pass over one unrolled sequence.

		inputs, targets are both lists of integer character indices
		(targets is inputs shifted one step ahead). The initial hidden
		state is read from self.parameters.hprev.
		Returns (loss, dWxh, dWhh, dWhy, dbh, dby, last hidden state);
		gradients are clipped to [-5, 5].
		"""
		xs, hs, ys, ps = {}, {}, {}, {}
		# deep copy of hprev (works for both numpy and cupy) so the cached
		# hs[-1] is decoupled from any later rebinding of parameters.hprev
		def np_copy(x): return np.array(x.tolist())
		hs[-1] = np_copy(self.parameters.hprev)
		loss = 0
		# forward pass
		for t in range(len(inputs)):
			xs[t] = np.zeros((self.vocab_size,1)) # encode in 1-of-k representation
			xs[t][inputs[t]] = 1
			hs[t] = np.tanh(np.dot(self.parameters.Wxh, xs[t]) + np.dot(self.parameters.Whh, hs[t-1]) + self.parameters.bh) # hidden state
			ys[t] = np.dot(self.parameters.Why, hs[t]) + self.parameters.by # unnormalized log probabilities for next chars
			ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars

			if GPU:
				# NOTE(review): wraps the scalar in a 1-element array before
				# np.log — presumably a cupy scalar-handling workaround;
				# confirm against the cupy version in use
				def np_log(x):
					return np.log(np.array([ x ]))[0]
				loss += 0 - np_log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
			else:
				loss += -np.log(ps[t][targets[t],0])
		# backward pass: compute gradients going backwards
		dWxh, dWhh, dWhy = np.zeros_like(self.parameters.Wxh), np.zeros_like(self.parameters.Whh), np.zeros_like(self.parameters.Why)
		dbh, dby = np.zeros_like(self.parameters.bh), np.zeros_like(self.parameters.by)
		dhnext = np.zeros_like(hs[0])
		for t in reversed(range(len(inputs))):
			dy = np.copy(ps[t])
			dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
			dWhy += np.dot(dy, hs[t].T)
			dby += dy
			dh = np.dot(self.parameters.Why.T, dy) + dhnext # backprop into h
			dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
			dbh += dhraw
			dWxh += np.dot(dhraw, xs[t].T)
			dWhh += np.dot(dhraw, hs[t-1].T)
			dhnext = np.dot(self.parameters.Whh.T, dhraw)
		for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
			np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
		return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]

	def train(self):
		"""Run one training iteration (one seq_length-sized chunk).

		Side effects: updates weights in place (Adagrad), advances the
		data pointer/iteration counter, appends to the log buffer every
		LOG_INTERVAL iterations, and checkpoints every SAVE_INTERVAL.
		"""
		# prepare inputs (we're sweeping from left to right in steps seq_length long)
		if self.parameters.p+self.parameters.seq_length+1 >= len(self.data) or self.parameters.n == 0: 
			self.parameters.hprev = np.zeros((self.parameters.hidden_size, 1)) # reset RNN memory
			self.parameters.p = 0 # go from start of data
		inputs = [self.char_to_ix[ch] for ch in self.data[self.parameters.p:self.parameters.p+self.parameters.seq_length]]
		targets = [self.char_to_ix[ch] for ch in self.data[self.parameters.p+1:self.parameters.p+self.parameters.seq_length+1]]

		# sample from the model now and then
		if self.parameters.n % LOG_INTERVAL == 0:
			txt = ''.join(self.sample(inputs[0], 200))
			self.output.append([ txt ]) # print('---\n{}\n---'.format(txt))

		# forward seq_length characters through the net and fetch gradient
		loss, dWxh, dWhh, dWhy, dbh, dby, hprev_new = self.lossFun(inputs, targets)
		self.parameters.smooth_loss = self.parameters.smooth_loss * 0.999 + loss * 0.001
		# on GPU the EMA is a 0-d cupy array; make it a plain float so it
		# prints and JSON-serializes cleanly
		if GPU: self.parameters.smooth_loss = float(self.parameters.smooth_loss)
		print('\r{}'.format(self.parameters.smooth_loss), end='')
		if self.parameters.n % LOG_INTERVAL == 0:
			self.output[-1].extend([ get_time_str(), self.parameters.n, self.parameters.smooth_loss ]) # print progress
			# print('iter {}, loss: {}'.format(self.parameters.n, self.parameters.smooth_loss))
		
		# perform parameter update with Adagrad
		for param, dparam, mem in zip([self.parameters.Wxh, self.parameters.Whh, self.parameters.Why, self.parameters.bh, self.parameters.by], 
										[dWxh, dWhh, dWhy, dbh, dby], 
										[self.parameters.mWxh, self.parameters.mWhh, self.parameters.mWhy, self.parameters.mbh, self.parameters.mby]):
			mem += dparam * dparam
			param += -self.parameters.learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
			
		self.parameters.hprev = hprev_new
		self.parameters.p += self.parameters.seq_length # move data pointer
		self.parameters.n += 1 # iteration counter 
		
		if self.parameters.n % SAVE_INTERVAL == 0:
			self.save_progress()

	def save_progress(self):
		"""Checkpoint parameters, then dump the accumulated log buffer."""
		try:
			self.parameters.save()
		except KeyboardInterrupt:
			# NOTE(review): a Ctrl-C here aborts the save mid-write (the
			# checkpoint may be partial) but still lets the log dump below
			# run — confirm this best-effort behavior is intended
			print('Saving parameters')
			pass
			
		try:
			# print(self.output)
			json_dump(self.output_filename, self.output)
		except KeyboardInterrupt:
			# same best-effort handling for the log dump
			print('Dumping logs')
			pass

def train(m, n):
	"""Run n training iterations on model m."""
	remaining = n
	while remaining > 0:
		m.train()
		remaining -= 1

def main():
	"""Train until interrupted, then report speed and save a checkpoint.

	Ctrl-C is the normal way to stop: KeyboardInterrupt ends the loop,
	after which the measured iteration rate is printed and all progress
	(weights, counters, logs) is persisted.
	"""
	import speed_measurement

	rnn = model()
	stopwatch = speed_measurement.measure(rnn)

	try:
		while True:
			rnn.train()
	except KeyboardInterrupt:
		print(f'\nTraining speed: { stopwatch.timeit() } it/s')
		print('Saving progress ...')
		rnn.save_progress()

# Script entry point; importing this module does not start training.
if __name__ == '__main__':
	main()