import numpy as np
import abbrs, train

# Path to the plain-text training corpus. Hard-coded absolute Windows path —
# adjust per machine (previous relative path kept below for reference).
# CORPUS_PATH = r'..\yedu-spider\yedu-corpus.htm'
CORPUS_PATH = r'G:\Users\01\Downloads\Documents\二十四史全文白话全集+清史稿 TXT\merged.txt'

class parameters(train.parameters):
	"""Trainable RNN state: weight matrices, biases, Adagrad memories, and the
	running hidden state / smoothed loss. Persisted via the inherited save().
	"""
	def __init__(self, vocab_size):
		super().__init__()
		# iteration counter and corpus read pointer
		self.n = 0
		self.p = 0
		self.vocab_size = vocab_size
		# loss of a uniform predictor over one sequence — the value at iteration 0
		self.smooth_loss = -np.log(1.0 / vocab_size) * self.seq_length

		V, H = vocab_size, self.hidden_size
		scale = 0.01
		self.Wxh = scale * np.random.randn(H, V)  # input to hidden
		self.Whh = scale * np.random.randn(H, H)  # hidden to hidden
		self.Why = scale * np.random.randn(V, H)  # hidden to output
		self.bh = np.zeros((H, 1))  # hidden bias
		self.by = np.zeros((V, 1))  # output bias

		# Adagrad gradient-square accumulators, one per trainable tensor
		for name in ('Wxh', 'Whh', 'Why', 'bh', 'by'):
			setattr(self, 'm' + name, np.zeros_like(getattr(self, name)))

		self.hprev = np.zeros((H, 1))  # initial hidden state (RNN memory reset)

		self.save()
		
class model(train.model):
	"""Load the corpus, build the char<->index vocabulary, persist the text
	metadata, and construct (and thereby save) the initial parameters."""
	def __init__(self):
		# data I/O
		self.data = abbrs.read_file(CORPUS_PATH)
		# sorted() makes the vocabulary order deterministic across runs.
		# The original list(set(...)) order varies with Python's string hash
		# randomization, so char_to_ix would silently differ between runs and
		# invalidate any previously saved model/text.json mapping.
		chars = sorted(set(self.data))
		self.data_size, self.vocab_size = len(self.data), len(chars)
		self.char_to_ix = { ch:i for i, ch in enumerate(chars) }
		self.ix_to_char = { i:ch for i, ch in enumerate(chars) }
		# persist vocabulary metadata (arglist presumably declared on train.model — TODO confirm)
		abbrs.json_dump(train.MODEL_DIR_PLUS('text.json'), abbrs.pack_dict(self, model.arglist))

		# constructed for its side effect: initializes weights and save()s them
		parameters(self.vocab_size)

def mkdirs():
	"""Create the 'log' and 'model' output directories if missing.

	The original wrapped the whole loop in one try/except FileExistsError,
	so a pre-existing 'log' aborted the loop before 'model' was created.
	makedirs(..., exist_ok=True) handles each directory independently.
	"""
	import os
	for name in 'log model'.split():
		os.makedirs(name, exist_ok=True)

if __name__ == '__main__':
	# Ensure output directories exist, then build the model (reads the corpus
	# and writes vocabulary + initial parameters to disk).
	mkdirs()
	model()