from src import Preproduction as Pre

# Load the character -> one-hot id mapping produced by preprocessing.
# NOTE(review): `id` shadows the builtin id(); it is kept because later
# code (e.g. Sample) reads this module-level name — renaming would need
# a whole-file change.
id, ch = Pre.load("results/hanzi_2_one_hot.data")
cnt = len(ch)  # number of distinct characters found in the corpus
V = 10000      # vocabulary size cap; ids >= V are mapped to 0 downstream

print(f"cnt = {cnt}")
print(f"V = {V}")

from src.EmbeddingGloVe import genX as genX
from src.EmbeddingGloVe import Glove as Glove

# Corpus configuration: only the files left uncommented are used.
pathBase = 'dataset/chat_corpus/clean_chat_corpus/'
paths = [
    #"chatterbot.tsv",
    #"douban_single_turn.tsv",
    #"ptt.tsv",
    "qingyun.tsv",
    #"subtitle.tsv",
    #"tieba.tsv",
    #"weibo.tsv"
]
# Per-corpus step counts; entries parallel `paths` (commented entries
# correspond to the commented files above).
# NOTE(review): `steps` is not referenced in the visible part of this
# script — confirm it is used elsewhere before removing.
steps = [
    1, 
    300,
    #40,
    10,
    #274,
    #232,
    #443
] 
# Absolute-ish paths of the active corpus files.
fullPaths = [pathBase + path for path in paths]

import numpy as np

# Build a GloVe embedding model (vocab size V, 128-dim vectors) and load
# pretrained weights from disk.
glove = Glove(V, 128)
glove.load("results/embedding100.model")

#i = glove.eval(id[c])

import random

# Sample `batch` supervised text pairs from the document rows.
# X = seqLen characters of input, Y = the following characters (reply).
# \t and \n are treated as END (id = 0).
def Sample(rows, batch, seqLen, visualize=False):
    """Sample up to `batch` supervised (query, reply) pairs from `rows`.

    Each row is expected to be a TSV line "query\\treply\\n". The query is
    truncated to its last `seqLen` characters and the reply to its first
    `seqLen` characters; both are mapped to integer ids via the
    module-level `id` mapping. Characters missing from the mapping, or
    whose id is >= V, are encoded as 0 (the END id).

    Returns (X, Y): two parallel lists of id-lists.
    """
    def encode(text):
        # Map each character to its id, clamping unknown or
        # out-of-vocabulary characters to 0. The original indexed
        # `id[c]` directly (twice per character), which raised KeyError
        # on any character absent from the mapping; .get() makes
        # sampling robust to unseen characters.
        ids = []
        for c in text:
            v = id.get(c, 0)
            ids.append(v if v < V else 0)
        return ids

    X = []
    Y = []
    for row in random.sample(rows, min(batch, len(rows))):
        parts = row.strip("\n").split('\t')
        if len(parts) != 2:
            # Skip malformed rows instead of crashing mid-epoch.
            continue
        x, y = parts
        x = x[-seqLen:]
        y = y[:seqLen]
        if visualize:
            print(x + "> " + y)
        X.append(encode(x))
        Y.append(encode(y))
    return X, Y

# Example: draw one (query, reply) pair from the first corpus and print it.
with open(fullPaths[0], "r") as f:
    rows = f.readlines()
    x, y = Sample(rows, 1, 10, True)

from src.GeneratorLSTMv5 import LSTM

embDim = 128   # embedding dimension; matches Glove(V, 128) above
hidDim = 512   # LSTM hidden-state size
seqLen = 30    # maximum sequence length fed to the model

TrainSize = 10000      # max rows sampled per corpus file per training pool
baseBatchSize = 128    # training batch size

# Build the generator LSTM on top of the pretrained GloVe embeddings.
lstm = LSTM(seqLen, embDim, hidDim, V, glove)
#lstm.load("results/V5LSTM002.model")

def genTrainData():
    """Build the training pool: up to TrainSize randomly chosen lines
    from every configured corpus file, concatenated into one list."""
    pool = []
    for fullPath in fullPaths:
        with open(fullPath, "r") as f:
            lines = f.readlines()
        pool.extend(random.sample(lines, min(TrainSize, len(lines))))
    return pool

# Training loop: 10 epochs over a freshly sampled training pool.
globalStep = 0
batchSize = baseBatchSize
rows = genTrainData()
for i in range(1, 11):
    print("Epoch %d." % i)
    print("BatchSize: %d." % batchSize)
    print("LR: %f." % lstm.lr)
    up = 0     # running loss sum for this epoch
    down = 0   # number of steps contributing to `up`
    N = len(rows) // batchSize
    print("Total steps: %d" % N)
    for step in range(1, N+1):
        print("Epoch %d, Step %d." % (i, step))
        X, Y = Sample(rows, batchSize, seqLen)
        loss = lstm.trainStep(X, Y).detach().numpy()
        up += loss
        down += 1
        # JSON metric line (the {{ }} escapes produce literal braces).
        print('{{"metric": "loss", "value": {}, "step": {}}}'.format(loss, step))
        if step % 100 == 0:
            # Decay the learning rate every 100 steps.
            lstm.mulLR(0.7)
            print("LR: %f." % lstm.lr)
        print("------------------------------")
    # Guard against division by zero when the pool is smaller than one batch.
    if down:
        print("Avg loss: %f" % (up / down))
    # Save a checkpoint after every epoch. BUG FIX: the original called
    # lstm.save("results/V5LSTM%03d.model", i), passing the unformatted
    # pattern plus `i` as two arguments (every other load/save in this
    # file takes a single path), so the epoch number was never
    # substituted into the filename. Format the path first.
    print("Saving...")
    lstm.save("results/V5LSTM%03d.model" % i)
    print("==============================\n")

    