from src import Preproduction as Pre

# Load the hanzi vocabulary tables: `id` maps character -> integer index,
# `ch` is the inverse table (index -> character).
# NOTE(review): `id` shadows the `id` builtin; renaming it would touch every
# use below (Sample, show, ...), so it is left as-is here.
id, ch = Pre.load("results/hanzi_2_one_hot_freq.data")
cnt = len(ch)  # total number of characters found in the data
V = 10000      # effective vocabulary cap; chars with index >= V become 0 in Sample()
#V = cnt

print("cnt = " + str(cnt))
print("V = " + str(V))

# Chat-corpus TSV files ("query<TAB>reply" per line).  The commented-out
# entries are corpora deliberately excluded from this training run.
pathBase = 'dataset/chat_corpus/clean_chat_corpus/'
paths = [
    "chatterbot.tsv",
    #"douban_single_turn_no_space.tsv",
    #"ptt.tsv",
    "qingyun.tsv",
    "subtitle.tsv",
    #"tieba.tsv",
    "weibo.tsv"
]
fullPaths = [pathBase + path for path in paths]

import numpy as np

import random

# Draw a supervised minibatch of text pairs from the corpus rows.
def Sample(rows, batch, visualize=False):
    """Randomly pick up to `batch` rows and encode them as id sequences.

    Each row has the form "query<TAB>reply"; the returned X holds the
    encoded queries and Y the encoded replies (one list of char ids each).
    Characters whose index is >= V are replaced by 0, the END token
    (the same id used for '\\t' and '\\n').
    """
    X, Y = [], []

    def encode(text):
        # Indices at or above the vocabulary cap V collapse to END (0).
        return [id[c] if id[c] < V else 0 for c in text]

    chosen = random.sample(rows, min(batch, len(rows)))
    for line in chosen:
        query, reply = line.strip("\n").split('\t')
        if visualize:
            print(query + "> " + reply)
        X.append(encode(query))
        Y.append(encode(reply))

    return X, Y

from src.GeneratorLSTMv12 import LSTMGAN

# Model hyper-parameters.
embDim = 128  # character embedding dimension
hidDim = 512  # LSTM hidden-state dimension
conDim = 150  # condition/context vector dimension (presumably; confirm in LSTMGAN)

# Rows drawn per corpus file for the training pool, and the minibatch size.
TrainSize = 5000
baseBatchSize = 512
#TrainSize = 16
#baseBatchSize = 8

lstm = LSTMGAN(embDim, hidDim, conDim, V)
# Resume from a checkpoint.  `epoch` drives the flow below: the value -1
# triggers the scorer pre-training branch, otherwise the main loop resumes
# at epoch+1.  TODO confirm the return convention of load/load_pretrain.
#epoch = lstm.load("results/V12-LSTM-highflu-loss-S000.model")
epoch = lstm.load_pretrain("results/V12-LSTM-P000.model")
#epoch = 0

def genTrainData():
    """Build the training pool.

    Reads every corpus file in `fullPaths` and draws at most `TrainSize`
    random lines from each, returning the concatenated list of raw rows.
    """
    rows = []
    # Iterate the resolved paths directly instead of indexing two parallel
    # lists (`paths` / `fullPaths`) by position -- removes the implicit
    # assumption that both lists stay the same length.
    for full_path in fullPaths:
        # NOTE(review): no explicit encoding -- relies on the platform
        # default; the corpora are presumably UTF-8, confirm before running
        # on a non-UTF-8 locale.
        with open(full_path, "r") as f:
            lines = f.readlines()
        rows += random.sample(lines, min(TrainSize, len(lines)))
    return rows

# Training state: global step counters feed the JSON metric lines printed
# in the loops below.
globalStepScr = 0
globalStepGen = 0
batchSize = baseBatchSize
rows = genTrainData()

def _decode(token_ids):
    """Map generated token ids back to a string.

    Ids 0 and 1 are skipped: 0 is END per the Sample() convention;
    1 is presumably a second control token (pad/start) -- confirm in LSTMGAN.
    """
    # ''.join over a generator instead of quadratic string +=.
    return ''.join(ch[c] for c in token_ids if c > 1)


def show():
    """Sample one (query, reply) pair and print the model's generations.

    Prints a greedy (argmax) decode, a sampled (softmax) decode, and the
    scorer's correlation/fluency scores for the sampled sequence.
    """
    print("------------------------------")
    query, ans = Sample(rows, 1, True)
    lstm.updateEmbedding()

    # Greedy generation, at most 30 tokens.
    gen = lstm.eval(query[0], False, 30)
    res = _decode(gen)
    print("Gen Argmax %d > %s" % (len(res), res))

    # Sampled generation; `item` is the raw sample handed to the scorer.
    gen, item = lstm.eval(query[0], False, 30, True)
    res = _decode(gen)
    print("Gen Softmax %d > %s" % (len(res), res))
    print("Score Correlation > %s\nScore Fluency > %s" % lstm.score(query[0], item))

# Pre-Train
# Runs only when the checkpoint reports epoch == -1, i.e. the scorer has
# not been pre-trained yet.  The generator pre-training loop below is kept
# as a disabled (triple-quoted) reference.
if epoch == -1:
    # Higher generator LR for pre-training; rebuild the optimizers so the
    # new rates take effect.
    lstm.lr1 = 0.01
    lstm.lr2 = 0.001
    lstm.genOptim()
    '''
    for e in range(20):
        # Gen Model
        print("Epoch Pre Gen %d." % e)
        N = len(rows) // batchSize
        print("Total steps: %d" % N)
        for step in range(1, N + 1):
            print("Pretrain %d Gen, Step %d." % (e, step))
            X, Y = Sample(rows, batchSize)
            loss1 = lstm.trainGen(X, Y, False, False)
            print('{{"metric": "loss Gen", "value": {}, "step": {}}}'.format(loss1, globalStepGen))
            globalStepGen += 1
            show()
        print("Saving...")
        lstm.save("results/V12-LSTM-P%03d.model", -1)
    '''
    
    # Scr Model: pre-train the scorer for a fixed 30 steps.
    print("Epoch Pre Scr.")
    #N = len(rows) // batchSize
    N = 30
    print("Total steps: %d" % N)
    for step in range(1, N + 1):
        print("Pretrain Scr, Step %d." % (step))
        X, Y = Sample(rows, batchSize)
        # Third arg presumably toggles argmax vs sampled generation for the
        # scorer's negatives -- TODO confirm against LSTMGAN.trainScr.
        loss1 = lstm.trainScr(X, Y, True)
        print('{{"metric": "loss Score", "value": {}, "step": {}}}'.format(loss1, globalStepScr))
        globalStepScr += 1
        show()
    print("Saving...")
    lstm.save("results/V12-LSTM-flu0.8-hid150-argmax-S%03d.model", 0)

    # Mark pre-training done so the main loop starts at epoch 1.
    epoch = 0

# Main (adversarial) training: drop the generator LR from the pre-training
# value and rebuild the optimizers.
lstm.lr1 = 0.001
lstm.lr2 = 0.001
lstm.genOptim()

# Print one softmax-sampled generation before training starts, as a baseline.
print("Epoch 0.")
print("------------------------------")
query, ans = Sample(rows, 1, True)
gen, item = lstm.eval(query[0], False, 30, True)
res = ''
for i in gen:
    # Ids <= 1 are control tokens (0 is END); skip them when decoding.
    res += ch[i] if i > 1 else ''
print("  Gen > %s" % res)
print("==============================\n")

# Main training loop: resumes from the checkpoint's epoch + 1, up to epoch 50.
for i in range(epoch+1, 51):
    print("Epoch %d." % i)
    print("BatchSize: %d." % batchSize)
    print("LR Gen: %f." % lstm.lr1)
    print("LR Scr: %f." % lstm.lr2)
    N = len(rows) // batchSize
    print("Total steps: %d" % N)
    for step in range(1, N+1):
        print("Epoch %d, Step %d." % (i, step))
        X, Y = Sample(rows, batchSize)
        # Train the scorer only on every 5th step (steps 1, 6, 11, ...).
        if (step % 5 == 1):
            # Third arg flips from True to False after epoch 20 --
            # presumably a sampling/teacher-forcing switch; confirm in LSTMGAN.
            loss1 = lstm.trainScr(X, Y, i <= 20)
            print('{{"metric": "loss Score", "value": {}, "step": {}}}'.format(loss1, globalStepScr))
            globalStepScr += 1
        loss2 = lstm.trainGen(X, Y, i <= 20)
        # trainGen returns a tensor (unlike trainScr); extract a plain value.
        # NOTE(review): .numpy() on a CUDA tensor raises -- assumes CPU tensors.
        loss2 = loss2.detach().numpy()
        print('{{"metric": "loss Gen", "value": {}, "step": {}}}'.format(loss2, globalStepGen))
        globalStepGen += 1
        show()
        print("------------------------------")
    # `i % 1 == 0` is always true: a checkpoint is saved every epoch.
    if i % 1 == 0:
        print("Saving...")
        lstm.save("results/V12-LSTM-flu0.8-hid150-argmax-%03d.model", i)
    # Disabled Gumbel-temperature annealing schedule, kept for reference.
    '''
    if i % 1 == 0:
        lstm.tau -= 1
        if lstm.tau < 1:
            lstm.tau =.1
        print("Tau: %f." % lstm.tau)
    '''
    print("==============================\n")

    