import numpy as np
import torch.nn as nn
import torch
import os
import jieba
from torch.optim.lr_scheduler import StepLR, ExponentialLR
from dataloader.my_dataloder import get_data
from models.my_lstm import custom_model, one2many
import sys
import matplotlib.pyplot as plt

# Output directory for saved models.
os.makedirs("modeloutput", exist_ok=True)

# get_data() is project-local; from the usage below it returns the batched
# training DataLoader, a gensim word2vec model (``.wv``/``.vector_size``),
# the padded sequence length, and the id of the stop token -- TODO confirm
# against dataloader/my_dataloder.py.
my_dataloader, word_encoder, max_len, stopId = get_data()
words = word_encoder.wv.index_to_key  # vocabulary list, indexed by token id
num_of_words = len(words)  # vocabulary size (classifier output width)
vSize = word_encoder.vector_size  # word-embedding dimension

# device = torch.device('cuda:0')
device = torch.device('cpu')
# One-to-many model: a single seed embedding is unrolled into a sequence.
model = one2many(vSize, vSize, num_of_words).to(device)
# model = custom_model(50, 50, 2, num_of_words).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = StepLR(optimizer, 30, 0.9)  # lr *= 0.9 every 30 scheduler steps
# scheduler = ExponentialLR(optimizer, gamma=0.9)
criterion = nn.CrossEntropyLoss()
Epoch = 1000  # total training epochs

# Train the model, recording the per-batch loss for plotting afterwards.
record_loss = []
for epoch in range(Epoch):
    model.train()
    for batch_id, batch in enumerate(my_dataloader):
        # Dataloader tensors are fresh leaves; moving to the device is enough
        # (the original clone().detach() was redundant).
        x = batch[0].to(device)
        label = batch[1].to(device)

        # output is expected to be (batch, seq_len, vocab) with integer
        # targets in label -- TODO confirm against models/my_lstm.py.
        output = model(x, device)

        # Per-sample cross entropy (mean over time steps), averaged over the
        # batch. Equivalent to the append/sum/divide loop, but built as one
        # stacked tensor and using the idiomatic ``.long()`` cast.
        loss = torch.stack(
            [criterion(output[i], label[i].long()) for i in range(len(x))]
        ).mean()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        record_loss.append(loss.item())
        print("\r", "training epoch {}, batch_id: {}, loss: {}".format(epoch, batch_id, loss.item()), end="", flush=True)
    # One scheduler step per epoch: lr decays by 0.9 every 30 epochs.
    scheduler.step()

# Visualize the training curve, then generate text interactively.
plt.plot(record_loss)
plt.show()

print("\n输入你的首个词:")
x = input()
s = x

# Only the FIRST segmented token of the user input seeds the one-to-many
# decoder, so take it directly from jieba's generator instead of
# materializing the whole token list.
first_word = next(jieba.cut(x))
# Shape (1, vector_size); torch.tensor already copies, so no clone/detach.
head = torch.tensor(np.array([word_encoder.wv.get_vector(first_word)])).to(device)

# Decode token ids until the stop token -- see model.inferByInitial.
out = model.inferByInitial(head, stopId, device)

# Re-assemble the generated ids into text, inserting a space after roughly
# every 7 characters (assumed 7-character verse segments -- TODO confirm).
chars_in_segment = 0
for token_id in out:
    word = words[token_id]
    s += word
    chars_in_segment += len(word)
    if chars_in_segment >= 7:
        s += ' '
        chars_in_segment = 0

print(s)
"""
x = input()
s = x
head = jieba.cut(x)
head = [word for word in head]
dis = max_len - len(head)
head = [word_encoder.wv.get_vector(word) for word in x]
z = torch.zeros((dis, 50))
head = torch.tensor(np.array(head))
head = torch.cat((head, z), dim=0)
head = head.unsqueeze(0)
head = head.clone().detach().to(device)
record = len(s)
with torch.no_grad():
    output = model(head, device).squeeze(0)
    for o in output:
        prob = o.cpu().detach().numpy()
        pred = np.argmax(prob)
        word = words[pred]
        if record >= 5:
            s += ' '
            record = 0
        if word in ['E']:
            break
        else:
            s += word
            record += len(word)
    print(s)
"""





















