import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm

from data_loader import get_loader, get_word2ix, get_ix2word
from models.model import LSTMModel
from utils.param import Param


def t1():
    """Smoke-test the raw Tang-poetry dataset and one forward pass of LSTMModel.

    Loads ./data/tang.npz (array 'data' plus pickled 'word2ix'/'ix2word'
    dicts), wraps the data tensor in a DataLoader, and pushes a single
    batch through a freshly constructed model.
    """
    # allow_pickle is required: word2ix/ix2word are stored as object arrays.
    datas = np.load("./data/tang.npz", allow_pickle=True)

    data = datas['data']
    word2ix = datas['word2ix'].item()
    print(data.shape)
    data = torch.from_numpy(data)
    dataloader = DataLoader(data, batch_size=32, shuffle=True)

    # Build the model once, outside the loop — the original re-created it
    # for every batch.
    m1 = LSTMModel(len(word2ix), 64, 128)

    for idx, data_ in tqdm(enumerate(dataloader)):
        # [N, L] -> [L, N]: model expects sequence-first input.
        data_ = data_.long().transpose(1, 0).contiguous()
        # Teacher forcing: target is the input shifted by one step.
        input_, target = data_[:-1, :], data_[1:, :]
        output, (h_n, c_n) = m1(input_)
        print(1)
        break

def t2():
    """Shape-check a forward pass of LSTMModel on random token indices.

    Token ids are drawn from [0, 8000), so the embedding table needs at
    least 8000 rows. The original called LSTMModel(64, 128, 2), which —
    by the constructor usage in t1/t3/t4, where the first argument is the
    vocabulary size — made the vocab 64 and guaranteed an embedding
    index-out-of-range error.
    """
    # Vocab size 8293 matches the checkpoint used by t3()/t4().
    m1 = LSTMModel(8293, 64, 128, 2)
    # inp: [seq_len=124, batch_size=128] of random token ids.
    inp = torch.randint(0, 8000, size=(124, 128))
    output, hidden = m1(inp)
    print(m1)
    print(output.shape)

def t3():
    """Continue a poem from the prefix "白云黑土" using a pretrained model.

    The prefix characters are fed to the network one at a time; once the
    prefix is exhausted, each step greedily feeds back the most likely
    next token until "<EOP>" is produced or 300 steps elapse.
    """
    param = Param()
    model = LSTMModel(8293, 64, 128, 2)
    model.load_state_dict(torch.load('./outputs/[2023-06-16-14_19_58]_49.pth', map_location=torch.device('cpu')))
    word2ix = get_word2ix(param)
    ix2word = get_ix2word(param)
    # Seed token, shaped [seq_len=1, batch_size=1].
    inp = torch.Tensor([word2ix['<START>']]).view(1, 1).long()
    hidden = None
    start_words = "白云黑土"
    results = list(start_words)
    remaining = len(start_words)
    for step in range(300):
        output, hidden = model(inp, hidden)
        if remaining > 0:
            # Still consuming the prefix: feed the pre-filled character.
            remaining -= 1
            w = results[step]
            inp = inp.data.new([word2ix[w]]).view(1, 1)
        else:
            # Greedy decoding: argmax over output of shape [1, 1, vocab].
            top_index = output.data[0][0].topk(1)[1].item()
            w = ix2word[top_index]
            results.append(w)
            inp = inp.data.new([top_index]).view(1, 1)
        if w == "<EOP>":
            results.pop()
            break
    print(results)
    # Break the poem into lines after sentence-ending punctuation.
    poem = ''.join(w + '\n' if w in ('。', '？', '！') else w for w in results)
    print(poem)

def t4():
    """Generate an acrostic poem (藏头诗) from the heads "白云黑土".

    Each head character starts a new sentence: a head is consumed on the
    first step and again whenever the previously emitted character was a
    sentence terminator ('。' or '！'); otherwise the model's argmax
    prediction is appended. Stops at "<EOP>" or after 300 steps.

    NOTE(review): '？' ends a line in the final join below but does NOT
    trigger the next head character — confirm whether that asymmetry is
    intended.
    """
    param = Param()
    m1 = LSTMModel(8293, 64, 128, 2)
    m1.load_state_dict(torch.load('./outputs/[2023-06-16-14_19_58]_49.pth', map_location=torch.device('cpu')))
    word2ix = get_word2ix(param)
    ix2word = get_ix2word(param)
    # Seed token, shaped [seq_len=1, batch_size=1].
    inp = torch.Tensor([word2ix['<START>']]).view(1, 1).long()
    hidden = None
    start_words = "白云黑土"
    results = []
    max_len = 300
    start_words_len = len(start_words)
    for i in range(max_len):
        output, hidden = m1(inp, hidden)
        if start_words_len > 0 and (i == 0 or results[i-1] in ['。', '！']):
            # Start of a new sentence: consume the next head character.
            start_words_len -= 1
            w = start_words[0]
            results.append(w)
            start_words = start_words[1:]
            inp = inp.data.new([word2ix[w]]).view(1, 1)
        else:
            # Greedy decoding: argmax over output of shape [1, 1, vocab].
            top_index = output.data[0][0].topk(1)[1].item()
            w = ix2word[top_index]
            results.append(w)
            inp = inp.data.new([top_index]).view(1, 1)
        if w == "<EOP>":
            del results[-1]
            break
    print(results)
    poem = ''.join([word if word not in ['。', '？', '！'] else word + '\n' for word in results])
    print(poem)

if __name__ == '__main__':
    # Scratch entry point — swap the call to run a different experiment.
    t4()