import os
import sys

import torch
from data_loader import get_loader, get_word2ix, get_ix2word
from models.model import LSTMModel, MaskedSoftmaxCELoss
from utils.param import Param
from tqdm import tqdm
from loguru import logger
import time
class Solver(object):
    """Training and generation driver for the poem LSTM.

    Holds the model, data loader and vocabulary, and can optionally resume
    training from the newest checkpoint found in ``param.model_path``.
    """

    def __init__(self, param: Param):
        self.param = param
        self.model = LSTMModel(param.vocab_size, param.embedding_dim,
                               param.hidden_dim, param.num_layer)
        self.train_loader = get_loader(param)
        self.word2ix = get_word2ix(param)

        self.start_epoch = 0
        if param.load_model:
            print("加载预训练模型......")
            pths = os.listdir(param.model_path)
            if pths:
                # Checkpoint names look like "[timestamp]_<epoch>.pth"; sort by
                # the embedded epoch number, descending, so pths[0] is newest.
                pths = sorted(pths, key=self._epoch_of, reverse=True)
                state = torch.load(os.path.join(param.model_path, pths[0]),
                                   map_location=torch.device('cpu'))
                self.model.load_state_dict(state)
                self.start_epoch = self._epoch_of(pths[0])
                print(f"预训练模型:{pths[0]}加载成功！")
            else:
                print("加载失败，路径下无预训练模型......")

    @staticmethod
    def _epoch_of(name: str) -> int:
        """Extract the epoch number from a checkpoint filename like "[ts]_12.pth"."""
        return int(name.split("]")[-1].split("_")[-1].split(".")[0])

    def train(self):
        """Run the training loop, saving a checkpoint after every epoch.

        Trains with teacher forcing on next-character prediction, masking the
        padding token ``</s>`` out of the loss.
        """
        steps_per_epoch = len(self.train_loader)
        epoch_num = self.param.epoch_num
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.param.lr)
        criterion = MaskedSoftmaxCELoss()
        self.model.to(self.param.device)
        pad_ix = self.word2ix['</s>']  # hoisted: loop-invariant lookup
        # NOTE(review): resuming starts at start_epoch + 1 and stops before
        # epoch_num, so a fresh run executes epoch_num - 1 epochs — confirm
        # this matches the intended epoch-numbering convention.
        for epoch in range(self.start_epoch + 1, epoch_num):
            print(f'==============Epoch:{epoch}/{epoch_num}==============')
            self.model.train()
            for step, data_ in enumerate(self.train_loader, start=1):  # data_: [N, L]
                # BUG FIX: Tensor.to() is not in-place; the original discarded
                # its result, so batches never reached self.param.device.
                data_ = data_.long().transpose(1, 0).contiguous().to(self.param.device)  # [L, N]
                optimizer.zero_grad()
                # Next-token prediction: target is the input shifted by one.
                input_, target = data_[:-1, :], data_[1:, :]  # both [L-1, N]
                output, _ = self.model(input_)  # [L-1, N, H]
                # Call the module (not .forward) so nn.Module hooks run.
                loss = criterion(output.permute(1, 2, 0), target.transpose(0, 1), pad_ix)
                loss.backward()
                optimizer.step()
                if step % 5 == 0:
                    print('{}Epoch: {}/{}\tStep: {}/{}\tLoss: {}'.format(
                        time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())),
                        epoch, epoch_num, step, steps_per_epoch, loss))
            save_model_name = '%s_%s.pth' % (
                time.strftime("[%Y-%m-%d-%H_%M_%S]", time.localtime(time.time())), epoch)
            save_path = os.path.join(self.param.save_path, save_model_name)
            torch.save(self.model.state_dict(), save_path)

    @staticmethod
    def _load_generator(checkpoint):
        """Build the generation model and vocab mappings from *checkpoint*.

        Returns ``(model, word2ix, ix2word, first_input)`` where
        ``first_input`` is the [1, 1] LongTensor holding the <START> token.
        """
        param = Param()
        # NOTE(review): hyper-parameters are hard-coded to match the shipped
        # checkpoint (vocab 8293, emb 64, hidden 128, 2 layers) — confirm
        # against the training config if the checkpoint changes.
        model = LSTMModel(8293, 64, 128, 2)
        model.load_state_dict(torch.load(checkpoint, map_location=torch.device('cpu')))
        word2ix = get_word2ix(param)
        ix2word = get_ix2word(param)
        first = torch.tensor([[word2ix['<START>']]], dtype=torch.long)  # [seq=1, batch=1]
        return model, word2ix, ix2word, first

    @staticmethod
    def _format_poem(results):
        """Join generated characters, inserting a newline after 。？！."""
        return ''.join(w if w not in ['。', '？', '！'] else w + '\n' for w in results)

    def generate_acrostic_poetry(self, start_words,
                                 checkpoint='./outputs/[2024-05-10-19_02_38]_344.pth'):
        """Generate an acrostic poem: each line begins with the next character
        of *start_words*. *checkpoint* selects the model weights to load."""
        model, word2ix, ix2word, input_ = self._load_generator(checkpoint)
        hidden = None
        results = []
        max_len = 300
        remaining = len(start_words)
        for i in range(max_len):
            output, hidden = model(input_, hidden)
            # Start a new line with the next "head" character right after a
            # sentence-ending mark (or at the very beginning).
            if remaining > 0 and (i == 0 or results[i - 1] in ['。', '！']):
                remaining -= 1
                w = start_words[0]
                results.append(w)
                start_words = start_words[1:]
                input_ = input_.data.new([word2ix[w]]).view(1, 1)  # [1, 1]
            else:
                # Greedy decoding: take the single most likely next token.
                top_index = output.data[0][0].topk(1)[1].item()  # output: [1, 1, vocab]
                w = ix2word[top_index]
                results.append(w)
                input_ = input_.data.new([top_index]).view(1, 1)
            if w == "<EOP>":
                del results[-1]
                break
        return self._format_poem(results)

    def generate_continuation(self, start_words,
                              checkpoint='./outputs/[2024-05-10-19_02_38]_344.pth'):
        """Continue a poem from its first phrase *start_words*.
        *checkpoint* selects the model weights to load."""
        model, word2ix, ix2word, input_ = self._load_generator(checkpoint)
        hidden = None
        results = list(start_words)
        max_len = 300
        remaining = len(start_words)
        for i in range(max_len):
            output, hidden = model(input_, hidden)
            if remaining > 0:
                # Warm up the hidden state by feeding the prompt verbatim.
                remaining -= 1
                w = results[i]
                input_ = input_.data.new([word2ix[w]]).view(1, 1)  # [1, 1]
            else:
                top_index = output.data[0][0].topk(1)[1].item()  # output: [1, 1, vocab]
                w = ix2word[top_index]
                results.append(w)
                input_ = input_.data.new([top_index]).view(1, 1)
            if w == "<EOP>":
                del results[-1]
                break
        return self._format_poem(results)


if __name__ == '__main__':
    # Build the configuration and the solver; training stays disabled until
    # the call below is uncommented.
    cfg = Param()
    runner = Solver(cfg)
    # runner.train()