#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: 邵奈一
@Email: shaonaiyi@163.com
@Date: 2024/11/18
@微信：shaonaiyi888
@微信公众号: 邵奈一
"""
# Code 5-7
# Configuration settings
import argparse


def parameter_parser():
    """Build and return the configuration namespace for the text generator.

    Note: ``parse_args(args=[])`` deliberately ignores the real command
    line, so every option always takes its default value here.

    Returns:
        argparse.Namespace: num_epochs, learning_rate, hidden_dim,
        batch_size, window, load_model, model (checkpoint path).
    """
    parser = argparse.ArgumentParser(description='Text Generation')
    parser.add_argument('--epochs', dest='num_epochs', type=int, default=10)
    parser.add_argument('--learning_rate', dest='learning_rate', type=float, default=0.001)
    parser.add_argument('--hidden_dim', dest='hidden_dim', type=int, default=128)
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=128)
    parser.add_argument('--window', dest='window', type=int, default=100)
    # BUG FIX: `type=bool` is broken in argparse — bool('False') is True,
    # so any value passed on the command line would enable loading.
    # Parse the string explicitly instead; the default is unchanged.
    parser.add_argument('--load_model', dest='load_model',
                        type=lambda s: str(s).lower() in ('true', '1', 'yes'),
                        default=True)
    parser.add_argument('--model', dest='model', type=str, default='tmp/5-textGenerator_model.pt')
    args = parser.parse_args(args=[])
    return args


# Code 5-7
# Run the training
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from five_three_build_network import TextGenerator
from five_two_load_data import Preprocessing


class Execution:
    """Drives the text-generation workflow: data preparation, training of
    a TextGenerator network, and greedy character-by-character sampling.
    """

    def __init__(self, args):
        # Path of the raw training corpus (hard-coded by the original design).
        self.file = 'data5/book.txt'
        self.window = args.window
        self.batch_size = args.batch_size
        self.learning_rate = args.learning_rate
        self.num_epochs = args.num_epochs

        # All of these are populated by prepare_data().
        self.targets = None
        self.sequences = None
        self.vocab_size = None
        self.char_to_idx = None
        self.idx_to_char = None

    def prepare_data(self):
        """Load the corpus and build the training sequences and targets."""
        # Initialize the preprocessor.
        preprocessing = Preprocessing()

        # Load the corpus file and split it into characters.
        text = preprocessing.read_dataset(self.file)

        # Build the two lookup dictionaries:
        # char -> index and index -> char.
        self.char_to_idx, self.idx_to_char = preprocessing.create_dictionary(text)

        # Build the training sentences and their target characters
        # with a sliding window of self.window characters.
        self.sequences, self.targets = preprocessing.build_sequences_target(
            text, self.char_to_idx, window=self.window)

        # Vocabulary size = number of distinct characters.
        self.vocab_size = len(self.char_to_idx)

    def train(self, args):
        """Train the network and save its weights to ``args.model``.

        Args:
            args: configuration namespace from parameter_parser().
        """
        # Initialize the network.
        model = TextGenerator(args, self.vocab_size)
        # Initialize the optimizer.
        optimizer = optim.RMSprop(model.parameters(), lr=self.learning_rate)
        # Number of full mini-batches (the trailing remainder is dropped,
        # as in the original implementation).
        num_batches = int(len(self.sequences) / self.batch_size)
        # Switch to training mode.
        model.train()
        # BUG FIX: guards the per-epoch print below — `loss` was unbound
        # when num_batches == 0.
        loss = None

        # Training loop.
        for epoch in range(self.num_epochs):
            # Mini batches.
            for i in range(num_batches):
                # BUG FIX: the original wrapped these slices in a bare
                # `try/except`; numpy slicing never raises on an
                # out-of-range stop index, so the handler was dead code
                # and the bare except could only hide unrelated errors.
                x_batch = self.sequences[i * self.batch_size: (i + 1) * self.batch_size]
                y_batch = self.targets[i * self.batch_size: (i + 1) * self.batch_size]

                # Convert numpy arrays to torch tensors.
                x = torch.from_numpy(x_batch).type(torch.LongTensor)
                y = torch.from_numpy(y_batch).type(torch.LongTensor)

                # Forward pass.
                y_pred = model(x)
                # Loss computation.
                loss = F.cross_entropy(y_pred, y.squeeze())
                # Clear gradients.
                optimizer.zero_grad()
                # Backward pass.
                loss.backward()
                # Update parameters.
                optimizer.step()

                # Print the loss every 10 batches.
                if (i + 1) % 10 == 0:
                    print(
                        f'Epoch [{epoch + 1}/{self.num_epochs}], Batch [{i + 1}/{num_batches}], Loss: {loss.item():.5f}')

            # Print the final loss of each epoch (skipped when no batch ran).
            if loss is not None:
                print(f'Epoch [{epoch + 1}/{self.num_epochs}], Final Loss: {loss.item():.5f}')

        # BUG FIX: save to args.model instead of a second hard-coded copy of
        # the path, and make sure the target directory exists first.
        os.makedirs(os.path.dirname(args.model) or '.', exist_ok=True)
        torch.save(model.state_dict(), args.model)

    # Code 5-8
    # Text generator
    @staticmethod
    def generator(model, sequences, idx_to_char, n_chars):
        """Greedily sample ``n_chars`` characters from a random seed sequence.

        Args:
            model: trained network mapping a (1, window) LongTensor of
                character indices to per-character logits.
            sequences: list/array of index sequences to seed from.
            idx_to_char: dict mapping index -> character.
            n_chars: number of characters to generate.

        Returns:
            str: seed pattern + generated characters (new, backward-compatible
            return value; the original returned None).
        """
        # Evaluation mode.
        model.eval()

        # Softmax over the vocabulary dimension.
        softmax = nn.Softmax(dim=1)

        # Pick a random seed sequence. BUG FIX: max(..., 1) keeps
        # randint's range non-empty when there is only one sequence
        # (the original crashed with ValueError in that case).
        start = np.random.randint(0, max(len(sequences) - 1, 1))

        # The seed pattern for the given random index.
        pattern = sequences[start]

        # Show the seed pattern.
        print('\nPattern: \n')
        print(''.join([idx_to_char[value] for value in pattern]), "\"")

        # full_prediction accumulates seed + generated indices.
        full_prediction = pattern.copy()

        # Generate one character per iteration.
        for i in range(n_chars):
            # Convert the current window to a (1, window) tensor.
            pattern = torch.from_numpy(pattern).type(torch.LongTensor)
            pattern = pattern.view(1, -1)

            # Predict the next character's distribution.
            prediction = model(pattern)
            prediction = softmax(prediction)

            # Greedy decoding: take the most likely index.
            prediction = prediction.squeeze().detach().numpy()
            arg_max = np.argmax(prediction)

            # Slide the window one character to the right and append
            # the predicted index.
            pattern = pattern.squeeze().detach().numpy()
            pattern = pattern[1:]
            pattern = np.append(pattern, arg_max)

            # Record the prediction.
            full_prediction = np.append(full_prediction, arg_max)

        print('Prediction: \n')
        text = ''.join([idx_to_char[value] for value in full_prediction])
        print(text, "\"")
        return text


if __name__ == '__main__':

    args = parameter_parser()

    # Prepare the dataset once — both the "load pretrained" and the
    # "train from scratch" paths need the sequences and dictionaries.
    # (FIX: the original duplicated this whole sequence in both branches.)
    execution = Execution(args)
    execution.prepare_data()

    # Train only when loading is disabled or no checkpoint exists yet.
    if not (args.load_model and os.path.exists(args.model)):
        execution.train(args)

    sequences = execution.sequences
    idx_to_char = execution.idx_to_char
    vocab_size = execution.vocab_size

    # Build the network and load the trained weights.
    model = TextGenerator(args, vocab_size)
    model.load_state_dict(torch.load(args.model))

    # Generate 1000 characters of text.
    execution.generator(model, sequences, idx_to_char, 1000)