from torch.optim.lr_scheduler import ExponentialLR
import argparse
import torch
from torch import nn
import numpy as np
from torch import nn, optim
from torch.optim.lr_scheduler import ExponentialLR
from torch.utils.data import DataLoader
import os
import time
import datetime
import argparse
# Restrict which physical GPUs are visible to this process.
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2"

# Training corpus: one UTF-8 text file per novel.
filepaths = ['./data/万象之王.txt','./data/万道龙皇.txt','./data/世界顶尖的暗杀者转生为异世界贵族.txt','./data/从零开始的异世界生活.txt','./data/全职高手.txt','./data/八男别闹了.txt','./data/刀剑神域.txt','./data/圣墟.txt','./data/大主宰.txt','./data/天河降临.txt','./data/完美世界.txt','./data/我一个史莱姆吊打巨龙很合理吧.txt','./data/武动乾坤.txt','./data/斗破苍穹.txt','./data/武炼巅峰.txt','./data/牧神记.txt','./data/盾之勇者成名录.txt','./data/诡秘之主.txt','./data/道诡异仙.txt','./data/遮天.txt']
# Fixed character vocabulary (one char per position; index order must stay stable).
char_key_dict_path = './Chinese_characters_3500.txt'
model_save_path = "./Model/novel_creat_model.pkl"
model_save_path_pth = "./Model/novel_creat.pth"
# Output path is timestamped (microseconds) at import time.
save_pred_novel_path = "./Creat_novel/pred_novel_" + str(int(round(time.time() * 1000000))) + ".txt"
pred_novel_start_text = '我重生了'

use_gpu = torch.cuda.is_available()
print('torch.cuda.is_available() == ', use_gpu)
# Fall back to CPU when CUDA is unavailable; the original unconditionally
# selected 'cuda:0', which makes every later .to(device) fail on CPU-only hosts.
device = torch.device('cuda:0' if use_gpu else 'cpu')


def dictGet(dict1, index):
    """Return dict1[index], falling back to dict1[0] when index is outside [0, len(dict1))."""
    if 0 <= index < len(dict1):
        return dict1[index]
    return dict1[0]


def dictGetValue(dict1, indexZifu):
    """Return dict1[indexZifu]; unknown keys map to the '*' placeholder entry."""
    try:
        return dict1[indexZifu]
    except KeyError:
        return dict1['*']


def getNotSet(list1):
    """Return a new list with duplicates removed, preserving first-occurrence order.

    Example:
        [1, 1, 2, 3, 3, 5] -> [1, 2, 3, 5]
    """
    # dict preserves insertion order, so this deduplicates in O(n) instead of
    # the original O(n^2) `not in` scan.  Elements must be hashable (here the
    # inputs are single characters, which are).
    return list(dict.fromkeys(list1))

class Dataset(torch.utils.data.Dataset):
    """Sliding-window character dataset over the concatenated novel corpus.

    Each item is a pair of tensors of length ``args.sequence_length``:
    the input window and the same window shifted right by one character.
    """

    def __init__(self, args):
        self.args = args
        self.words = self.load_words()

        self.uniq_words = self.get_uniq_words()
        self.index_to_word = dict(enumerate(self.uniq_words))
        self.word_to_index = {word: index for index, word in enumerate(self.uniq_words)}

        # Map every corpus character to its vocabulary index.  Characters that
        # are not in Chinese_characters_3500.txt fall back to the '*'
        # placeholder; looking its index up (instead of hard-coding 1482 as the
        # original did) stays correct if the vocabulary file ever changes.
        unknown_index = self.word_to_index.get('*', 0)
        self.words_indexes = [self.word_to_index.get(w, unknown_index) for w in self.words]

    def load_words(self):
        """Read and concatenate every file in `filepaths` into one string.

        Bug fixed: the original re-assigned `corpus_chars` on every loop
        iteration, so only the LAST novel was ever loaded.
        """
        parts = []
        for filepath in filepaths:
            with open(filepath, encoding='UTF-8') as f:
                parts.append(f.read())
        corpus_chars = ''.join(parts)
        print('length', len(corpus_chars))
        return corpus_chars

    def get_uniq_words(self):
        """Load the fixed vocabulary, one character per index.

        A fixed file is used instead of set(self.words) because set ordering
        varies between runs and indices must stay stable across restarts.
        """
        with open(char_key_dict_path, 'r', encoding='utf-8') as f:
            text = f.read()
        return list(text)

    def __len__(self):
        # Number of full (input, target) windows available.
        return len(self.words_indexes) - self.args.sequence_length

    def __getitem__(self, index):
        return (
            torch.tensor(self.words_indexes[index:index + self.args.sequence_length]),
            torch.tensor(self.words_indexes[index + 1:index + self.args.sequence_length + 1]),
        )

class Model(nn.Module):
    """Character-level LSTM language model.

    Embedding -> 3-layer LSTM -> Linear over the vocabulary.  forward()
    returns the logits for the FINAL timestep only, plus the LSTM state.

    Fix: predict() calls ``model.init_state(...)`` and ``model(x, state)``,
    neither of which the original class supported (AttributeError /
    TypeError).  ``init_state`` is added and ``forward`` gains an optional,
    backward-compatible ``prev_state`` parameter; ``forward(x)`` behaves
    exactly as before (fresh zero state per call).
    """

    def __init__(self, dataset):
        super(Model, self).__init__()

        self.input_size = 128
        self.hidden_size = 256
        self.embedding_dim = self.input_size
        self.num_layers = 3

        n_vocab = len(dataset.uniq_words)

        self.lstm = nn.LSTM(
            input_size=self.input_size,
            hidden_size=self.hidden_size,
            num_layers=self.num_layers,
            batch_first=True
        )

        self.embedding = nn.Embedding(
            num_embeddings=n_vocab,
            embedding_dim=self.embedding_dim
        )

        self.fc = nn.Linear(self.hidden_size, n_vocab)

    def init_state(self, batch_size=1, device=None):
        """Return a zeroed (h_0, c_0) pair shaped (num_layers, batch_size, hidden_size)."""
        shape = (self.num_layers, batch_size, self.hidden_size)
        return (torch.zeros(shape, device=device),
                torch.zeros(shape, device=device))

    def forward(self, x, prev_state=None):
        """Run one forward pass.

        x: LongTensor of character indices, shape (batch, seq).
        prev_state: optional (h, c) carried over from a previous call;
            a zero state on x's device is used when omitted (original behavior).
        Returns (logits for the last timestep, (h_n, c_n)).
        """
        if prev_state is None:
            prev_state = self.init_state(x.size(0), x.device)

        embed = self.embedding(x)
        out, (hn, cn) = self.lstm(embed, prev_state)

        # Classify only the final timestep of each sequence.
        out = self.fc(out[:, -1, :])

        return out, (hn, cn)


def train(dataset, model, args):
    """Train `model` on `dataset` for args.max_epochs epochs with Adam +
    per-epoch exponential LR decay, logging progress periodically.

    Fixes two crashes in the original:
      * ``criterion(y_pred, y)`` was a shape mismatch — the model emits
        (batch, vocab) logits for the last timestep while y is (batch, seq).
      * ``args.log_interval`` is never defined by this file's CLI parser,
        so the logging check raised AttributeError on the first batch.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.train()

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size
    )

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    exp_lr_scheduler = ExponentialLR(optimizer, gamma=args.lr_decay)

    # Tolerate args without a log_interval attribute (the parser omits it).
    log_interval = getattr(args, 'log_interval', 100)

    for epoch in range(args.max_epochs):
        total_loss = 0.0
        for batch_idx, (x, y) in enumerate(dataloader):
            x, y = x.to(device), y.to(device)

            optimizer.zero_grad()

            y_pred, _ = model(x)

            # Support both model output shapes:
            #   (batch, seq, vocab): supervise every timestep;
            #   (batch, vocab):      last-timestep logits, supervise the last target.
            if y_pred.dim() == 3:
                loss = criterion(y_pred.reshape(-1, y_pred.size(-1)), y.reshape(-1))
            else:
                loss = criterion(y_pred, y[:, -1])

            loss.backward()
            optimizer.step()

            total_loss += loss.item()

            if (batch_idx + 1) % log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(x), len(dataloader.dataset),
                           100. * batch_idx / len(dataloader), loss.item()))

        exp_lr_scheduler.step()

        print('====> Epoch: {} Average loss: {:.4f}'.format(
            epoch, total_loss / len(dataloader)))

def predict(dataset, model, text, next_words=20):
    """Generate `next_words` characters continuing `text` by sampling from
    the model's softmax distribution over a sliding character window.

    Fixes in this version:
      * the original called model.init_state(...) (undefined on Model) and
        model(x, state) (forward takes only x) — both raised immediately;
      * .numpy() was called on a CUDA tensor, which raises; move to CPU first;
      * device is chosen from availability instead of hard-coded 'cuda';
      * probabilities are renormalized in float64 so np.random.choice's
        sum-to-1 check cannot fail on float32 rounding.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()

    words = list(text)

    with torch.no_grad():
        for i in range(next_words):
            # words grows by one each step while i advances, so the window
            # words[i:] keeps a constant length of len(text).
            x = torch.tensor(
                [[dictGetValue(dataset.word_to_index, w) for w in words[i:]]],
                device=device)
            y_pred, _ = model(x)

            # Support both (batch, vocab) and (batch, seq, vocab) outputs.
            last_word_logits = y_pred[0] if y_pred.dim() == 2 else y_pred[0][-1]

            p = torch.nn.functional.softmax(last_word_logits, dim=0).cpu().numpy()
            p = p.astype('float64')
            p /= p.sum()
            word_index = np.random.choice(len(p), p=p)
            words.append(dictGet(dataset.index_to_word, int(word_index)))

    return "".join(words)


parser = argparse.ArgumentParser(description='rnn')
parser.add_argument('--max-epochs', type=int, default=10)  # passes over the whole corpus
parser.add_argument('--batch-size', type=int, default=256)
parser.add_argument('--sequence-length', type=int, default=10)  # training window length, in characters
parser.add_argument('--lr_decay', type=float, default=0.95)  # per-epoch exponential LR decay factor
# train() reads args.log_interval; the original parser never defined it,
# which made the per-batch logging check raise AttributeError.
parser.add_argument('--log-interval', type=int, default=100)  # batches between progress prints
args = parser.parse_args()
dataset = Dataset(args)

if os.path.exists(model_save_path):
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints you trust.
    model = torch.load(model_save_path)
    print('发现有保存的Model,load model ....\n------开始训练----------')
else:
    print('没保存的Model,Creat model .... \n------开始训练----------')
    model = Model(dataset)

print(model)
train(dataset, model, args)

# Save both the full pickled model and the plain state_dict.
torch.save(model, model_save_path)
torch.save(model.state_dict(), model_save_path_pth)

print("训练完成")