import math
import torch
import random
import os
from torch import nn
from torch.nn import functional as F
# from d2l import torch as d2l
import numpy as np
import pathlib
import re
import collections
import itertools
import time
import warnings
import matplotlib.pyplot as plt

# 忽略warning
warnings.filterwarnings('ignore')

# import debugpy

# debugpy.listen(17171)
# print('wait debugger')
# debugpy.wait_for_client()
# print("Debugger Attached")


# def rnn(inputs, state, params):
#     # inputs的形状：(时间步数量，批量大小，词表大小)
#     W_xh, W_hh, b_h, W_hq, b_q = params
#     H, = state  # [1, 512]
#     outputs = []
#     # X的形状：(批量大小，词表大小)
#     for X in inputs:   # X [32, 28]，相当于把时间步长抽离出来
#         H = torch.tanh(torch.mm(X, W_xh) + torch.mm(H, W_hh) + b_h)
#         Y = torch.mm(H, W_hq) + b_q   # [32, 28], 得到的值是小数咋整？应该要处理一下吧
#         outputs.append(Y)
#     return torch.cat(outputs, dim=0), (H,)

class Accumulator:
    """Accumulate running sums over `n` scalar variables."""

    def __init__(self, n):
        """Create `n` independent accumulators, all starting at 0.0."""
        self.data = [0.0] * n

    def add(self, *args):
        """Add one value (coerced to float) to each accumulator in turn."""
        updated = []
        for current, increment in zip(self.data, args):
            updated.append(current + float(increment))
        self.data = updated

    def reset(self):
        """Zero out every accumulator, keeping the count unchanged."""
        self.data = [0.0 for _ in self.data]

    def __getitem__(self, idx):
        """Return the running sum stored at position `idx`."""
        return self.data[idx]
    
class Timer:
    """Measure and remember a sequence of elapsed wall-clock intervals."""

    def __init__(self):
        """Initialize an empty history and immediately start timing."""
        self.times = []
        self.start()

    def start(self):
        """Mark the beginning of a new interval."""
        self.tik = time.time()

    def stop(self):
        """Close the current interval, record it, and return its length."""
        elapsed = time.time() - self.tik
        self.times.append(elapsed)
        return elapsed

    def avg(self):
        """Mean of all recorded intervals."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Total of all recorded intervals."""
        return sum(self.times)

    def cumsum(self):
        """Running (prefix) sums of the recorded intervals, as a list."""
        return np.array(self.times).cumsum().tolist()
####################### 1. Build the corpus and vocabulary ###########################
file_name = 'timemachine.txt'
root_name = 'data'
File_path = os.path.join(root_name, file_name)

print("Code runing...")

with open(File_path) as f:
    lines = f.readlines()

# Keep only letters; collapse each run of anything else into a single space.
# FIX: the original pattern '[^A-Za-z+]' placed '+' INSIDE the character
# class, so literal '+' characters survived cleaning — the intended pattern
# is '[^A-Za-z]+' (one-or-more non-letters).
lines = [re.sub(r'[^A-Za-z]+', ' ', line).strip().lower() for line in lines]

### Tokenize at character granularity ###
tokens = [list(line) for line in lines]

### Build the vocabulary ###
tokens = [token for line in tokens for token in line]  # flatten into one list of chars

counter = collections.Counter(tokens)
# Sort by descending frequency so common characters get small indices.
token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True)
reserved_tokens = []
idx_to_token = ['<unk>'] + reserved_tokens
# dict: token -> index
token_to_idx = {token: idx for idx, token in enumerate(idx_to_token)}

min_freq = 0  # drop tokens rarer than this (0 keeps everything)

for token, freq in token_freqs:  # build the two lookup tables in frequency order
    if freq < min_freq:
        break
    if token not in token_to_idx:
        idx_to_token.append(token)
        token_to_idx[token] = len(idx_to_token) - 1

vocab = {'idx_to_token': idx_to_token,  # the central vocabulary object
         'token_to_idx': token_to_idx}

### Build the corpus: the text as a flat list of token indices ###
# FIX: `tokens` was already flattened above, so index each character
# directly.  The original double loop (`for line in tokens for token in
# line`) iterated the characters of one-char strings — same result, but
# misleading and accidental.
corpus = [vocab['token_to_idx'][token] for token in tokens]

# Truncate to a manageable amount of data.
maxtokens = 10000
if maxtokens > 0:
    corpus = corpus[:maxtokens]


print('data loading is finished.')
print('--------------------------------------------------------')


########################## 2. Build the initial model ########################
vocab_size = len(vocab['idx_to_token'])
num_hiddens = 512
batch_size = 32
num_steps = 35

# Pick the compute device (idiomatic availability check instead of
# counting devices).
device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
print(f'device is {device}.')

### Initial RNN state ###
# The hidden state is carried over from the previous time step during training.
### Parameter initialization ###
num_inputs = num_outputs = vocab_size
# Hidden-layer parameters, scaled by 0.01 to keep initial activations small.
W_xh = torch.randn(size=(num_inputs, num_hiddens), device=device) * 0.01
W_hh = torch.randn(size=(num_hiddens, num_hiddens), device=device) * 0.01
b_h = torch.zeros(num_hiddens, device=device)

# Output-layer parameters (also scaled by 0.01).
W_hq = torch.randn(size=(num_hiddens, num_outputs), device=device) * 0.01
b_q = torch.zeros(num_outputs, device=device)

params = [W_xh, W_hh, b_h, W_hq, b_q]
for param in params:
    param.requires_grad_(True)  # grads are None until the first backward pass

init_state = (torch.zeros((batch_size, num_hiddens), device=device), )


############################## 3. Train the model #############################
use_random_iter = False
loss = nn.CrossEntropyLoss()  # loss function (reduction='mean' by default)
lr = 1
num_epochs = 500
loss_value = []

print('begin to training...')
for epoch in range(num_epochs):
    state, timer = None, Timer()
    metric = Accumulator(2)  # (summed per-token loss, token count)
    ### Build the sequential mini-batches for this epoch ###
    offset = random.randint(0, num_steps)  # random offset so epochs see different splits
    num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
    # Inputs and targets are the same stream shifted by one position.
    Xs = torch.tensor(corpus[offset: offset + num_tokens])
    Ys = torch.tensor(corpus[offset + 1: offset + 1 + num_tokens])

    # Split the stream into `batch_size` parallel sub-sequences.
    Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
    num_batches = Xs.shape[1] // num_steps
    for i in range(0, num_steps * num_batches, num_steps):
        X = Xs[:, i: i + num_steps]   # [batch_size, num_steps]
        Y = Ys[:, i: i + num_steps]   # [batch_size, num_steps]
        if state is None:
            state = init_state
        else:
            # Truncated BPTT: keep the state values but cut the graph history.
            for s in state:
                s.detach_()
        y = Y.T.reshape(-1)   # flatten targets time-major: batch_size * num_steps
        X, y = X.to(device), y.to(device)
        X = F.one_hot(X.T, vocab_size).type(torch.float32)  # [num_steps, batch_size, vocab_size]

        W_xh, W_hh, b_h, W_hq, b_q = params
        H, = state
        outputs = []
        for X_t in X:  # step through time; X_t: [batch_size, vocab_size]
            H = torch.tanh(torch.mm(X_t, W_xh) + torch.mm(H, W_hh) + b_h)
            # FIX: use a fresh name for the step output; the original reused
            # `Y`, silently clobbering the target tensor sliced above (it only
            # worked because `y` had already been extracted from `Y`).
            Y_t = torch.mm(H, W_hq) + b_q  # logits [batch_size, vocab_size]
            outputs.append(Y_t)
        y_hat, state = torch.cat(outputs, dim=0), (H,)
        l = loss(y_hat, y.long()).mean()  # scalar cross-entropy
        l.backward()   # populates params' .grad
        ### Gradient clipping by global norm ###
        norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))
        theta = 1
        if norm > theta:
            for param in params:
                param.grad[:] *= theta / norm
        ### SGD update ###
        with torch.no_grad():
            for param in params:
                param -= lr * param.grad    # in-place update; no batch_size division
                param.grad.zero_()    # reset grads for the next step
        metric.add(l * y.numel(), y.numel())
    ppl, speed = math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
    loss_value.append(ppl)
    if (epoch + 1) % 10 == 0:
        print(f'=========epoch:{epoch+1}=========Perplexity:{ppl}=======speed:{speed}=======')

# Plot the per-epoch perplexity curve.
# FIX: follow num_epochs instead of the hard-coded range(0, 500), and label
# the y-axis with what is actually plotted (perplexity, not raw loss).
x = range(num_epochs)
plt.plot(x, loss_value)
plt.xlabel('epoch')
plt.ylabel('perplexity')
plt.show()


params_frezon = params
# torch.save(params_frezon, 'params_frezon.pt')
# params_frezon = torch.load('./params_frezon.pt')  # note: params is a plain list, not a state_dict
print('training is over!')
print('----------------------------------------------------------------')    
print('begin to infer...')  
batch_size = 1  # inference batch size: one sequence at a time
state = (torch.zeros((batch_size, num_hiddens), device=device), )
prefix = "traveller"
num_preds = 50   # number of characters to generate


def rnn_forward(X, state, params):
    """Run one forward pass over the time-major one-hot input `X`.

    Mirrors the training-loop forward computation.  Returns
    (logits concatenated over time steps, new hidden state).
    """
    W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state  # [1, num_hiddens]
    outputs = []
    for x_t in X:  # x_t: [1, vocab_size]
        H = torch.tanh(torch.mm(x_t, W_xh) + torch.mm(H, W_hh) + b_h)
        outputs.append(torch.mm(H, W_hq) + b_q)  # logits [1, vocab_size]
    return torch.cat(outputs, dim=0), (H,)


outputs_pre = [vocab['token_to_idx'][prefix[0]]]
get_input = lambda: torch.tensor([outputs_pre[-1]], device=device).reshape((1, 1))

# Warm-up: feed the known prefix to build up the hidden state; the model
# outputs are discarded and the true next character is appended instead.
for y in prefix[1:]:
    X = F.one_hot(get_input().T, vocab_size).type(torch.float32)  # [1, 1, vocab_size]
    _, state = rnn_forward(X, state, params_frezon)
    outputs_pre.append(vocab['token_to_idx'][y])

# Generation: greedily take the argmax logit as the next input token.
for _ in range(num_preds):
    X = F.one_hot(get_input().T, vocab_size).type(torch.float32)  # [1, 1, vocab_size]
    y, state = rnn_forward(X, state, params_frezon)
    outputs_pre.append(int(y.argmax(dim=1).reshape(1)))  # index of the predicted char

predict = ''.join([vocab['idx_to_token'][i] for i in outputs_pre])
print(predict)