import time

def timer(fun):
    """Decorator that prints the wall-clock run time of every call to `fun`.

    Returns the wrapped function's result unchanged.
    """
    from functools import wraps  # local import so the file's import section is untouched

    @wraps(fun)  # FIX: preserve the wrapped function's __name__/__doc__
    def wrapper(*args, **kwargs):
        start = time.time()
        ans = fun(*args, **kwargs)
        end = time.time()
        print(f"[{fun.__name__}] run time :{end - start} seconds")
        return ans
    return wrapper
import sys
sys.path.append("./动手学深度学习")
import IPython.display
import torch
from torch import nn
from mymodel.Show import show
import IPython

@timer
def train(net, train_iter, epochs, optimizer, loss, device, batch_s, test_iter=None, xlim=None, ylim=None, frec=1):
    """GPU training loop for a classification net.

    net: torch module; weights are re-initialized here (Xavier for Linear/Conv2d).
    train_iter / test_iter: iterables of (x, y) minibatches.
    epochs, optimizer, loss, device: the usual training ingredients.
    batch_s: batch size, forwarded to the loss-curve scaling in `show`.
    xlim, ylim: plot limits forwarded to `show`.
    frec: evaluate/plot every `frec` epochs (only when test_iter is given).
    """
    torch.set_default_dtype(torch.float32)

    def init_weight(m):  # Xavier gives a well-scaled starting point
        if type(m) == nn.Linear or type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
    net.apply(init_weight)
    print(f"using device: {device}")
    net.to(device)  # move the network onto the chosen device
    myshow = show(["train_acc","test_acc","train_loss"],f"epoch*{frec}",f"loss*{batch_s}/acc",xlim,ylim)
    net.train()
    counter = 0
    for epoch in range(epochs):
        for x, y in train_iter:
            x, y = x.to(device), y.to(device)
            y_h = net(x)
            l = loss(y_h, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()

        if test_iter is None:
            continue
        if counter % frec == 0:
            with torch.no_grad():
                net.eval()
                IPython.display.clear_output(wait=True)
                # FIX: values reordered to match the curve labels declared
                # above (train_acc, test_acc, train_loss); the original
                # passed test accuracy first, mislabeling the curves.
                myshow.add([myshow.eval_acc(net, train_iter),
                            myshow.eval_acc(net, test_iter),
                            myshow.eval_loss(loss, net, train_iter, batch_s)])
                myshow.show_train()
            # FIX: restore training mode — the original left the net in
            # eval mode (dropout/BatchNorm frozen) for all later epochs.
            net.train()
        counter += 1
    myshow.clear()
    # NOTE(review): these final eval calls pass `device` while the in-loop
    # ones do not — presumably `show` defaults it; confirm against mymodel.Show.
    if test_iter is None:
        print(f"loss:{myshow.eval_loss(loss,net,train_iter,1,device)}  train_acc:{myshow.eval_acc(net,train_iter,device)}")
    else:
        print(f"loss:{myshow.eval_loss(loss,net,train_iter,1,device)}  train_acc:{myshow.eval_acc(net,train_iter,device)}\
            test_acc:{myshow.eval_acc(net,test_iter,device)}")


def trygpu(indexofgpu):
    """Return the device `cuda:indexofgpu` when that GPU exists, else the CPU."""
    gpu_available = torch.cuda.device_count() >= indexofgpu + 1
    return torch.device(f"cuda:{indexofgpu}") if gpu_available else torch.device("cpu")

def init_weight(m):
    """Xavier-uniform init for nn.Linear weights; any other module is left alone."""
    if type(m) is nn.Linear:
        nn.init.xavier_uniform_(m.weight)

import collections

def counnt_text(tokens):
    """Return a Counter of token frequencies.

    `tokens` may be a flat list of tokens or a list of lines (a 2-D list);
    nested input is flattened before counting.
    """
    # An empty list takes the nested path too; flattening it is a no-op.
    is_nested = len(tokens) == 0 or isinstance(tokens[0], list)
    flat = [tok for line in tokens for tok in line] if is_nested else tokens
    return collections.Counter(flat)

class Vocab:
    """Vocabulary mapping between tokens and integer indices.

    Index 0 is always the unknown token "<unk>"; reserved tokens come next,
    then corpus tokens in descending frequency order, skipping tokens whose
    count is below `min_freq`.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None) -> None:
        tokens = tokens if tokens is not None else []
        reserved_tokens = reserved_tokens if reserved_tokens is not None else []
        # Count every token's occurrences, then sort most-frequent first.
        counts = counnt_text(tokens)
        self._token_freqs = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
        # "<unk>" is pinned at index 0, followed by any reserved tokens.
        self.idx_to_token = ["<unk>"] + reserved_tokens
        self.token_to_idx = {tok: i for i, tok in enumerate(self.idx_to_token)}
        for token, freq in self._token_freqs:
            if freq < min_freq:
                continue
            if token not in self.idx_to_token:
                self.idx_to_token.append(token)
                self.token_to_idx[token] = len(self.idx_to_token) - 1

    def __len__(self):
        """Number of known tokens, including <unk> and reserved ones."""
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        """Map a token (or arbitrarily nested lists of tokens) to indices; unknown -> 0."""
        if isinstance(tokens, (list, tuple)):
            return [self.__getitem__(t) for t in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, idxs):
        """Inverse of __getitem__: an index (or list of indices) back to tokens."""
        if isinstance(idxs, (list, tuple)):
            return [self.idx_to_token[i] for i in idxs]
        return self.idx_to_token[idxs]

    @property
    def unk(self):
        # The unknown-token index is fixed at 0 by construction.
        return 0

    @property
    def token_freqs(self):
        # (token, count) pairs, sorted by descending count.
        return self._token_freqs

def sgd(params, lr, batch_size):
    """One minibatch-SGD step: p <- p - lr * p.grad / batch_size, then zero the grads."""
    with torch.no_grad():
        for p in params:
            # Update averaged over the batch; same expression order as before.
            p -= lr * p.grad / batch_size
            # Clear the gradient buffer ready for the next backward pass.
            p.grad.zero_()

def tokenize(lines, mode="word"):
    """Split lines of text into token lists.

    mode="word": each line becomes a list of words, and a bare " " string is
    appended after every line (kept as-is for backward compatibility — callers
    such as the word-level RNN rely on those space entries).
    mode="char": each line becomes a list of its characters.
    Raises ValueError for any other mode.
    """
    if mode == "word":
        tokenized = []
        for line in lines:
            tokenized.append(line.split())
            tokenized.append(" ")
        return tokenized
    if mode == "char":
        return [list(line) for line in lines]
    # FIX: the original `assert(f"... unknown mode option!")` asserted a
    # non-empty (always-truthy) string, so bad modes silently returned None.
    raise ValueError(f"{mode}: unknown mode option!")

import random
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Yield (X, Y) minibatches of length-`num_steps` subsequences in random order.

    Each label sequence Y is its input X shifted one position ahead, e.g. for
    10 tokens with num_steps=2: inputs (01)(23)... and labels (12)(34)... —
    one trailing token is always reserved so every subsequence has a label.
    """
    # Random starting offset so different epochs cut the corpus differently.
    corpus = corpus[random.randint(0, num_steps - 1):]
    # Reserve the final token for the last label (hence the -1).
    num_subseq = (len(corpus) - 1) // num_steps
    # Start position of every subsequence, visited in shuffled order.
    starts = list(range(0, num_subseq * num_steps, num_steps))
    random.shuffle(starts)

    def slice_at(pos):
        # One subsequence beginning at `pos`.
        return corpus[pos:pos + num_steps]

    num_batches = num_subseq // batch_size
    for b in range(num_batches):
        chunk = starts[b * batch_size:(b + 1) * batch_size]
        # Labels start one token later than inputs; the reserved trailing
        # token guarantees slice_at(s + 1) never runs out of corpus.
        xs = [slice_at(s) for s in chunk]
        ys = [slice_at(s + 1) for s in chunk]
        yield torch.tensor(xs), torch.tensor(ys)

# Sequential variant: consecutive minibatches are contiguous in the corpus.
def seq_data_iter_seq(corpus, batch_size, num_steps):
    """Yield (X, Y) minibatches where batch k+1 continues exactly where batch k
    stopped, per row. With batch_size=2, num_steps=3 over tokens 0-9 the two
    rows are (012)3 and (456)7; Y is X shifted one token ahead.
    """
    # Random starting offset, then drop the tail so rows are equal length.
    corpus = torch.tensor(corpus[random.randint(0, num_steps - 1):])
    usable = len(corpus) // batch_size * batch_size
    rows = corpus[:usable].reshape(batch_size, -1)
    # Leave one trailing column per row so the last Y slice stays in range.
    num_subseq = (rows.shape[1] - 1) // num_steps
    for k in range(num_subseq):
        s = k * num_steps
        yield rows[:, s:s + num_steps], rows[:, s + 1:s + 1 + num_steps]

def predictRNN(prefix, num_preds, net, vocab, device):
    """Generate `num_preds` tokens that follow `prefix` using a trained RNN.

    prefix: the seed token sequence.
    num_preds: number of tokens to generate after the prefix.
    net: trained model exposing begin_state() and (input, state) -> (logits, state).
    vocab: converts between tokens and indices in both directions.
    device: torch device the inputs are created on.
    """
    state = net.begin_state(batch_size=1, device=device)  # batch of 1 suffices for generation
    outputs = [vocab[prefix[0]]]  # running list of emitted token indices

    def last_as_input():
        # Feed the most recent output back in as a (1, 1) tensor.
        return torch.tensor([outputs[-1]], device=device).reshape((1, 1))

    # Warm-up: march through the known prefix, keeping the ground-truth tokens
    # while the hidden state absorbs the context.
    for token in prefix[1:]:
        _, state = net(last_as_input(), state)
        outputs.append(vocab[token])
    # Free-running generation: greedily take the argmax index at each step.
    for _ in range(num_preds):
        logits, state = net(last_as_input(), state)
        outputs.append(int(logits.argmax(dim=1).reshape(1)))
    return "".join(vocab.to_tokens(outputs))

import mymodel
import math
import mymodel.Show

def grad_clipping(net, theta):
    """Rescale all gradients in place so their global L2 norm is at most `theta`.

    Works for both nn.Module networks (trainable parameters) and scratch-built
    nets that expose a `.params` list.
    """
    if isinstance(net, nn.Module):
        params = [p for p in net.parameters() if p.requires_grad]
    else:
        params = net.params
    # Global norm across every parameter's gradient.
    norm = torch.sqrt(sum(torch.sum(p.grad ** 2) for p in params))
    if norm > theta:
        scale = theta / norm
        for p in params:
            p.grad[:] *= scale


def train_epoch(net, train_iter, loss, optim, device, use_random_iter):
    """Run one training epoch of an RNN language model; return the perplexity.

    net: model exposing begin_state(); nn.Module or a scratch net with .params.
    train_iter: yields (X, Y) with X of shape (batch, time).
    loss: per-element criterion (e.g. CrossEntropyLoss); its mean is backpropped.
    optim: a torch Optimizer, or a callable(batch_size=...) doing the update.
    use_random_iter: True when batches are sampled non-contiguously.
    """
    state = None
    num_batches = 0.0
    total_loss = 0.0
    for X, Y in train_iter:
        if state is None or use_random_iter:
            # First batch, or random sampling: batches aren't contiguous,
            # so start from a fresh hidden state every time.
            state = net.begin_state(batch_size=X.shape[0], device=device)
        else:
            # Sequential sampling: consecutive batches are contiguous, so the
            # state carries over; detach in place to cut the gradient link to
            # the previous iteration.
            if isinstance(net, nn.Module) and not isinstance(state, tuple):
                state.detach_()
            else:
                for s in state:
                    s.detach_()
        # The net emits time-major outputs (time1's batch, then time2's ...),
        # while Y is (batch, time) — transpose before flattening to align.
        y = Y.T.reshape(-1)
        X, y = X.to(device), y.to(device)
        y_hat, state = net(X, state)
        l = loss(y_hat, y.long()).mean()
        # FIX: accumulate a detached Python float — the original `sum += l`
        # shadowed the builtin and chained live tensors, keeping autograd
        # graph references alive across the whole epoch.
        total_loss += float(l)
        num_batches += 1
        if isinstance(optim, torch.optim.Optimizer):
            optim.zero_grad()
            l.backward()
            grad_clipping(net, 1)
            optim.step()
        else:
            l.backward()
            grad_clipping(net, 1)
            optim(batch_size=1)
    # Perplexity = exp(mean cross-entropy loss).
    return math.exp(total_loss / num_batches)


import IPython
import IPython.display
@timer
def trainRNN(showfreq , net, train_iter, vocab, lr, epochs, device, use_random_iter=False, word = False):
    """Train an RNN language model, periodically printing a sample prediction
    and the current perplexity.

    showfreq: report/plot every `showfreq` epochs.
    net: model exposing begin_state(); nn.Module or a scratch net with .params.
    train_iter: iterable of (X, Y) minibatches.
    vocab: Vocab used by predictRNN to convert tokens <-> indices.
    lr, epochs, device: learning rate, epoch count, torch device.
    use_random_iter: True when minibatches are sampled non-contiguously.
    word: True for a word-level vocabulary (changes the sample prefix shape).
    """
    print(f"on device:{device}")
    loss = nn.CrossEntropyLoss()
    myshow = mymodel.Show.show(["train"], f"epoches*{showfreq}", "perplexity",xlim=[0,epochs/showfreq])
    # nn.Module models use torch's SGD; scratch nets fall back to our sgd().
    if isinstance(net, nn.Module):
        optim = torch.optim.SGD(net.parameters(), lr)
    else:
        optim = lambda batch_size: sgd(net.params, lr, batch_size)
    # Fixed 50-token continuation of "time traveller" as a progress sample.
    predict = lambda prefix: predictRNN(prefix, 50, net, vocab, device)
    if word:
        # Word-level vocab: spaces are themselves tokens in the prefix.
        pre = lambda:predict(["time"," ","traveller"," "])
    else:
        pre = lambda:predict("time traveller")
    for epoch in range(epochs):
        los = train_epoch(net, train_iter, loss, optim, device, use_random_iter)
        if((epoch)%showfreq==0):
            myshow.add([los])
            print(pre())
            print(f"困惑度 {los:.1f}")
            # myshow.show_train()
    # NOTE(review): `los` is unbound if epochs == 0 — the final report assumes
    # at least one epoch ran.
    IPython.display.clear_output()
    print(f"困惑度 {los:.1f} device: {device}")
    print(pre())

import torch.nn.functional as F

class RNNModel(nn.Module):
    """Wrap a torch recurrent layer (RNN/GRU/LSTM) with a one-hot input
    encoding and a linear output head over the vocabulary."""

    def __init__(self, run_layer, vocab_size, *args, **kwargs) -> None:
        super(RNNModel, self).__init__(*args, **kwargs)
        self.rnn = run_layer
        self.vocab_size = vocab_size
        self.num_hiddens = self.rnn.hidden_size
        # A bidirectional layer concatenates both directions' hidden states,
        # so the output head sees twice as many features.
        if self.rnn.bidirectional:
            self.num_directions = 2
            self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
        else:
            self.num_directions = 1
            self.linear = nn.Linear(self.num_hiddens, self.vocab_size)

    def forward(self, inputs, state):
        # (batch, time) indices -> (time, batch, vocab) one-hot floats;
        # time-major is the default layout the recurrent layer expects.
        X = F.one_hot(inputs.T.long(), self.vocab_size).to(torch.float32)
        Y, state = self.rnn(X, state)
        # Collapse (time, batch, hidden) to (time*batch, hidden) so a single
        # linear call maps every timestep to vocabulary logits.
        output = self.linear(Y.reshape(-1, Y.shape[-1]))
        return output, state

    def begin_state(self, device, batch_size=1):
        """Fresh zero hidden state; LSTM gets a (hidden, cell) pair."""
        shape = (self.num_directions * self.rnn.num_layers,
                 batch_size,
                 self.num_hiddens)
        if isinstance(self.rnn, nn.LSTM):
            return (torch.zeros(shape, device=device),
                    torch.zeros(shape, device=device))
        return torch.zeros(shape, device=device)