# import torch
# from torch import nn
# from d2l import torch as d2l
#
# T = 1000
# time = torch.arange(1,T+1,dtype=torch.float32)
# x = torch.sin(0.01*time) + torch.normal(0,0.2,(T,))
# print(x.shape)
# d2l.plot(time,[x],'time','x',xlim=[1,1000],figsize=(6,3))
# d2l.plt.show()
#
# tau = 10
# features = torch.zeros((T-tau,tau))
# for i in range(tau):
#     features[:,i] = x[i:T-tau+i]
# labels = x[tau:].reshape(-1,1)
#
# batch_size , n_train = 16,600
# train_iter = d2l.load_array((features[:n_train],labels[:n_train]),batch_size=batch_size,is_train=True)
#
# def init_weights(m):
#     if type(m) == nn.Linear:
#         nn.init.xavier_uniform_(m.weight)
#
# def get_net():
#     net = nn.Sequential(nn.Linear(10,15),nn.ReLU(), nn.Linear(15,1))
#     net.apply(init_weights)
#     return net
#
# loss = nn.MSELoss()
#
# def train(net,train_iter,loss,epochs,lr):
#     trainer = torch.optim.Adam(net.parameters(),lr)
#     for epoch in range(epochs):
#         for X,y in train_iter:
#             trainer.zero_grad()
#             l = loss(net(X),y)
#             l.backward()
#             trainer.step()
#         print(f'epoch{epoch+1},'
#               f'loss:{d2l.evaluate_loss(net,train_iter,loss)}')
#
# net = get_net()
# train(net,train_iter,loss,5,0.01)
#
# onestep_preds = net(features)
# d2l.plot(
#     [time,time[tau:]],
#     [x.detach().numpy(),onestep_preds.detach().numpy()],'time','x',
#     legend=['data','1-step preds'],xlim=[1,1000],figsize=(6,3))
# d2l.plt.show()

# import collections
# import re
# from d2l import torch as d2l
# import torch
#
# d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL+'timemachine.txt',
#                                 '090b5e7e70c295757f55df93cb0a180b9691891a')
# def read_time_machine():
#     with open(d2l.download('time_machine'),'r') as f:
#         lines = f.readlines()
#     return [re.sub('[^A-Za-z]+',' ',line).strip().lower() for line in lines]
#
# lines = read_time_machine()
#
# def tokenize(lines , token='word'):
#     if token == 'word':
#         return [line.split() for line in lines]
#     elif token == 'char':
#         return [list(line) for line in lines]
#     else:
#         print('未知令牌类型'+token)
#
# tokens = tokenize(lines)
# for i in range(11):
#     print(tokens[i])
#
# class Vocab:  #@save
#     """文本词表"""
#     def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
#         if tokens is None:
#             tokens = []
#         if reserved_tokens is None:
#             reserved_tokens = []
#         # 按出现频率排序
#         counter = count_corpus(tokens)
#         self._token_freqs = sorted(counter.items(), key=lambda x: x[1],
#                                    reverse=True)
#         # 未知词元的索引为0
#         self.idx_to_token = ['<unk>'] + reserved_tokens
#         self.token_to_idx = {token: idx
#                              for idx, token in enumerate(self.idx_to_token)}
#         for token, freq in self._token_freqs:
#             if freq < min_freq:
#                 break
#             if token not in self.token_to_idx:
#                 self.idx_to_token.append(token)
#                 self.token_to_idx[token] = len(self.idx_to_token) - 1
#
#     def __len__(self):
#         return len(self.idx_to_token)
#
#     def __getitem__(self, tokens):
#         if not isinstance(tokens, (list, tuple)):
#             return self.token_to_idx.get(tokens, self.unk)
#         return [self.__getitem__(token) for token in tokens]
#
#     def to_tokens(self, indices):
#         if not isinstance(indices, (list, tuple)):
#             return self.idx_to_token[indices]
#         return [self.idx_to_token[index] for index in indices]
#
#     @property
#     def unk(self):  # 未知词元的索引为0
#         return 0
#
#     @property
#     def token_freqs(self):
#         return self._token_freqs
#
# def count_corpus(tokens):  #@save
#     """统计词元的频率"""
#     # 这里的tokens是1D列表或2D列表
#     if len(tokens) == 0 or isinstance(tokens[0], list):
#         # 将词元列表展平成一个列表
#         tokens = [token for line in tokens for token in line]
#     return collections.Counter(tokens)
#
# vocab = Vocab(tokens)
# print(list(vocab.token_to_idx.items())[:10])

import random
import torch
from d2l import torch as d2l

# Build a word-level corpus from "The Time Machine" and inspect bigram
# (adjacent word pair) frequencies via a d2l.Vocab.
tokens = d2l.tokenize(d2l.read_time_machine())
# Flatten the per-line token lists into one continuous token sequence.
corpus = [token for line in tokens for token in line]
vocab = d2l.Vocab(corpus)


# Pair each token with its successor. list(zip(...)) replaces the
# original redundant identity comprehension ([pair for pair in zip(...)]);
# result is the same list of (token, next_token) tuples.
bigram_tokens = list(zip(corpus[:-1], corpus[1:]))
# Vocab sorts by frequency, so token_freqs[:10] is the 10 most common bigrams.
bigram_vocab = d2l.Vocab(bigram_tokens)
print(bigram_vocab.token_freqs[:10])















