from d2l import torch as d2l
import matplotlib.pyplot as plt
import copy
import collections
from cizhui_chuli_gru import new_contents


class Vocab:  # @save
    """Vocabulary for text tokens.

    Maps tokens to integer ids and back. Index 0 is always the unknown
    token ``'<unk>'``; any ``reserved_tokens`` follow, then the remaining
    corpus tokens ordered by descending frequency. Tokens rarer than
    ``min_freq`` are excluded.
    """

    def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
        tokens = [] if tokens is None else tokens
        reserved_tokens = [] if reserved_tokens is None else reserved_tokens
        # Rank tokens by descending corpus frequency.
        freq_counter = count_corpus(tokens)
        self._token_freqs = sorted(freq_counter.items(),
                                   key=lambda pair: pair[1], reverse=True)
        # '<unk>' occupies index 0, followed by the reserved tokens.
        self.idx_to_token = ['<unk>'] + reserved_tokens
        self.token_to_idx = {tok: i
                             for i, tok in enumerate(self.idx_to_token)}
        for tok, freq in self._token_freqs:
            if freq < min_freq:
                # Frequencies are sorted, so every later token is also too rare.
                break
            if tok not in self.token_to_idx:
                self.token_to_idx[tok] = len(self.idx_to_token)
                self.idx_to_token.append(tok)

    def __len__(self):
        return len(self.idx_to_token)

    def __getitem__(self, tokens):
        # A single token maps to its id; a list/tuple maps element-wise.
        # Unknown tokens map to self.unk (0).
        if isinstance(tokens, (list, tuple)):
            return [self[tok] for tok in tokens]
        return self.token_to_idx.get(tokens, self.unk)

    def to_tokens(self, indices):
        # Inverse of __getitem__: id(s) back to token(s).
        if isinstance(indices, (list, tuple)):
            return [self.idx_to_token[i] for i in indices]
        return self.idx_to_token[indices]

    @property
    def unk(self):
        """Index of the unknown token (always 0)."""
        return 0

    @property
    def token_freqs(self):
        """(token, frequency) pairs sorted by descending frequency."""
        return self._token_freqs


def count_corpus(tokens):  # @save
    """Count token frequencies.

    ``tokens`` is either a flat (1-D) list of tokens or a list of token
    lists (2-D); a 2-D input is flattened before counting.
    """
    if tokens and not isinstance(tokens[0], list):
        # Already a flat token list.
        return collections.Counter(tokens)
    # Flatten a (possibly empty) list of token lines into one stream.
    return collections.Counter(tok for line in tokens for tok in line)


def extend_data(contents, max_number=100):
    """Expand entries containing the placeholder token "number".

    Each entry ``c`` in ``contents`` is indexable and ``c[0]`` is its token
    list. An entry whose token list contains the literal token "number" is
    replicated ``max_number`` times, with the first "number" occurrence
    replaced by "1" .. str(max_number); other entries are passed through
    unchanged (same objects, not copies, as in the original behavior).

    Args:
        contents: iterable of entries whose first element is a token list.
        max_number: highest substituted number (default 100 preserves the
            previously hard-coded range).

    Returns:
        A new list of entries; expanded entries are deep copies, so the
        originals are never mutated.
    """
    expanded = []
    for entry in contents:
        if "number" in entry[0]:
            # Hoist the slot lookup: the placeholder position is identical
            # in every fresh copy. (Also drops the original's redundant
            # extra deepcopy of the source entry.)
            slot = entry[0].index("number")
            for num in range(1, max_number + 1):
                variant = copy.deepcopy(entry)
                variant[0][slot] = str(num)
                expanded.append(variant)
        else:
            expanded.append(entry)
    return expanded


# 8.2.4. Putting all the pieces together
def load_corpus_time_machine(max_tokens=-1):  # @save
    """Return the number-expanded content entries and their vocabulary.

    NOTE(review): despite the name, this operates on the imported game
    ``new_contents``, not the time-machine dataset; the name is kept for
    API compatibility. ``max_tokens`` is accepted but unused.

    Returns:
        (contents, vocab): the expanded content list and a ``Vocab`` built
        over the per-entry token lists.
    """
    expanded = extend_data(new_contents)
    # entry[0] is each entry's token list; Vocab flattens 2-D input itself.
    token_lines = [entry[0] for entry in expanded]
    return expanded, Vocab(token_lines)


def load_corpus_game_data(max_tokens=-1):  # @save
    """Return the flattened game-data token list and its vocabulary.

    (The original docstring was copy-pasted from the time-machine loader;
    this function actually processes the imported game ``new_contents``.)
    ``max_tokens`` is accepted for API compatibility but unused.

    Returns:
        (corpus, vocab): ``corpus`` is the flat token stream from every
        expanded entry's token list; ``vocab`` is a ``Vocab`` over it.
    """
    n_contents = extend_data(new_contents)
    # Flatten the per-entry token lists into one token stream.
    corpus = [token for entry in n_contents for token in entry[0]]
    vocab = Vocab(corpus)
    return corpus, vocab


# `corpus` is the full flattened token stream; `vocab` is the token <-> id table.
# Fixed: the call was misspelled `load_corpus_game_datas`, which raised
# NameError at import time — the defined function is `load_corpus_game_data`.
corpus, vocab = load_corpus_game_data()
print("corpus", corpus)
print("vocab", vocab)
##################################
# token -> id lookup
print(vocab["number"])
# id -> token lookup
print(vocab.to_tokens(0))
print(vocab.to_tokens([0, 2, 3, 4, 5, 6]))
##################################
# Unigram frequency curve on log-log axes (to eyeball Zipf-like decay).
freqs = [freq for token, freq in vocab.token_freqs]
d2l.plot(freqs, xlabel='token: x', ylabel='frequency: n(x)',
         xscale='log', yscale='log')

# Adjacent token pairs (bigrams) over the flattened corpus.
bigram_tokens = [pair for pair in zip(corpus[:-1], corpus[1:])]
print(bigram_tokens, len(bigram_tokens))

bigram_vocab = d2l.Vocab(bigram_tokens)

# Adjacent token triples (trigrams).
trigram_tokens = [triple for triple in zip(
    corpus[:-2], corpus[1:-1], corpus[2:])]
trigram_vocab = d2l.Vocab(trigram_tokens)

# Compare unigram/bigram/trigram frequency decay on one log-log plot.
bigram_freqs = [freq for token, freq in bigram_vocab.token_freqs]
trigram_freqs = [freq for token, freq in trigram_vocab.token_freqs]
d2l.plot([freqs, bigram_freqs, trigram_freqs], xlabel='token: x',
         ylabel='frequency: n(x)', xscale='log', yscale='log',
         legend=['unigram', 'bigram', 'trigram'])

# NOTE(review): this repeats the unigram plot drawn above; kept so the
# figure output is unchanged, but it is likely redundant.
d2l.plot(freqs, xlabel='token: x', ylabel='frequency: n(x)',
         xscale='log', yscale='log')
plt.show()
