import math
import os
import random
import torch
from d2l import torch as d2l
import collections
from chinese_split_word_ictclas import sentences



# @save
def read_ptb():
    """Load the PTB dataset as a list of sentences, each a list of tokens."""
    data_dir = d2l.download_extract('ptb')
    print("读取目录", data_dir)
    # Read the training split and tokenize each line on whitespace.
    with open(os.path.join(data_dir, 'ptb.train.txt')) as f:
        lines = f.read().split('\n')
    return [sentence.split() for sentence in lines]


# @save
# Register the PTB archive URL and its SHA-1 checksum with d2l's download hub.
d2l.DATA_HUB['ptb'] = (d2l.DATA_URL + 'ptb.zip',
                       '319d85e578af0cdc590547f26231e4e31cdf1e42')

# Collect the unique tokens appearing anywhere in the corpus.
allword = list({token for sentence in sentences for token in sentence})

print(f'# sentences数: {len(sentences)}')
# Build the vocabulary; min_freq=1 keeps every token that occurs at all.
vocab = d2l.Vocab(sentences, min_freq=1)
print(vocab[allword])
# Report the total vocabulary size.
print(f'词表大小 vocab size: {len(vocab)}')
# Look up token ids for a couple of sample tokens.
print(vocab["+", "转化"])


# 14.3.2 Subsampling
def count_corpus(tokens):
    """Count token frequencies.

    Args:
        tokens: a 1-D list of tokens, or a 2-D list of token lists
            (one inner list per sentence).

    Returns:
        collections.Counter mapping each token to its occurrence count.
    """
    # An empty input or a nested (2-D) input is flattened before counting;
    # `len(tokens) == 0` is checked first so `tokens[0]` is never an IndexError.
    if len(tokens) == 0 or isinstance(tokens[0], list):
        tokens = [token for line in tokens for token in line]
    return collections.Counter(tokens)


# @save
def subsample(sentences, vocab):
    """Subsample high-frequency words (the drop step is currently disabled)."""
    # Remove tokens that map to the unknown-token index '<unk>'.
    filtered = [[token for token in line if vocab[token] != vocab.unk]
                for line in sentences]
    # Frequency of every remaining token across the whole corpus.
    counter = collections.Counter(
        token for line in filtered for token in line)
    num_tokens = sum(counter.values())

    def keep(token):
        # Eq. (14.3.1) would probabilistically drop frequent tokens:
        #   random.uniform(0, 1) < math.sqrt(1e-4 / counter[token] * num_tokens)
        # Subsampling is intentionally disabled here: every token is kept.
        return True

    kept = [[token for token in line if keep(token)] for line in filtered]
    return kept, counter


# Run subsampling over the corpus; `counter` holds pre-drop token frequencies.
subsampled, counter = subsample(sentences, vocab)
print(subsampled, counter)


# d2l.show_list_len_pair_hist(
#     ['origin', 'subsampled'], '# tokens per sentence',
#     'count', sentences, subsampled);


def compare_counts(token):
    """Report how often `token` occurs before vs. after subsampling."""
    before = sum(line.count(token) for line in sentences)
    after = sum(line.count(token) for line in subsampled)
    return f'"{token}"的数量：之前={before}, 之后={after}'


# Spot-check a few tokens' counts before and after subsampling.
print(compare_counts("+"))
print(compare_counts("提高"))
print(compare_counts("2%"))
# After subsampling, map each sentence's tokens to their corpus indices.
corpus = [vocab[sentence] for sentence in subsampled]
print(corpus[:3])


# 14.3.3 Extracting center and context words
def get_centers_and_contexts(corpus, max_window_size):
    """Return the center words and their context words for skip-gram."""
    centers, contexts = [], []
    for line in corpus:
        # A sentence needs at least 2 tokens to form a (center, context) pair.
        if len(line) < 2:
            continue
        centers.extend(line)
        for center_idx in range(len(line)):
            # Sample the window size uniformly from [1, max_window_size].
            span = random.randint(1, max_window_size)
            lo = max(0, center_idx - span)
            hi = min(len(line), center_idx + 1 + span)
            # Collect the window, excluding the center word itself.
            contexts.append(
                [line[j] for j in range(lo, hi) if j != center_idx])
    return centers, contexts


# Demonstrate center/context extraction on the first two sentences.
tiny_dataset = corpus[:2]
for data in tiny_dataset:
    print("索引", data)
    print("词缀", vocab.to_tokens(data))

for center, context in zip(*get_centers_and_contexts(tiny_dataset, 3)):
    # Unwrap single-element contexts so to_tokens gets a scalar index.
    shown = context[0] if len(context) == 1 else context
    print('中心词', vocab.to_tokens(center), '的上下文词是', vocab.to_tokens(shown))

# Extract pairs over the full corpus with a max window of 5.
all_centers, all_contexts = get_centers_and_contexts(corpus, 5)
print(f'# “中心词-上下文词对”的数量: {sum(len(contexts) for contexts in all_contexts)}')
# Centers and contexts are parallel lists of equal length.
print(len(all_centers))
print(len(all_contexts))
print(vocab.to_tokens(all_centers[411]), vocab.to_tokens(all_contexts[411]))

# Example output:
# 传奇 ['你', '使用', '的', '非', '功能', '药剂', '作用', '于', '你']
# 格挡 ['晕眩', '回复', '和', '回复', '提高', '25%']

# 14.3.4 Negative sampling
class RandomGenerator:
    """Randomly draw from {1, ..., n} according to n sampling weights."""

    def __init__(self, sampling_weights):
        # Candidate ids are 1-based: index 0 (the '<unk>' slot) is excluded.
        self.population = list(range(1, len(sampling_weights) + 1))
        print("self.population", self.population)
        self.sampling_weights = sampling_weights
        self.candidates = []
        self.i = 0

    def draw(self):
        """Return the next cached weighted sample, refilling when exhausted."""
        if self.i == len(self.candidates):
            # Amortize sampling cost: cache 10000 weighted draws at once.
            self.candidates = random.choices(
                self.population, self.sampling_weights, k=10000)
            self.i = 0
        sample = self.candidates[self.i]
        self.i += 1
        return sample


#

# generator = RandomGenerator([2, 3, 4])
# # 按照权重生成从1-3的序列，比例为2，3，4个。k=1000
# gen_list= [generator.draw() for _ in range(1000)]
# print(len([i for i in gen_list if i==1]))
# print(len([i for i in gen_list if i == 2]))
# print(len([i for i in gen_list if i == 3]))
# print(gen_list,"gen_list")

def get_negatives(all_contexts, vocab, counter, K):
    """Return K noise (negative) words per context word for negative sampling.

    Args:
        all_contexts: list of context-word index lists, one per center word.
        vocab: vocabulary mapping between tokens and indices.
        counter: mapping from token to its corpus frequency.
        K: number of negatives to draw per context word.

    Returns:
        A list parallel to `all_contexts`; element i holds
        len(all_contexts[i]) * K sampled noise-word indices, none of which
        appear in all_contexts[i].
    """
    # Sampling weight is frequency ** 0.75 (the word2vec heuristic).
    # Ids start at 1 because index 0 is the excluded '<unk>' token.
    sampling_weights = [counter[vocab.to_tokens(i)] ** 0.75
                        for i in range(1, len(vocab))]
    all_negatives, generator = [], RandomGenerator(sampling_weights)
    for contexts in all_contexts:
        negatives = []
        # Keep drawing until we have K negatives per context word;
        # a noise word must not be one of the true context words.
        while len(negatives) < len(contexts) * K:
            neg = generator.draw()
            if neg not in contexts:
                negatives.append(neg)
        all_negatives.append(negatives)
    return all_negatives


print("计数器", counter)
# Draw 5 noise words per context word.
all_negatives = get_negatives(all_contexts, vocab, counter, 5)
# At this point we have three parallel structures:
#   all_centers   - center words
#   all_contexts  - true context words
#   all_negatives - sampled noise (negative) words


# 14.3.5 Minibatch loading of training examples
def batchify(data):
    """Return a skip-gram minibatch with negative sampling.

    Each element of `data` is a (center, contexts, negatives) triple.
    Contexts and negatives are concatenated and zero-padded to the
    longest example; `masks` marks real entries (so padding can be
    excluded from the loss) and `labels` marks true context words.
    """
    # Pad every example to the longest contexts+negatives length.
    max_len = max(len(ctx) + len(neg) for _, ctx, neg in data)
    centers, contexts_negatives, masks, labels = [], [], [], []
    for center, ctx, neg in data:
        cur_len = len(ctx) + len(neg)
        pad = max_len - cur_len
        centers.append(center)
        contexts_negatives.append(ctx + neg + [0] * pad)
        masks.append([1] * cur_len + [0] * pad)
        labels.append([1] * len(ctx) + [0] * (max_len - len(ctx)))
    return (torch.tensor(centers).reshape((-1, 1)),
            torch.tensor(contexts_negatives),
            torch.tensor(masks),
            torch.tensor(labels))


"""
测试使用2个输入生成信息
113 [67, 127, 31] [137, 158, 155, 155, 64, 16, 48, 3, 28, 123, 44, 123, 126, 69, 25]
102 [82, 152, 72, 59] [169, 119, 167, 22, 175, 109, 9, 87, 50, 165, 173, 36, 141, 75, 114, 80, 166, 73, 155, 136]
"""
x_1 = (113, [67, 127, 31], [137, 158, 155, 155, 64, 16, 48, 3, 28, 123, 44, 123, 126, 69, 25])
x_2 = (
    102, [82, 152, 72, 59], [169, 119, 167, 22, 175, 109, 9, 87, 50, 165, 173, 36, 141, 75, 114, 80, 166, 73, 155, 136])
batch = batchify((x_1, x_2))
names = ['centers', 'contexts_negatives', 'masks', 'labels']
for name, data in zip(names, batch):
    print(name, '=', data)


class PTBDataset(torch.utils.data.Dataset):
    """Dataset of parallel (center, contexts, negatives) training triples."""

    def __init__(self, centers, contexts, negatives):
        # The three lists must be index-aligned, one entry per example.
        assert len(centers) == len(contexts) == len(negatives)
        self.centers = centers
        self.contexts = contexts
        self.negatives = negatives

    def __len__(self):
        return len(self.centers)

    def __getitem__(self, index):
        """Return the (center, contexts, negatives) triple at `index`."""
        triple = (self.centers[index],
                  self.contexts[index],
                  self.negatives[index])
        return triple


batch_size = 16
num_workers = 1  # NOTE(review): defined but never passed to the DataLoader
dataset = PTBDataset(all_centers, all_contexts, all_negatives)
# batchify pads and tensorizes each shuffled minibatch.
data_iter = torch.utils.data.DataLoader(
    dataset, batch_size, shuffle=True, collate_fn=batchify)

# Inspect the tensor shapes of one minibatch, then stop.
for batch in data_iter:
    print("batch ......................................")
    for name, data in zip(names, batch):
        print(name, 'shape:', data.shape)
    break
