import math
import random

from torch import nn, Tensor
import torch.utils.data
from matplotlib import pyplot as plt

import dltools


def read_ptb():
    """Read the PTB training file and return it as a list of token lists.

    Each line of the file is one sentence; tokens are whitespace-separated.
    NOTE(review): the path is hard-coded; the file must exist relative to the
    current working directory.
    """
    # Explicit UTF-8 so decoding does not depend on the platform's default
    # locale encoding (e.g. cp1252 on Windows).
    with open("./MNIST/ptb/ptb.train.txt", "r", encoding="utf-8") as file:
        lines = file.readlines()
    return [line.split() for line in lines]


# Load the PTB training split and build a vocabulary, dropping words that
# occur fewer than 10 times.
sentences = read_ptb()
print(f"sentences数：{len(sentences)}")

vocab = dltools.Vocab(sentences, min_freq=10)
print(f"vocab size:{len(vocab)}")


def subsample(_sentences, _vocab):
    """Randomly discard high-frequency tokens (word2vec subsampling).

    A token w survives with probability sqrt(t / f(w)), t = 1e-4, where f(w)
    is its relative frequency in the corpus. Returns the thinned sentences
    and the raw token counter (computed before discarding).
    """
    # Strip every <unk> token first.
    cleaned = [[tok for tok in line if _vocab[tok] != _vocab.unk] for line in _sentences]
    counter = dltools.count_corpus(cleaned)
    # Total token count, duplicates included.
    total = sum(counter.values())

    def keep(tok):
        # One uniform draw per token; rarer words survive more often.
        return random.uniform(0, 1) < math.sqrt(1e-4 / (counter[tok] / total))

    return [[tok for tok in line if keep(tok)] for line in cleaned], counter


# Subsample frequent words, then compare the first 20 sentence lengths
# before and after as side-by-side bars.
subsampled, counter = subsample(sentences, vocab)

before = [len(x) for x in sentences[:20]]
after = [len(x) for x in subsampled[:20]]
x = range(1, 21)
plt.bar(x, height=before, width=0.4, color="red", label="before")
plt.bar([i + 0.4 for i in x], height=after, width=0.4, color="green", label="after")
plt.xlabel("tokens per sentence")
plt.ylabel("count")
plt.legend(["before", "after"])


# plt.show()

def compare_counts(token):
    """Print how often `token` occurs before vs. after subsampling."""
    n_before = sum(line.count(token) for line in sentences)
    n_after = sum(line.count(token) for line in subsampled)
    print(f"{token} before:{n_before} after:{n_after}")


# Frequent word ("the") should shrink a lot; rare word ("publishing") barely.
compare_counts("the")
compare_counts("publishing")

# Map every subsampled sentence to a list of vocabulary indices.
corpus = [vocab[line] for line in subsampled]
print(corpus[:5])


def get_centers_and_contexts(_corpus, max_window_size):
    """Extract (center, context) pairs with a per-center random window.

    Every token of every sentence of length >= 2 becomes a center; its
    context is the tokens within a window of random size drawn from
    [1, max_window_size]. Returns (centers, contexts) as parallel lists.
    """
    centers, contexts = [], []
    for line in _corpus:
        # A sentence needs at least two tokens to form any pair.
        if len(line) < 2:
            continue
        centers.extend(line)
        for i in range(len(line)):
            window_size = random.randint(1, max_window_size)
            lo = max(0, i - window_size)
            hi = min(len(line), i + 1 + window_size)  # range excludes hi
            contexts.append([line[j] for j in range(lo, hi) if j != i])

    return centers, contexts


# Sanity check on a tiny synthetic corpus, then run on the real corpus
# with max window size 5.
tiny_dataset = [list(range(5)), list(range(8, 11)), list(range(18, 20))]
print(f"tiny_dataset{tiny_dataset}")
for center, context in zip(*get_centers_and_contexts(tiny_dataset, 2)):
    print(f"center:{center} context:{context}")

all_centers, all_contexts = get_centers_and_contexts(corpus, 5)
print(f"center-context num:{sum([len(contexts) for contexts in all_contexts])}")


# Negative sampling: weighted random index generator with a draw cache.
class RandomGenerator:
    """Draw indices 1..len(sampling_weights) in proportion to the weights.

    random.choices is invoked once per 10000 draws and the results are
    cached, since calling it per draw would be much slower.
    """

    def __init__(self, sampling_weights):
        # Candidates are 1-based (index 0 is reserved for <unk>).
        self.population = list(range(1, len(sampling_weights) + 1))
        self.sampling_weights = sampling_weights
        self.candidates = []  # current cache of pre-drawn samples
        self.i = 0            # cursor into the cache

    def draw(self):
        """Return one weighted random index, refilling the cache when empty."""
        if self.i >= len(self.candidates):
            self.candidates = random.choices(self.population, self.sampling_weights, k=10000)
            self.i = 0
        sample = self.candidates[self.i]
        self.i += 1
        return sample


# Quick check of weighted sampling: draws from {1, 2, 3} with weights 8/18/5.
generator = RandomGenerator([8, 18, 5])
print([generator.draw() for i in range(10)])


# Negative sampling
def get_negatives(_all_contexts, _vocab, _counter, K):
    """Draw K noise words per context word for every context list.

    Sampling weight of a word is count ** 0.75 (the word2vec heuristic);
    index 0 (<unk>) is excluded from the candidate population.
    Returns a list of negative-sample lists parallel to `_all_contexts`.
    """
    # BUG FIX: the original read the module-level globals `counter`/`vocab`
    # here, silently ignoring the `_counter`/`_vocab` arguments (so
    # load_data_ptb's freshly built vocabulary was never used).
    sampling_weights = [_counter[_vocab.idx_to_token[i]] ** 0.75 for i in range(1, len(_vocab))]
    all_negatives, _generator = [], RandomGenerator(sampling_weights)
    for _contexts in _all_contexts:
        negatives = []
        while len(negatives) < len(_contexts) * K:
            neg = _generator.draw()
            # A noise word must not be one of the true context words.
            if neg not in _contexts:
                negatives.append(neg)
        all_negatives.append(negatives)
    return all_negatives


# Draw 5 noise words per context word for the whole corpus.
all_negatives = get_negatives(all_contexts, vocab, counter, 5)
print(f"len(all_negatives): {len(all_negatives)}")


def batchify(data):
    """Collate (center, context, negatives) triples into padded tensors.

    Rows are zero-padded to the longest context+negatives length in the
    batch. Returns (centers (B,1), contexts_negatives (B,L), masks (B,L)
    marking real entries, labels (B,L) marking true context words).
    """
    max_len = max(len(ctx) + len(neg) for _, ctx, neg in data)
    centers, ctx_negs, masks, labels = [], [], [], []
    for center, ctx, neg in data:
        cur = len(ctx) + len(neg)
        pad = max_len - cur
        centers.append(center)
        ctx_negs.append(ctx + neg + [0] * pad)
        masks.append([1] * cur + [0] * pad)          # 1 = real token, 0 = padding
        labels.append([1] * len(ctx) + [0] * (max_len - len(ctx)))  # 1 = positive pair

    return (torch.tensor(centers).reshape((-1, 1)),
            torch.tensor(ctx_negs),
            torch.tensor(masks),
            torch.tensor(labels))


# Minibatching sanity check with two hand-made (center, context, negatives) triples.
x_1 = (1, [2, 3], [7, 8, 9, 10, 11])
x_2 = (2, [4, 5], [6, 8, 9, 10])
batch_1 = batchify([x_1, x_2])
print(f"batch_1:{batch_1}")


def load_data_ptb(_batch_size, _max_window_size, _num_noise_words):
    """Build the full PTB skip-gram data pipeline.

    Reads the corpus, builds a vocabulary (min_freq=10), subsamples frequent
    words, extracts (center, context) pairs and negative samples, and wraps
    everything in a shuffling DataLoader that pads batches via `batchify`.
    Returns (data_iter, vocab).
    """
    raw_sentences = read_ptb()
    ptb_vocab = dltools.Vocab(raw_sentences, min_freq=10)
    thinned, freq = subsample(raw_sentences, ptb_vocab)
    idx_corpus = [ptb_vocab[line] for line in thinned]
    centers, contexts = get_centers_and_contexts(idx_corpus, _max_window_size)
    negatives = get_negatives(contexts, ptb_vocab, freq, _num_noise_words)

    class PTBDataset(torch.utils.data.Dataset):
        """Indexable parallel triples of (center, contexts, negatives)."""

        def __init__(self, _centers, _contexts, _negatives):
            assert len(_centers) == len(_contexts) == len(_negatives)
            self.centers = _centers
            self.contexts = _contexts
            self.negatives = _negatives

        def __getitem__(self, item):
            return self.centers[item], self.contexts[item], self.negatives[item]

        def __len__(self):
            return len(self.centers)

    dataset = PTBDataset(centers, contexts, negatives)
    loader = torch.utils.data.DataLoader(dataset, _batch_size, shuffle=True, collate_fn=batchify)
    return loader, ptb_vocab


# Build the real data pipeline and inspect one batch's tensor shapes.
batch_size, max_window_size, num_noise_words = 512, 5, 5
data_iter_1, vocab_1 = load_data_ptb(batch_size, max_window_size, num_noise_words)
for batch in data_iter_1:
    center, contexts_negatives, masks, labels = batch
    print(f"center.shape:{center.shape}")
    print(f"contexts_negatives.shape:{contexts_negatives.shape}")
    print(f"masks.shape:{masks.shape}")
    print(f"labels.shape:{labels.shape}")
    break

# Embedding layer demo: a (2, 3) index tensor maps to a (2, 3, 4) output.
embed = nn.Embedding(num_embeddings=20, embedding_dim=4)
print(f"embed.weight.shape:{embed.weight.shape}")
test_x = torch.tensor([[1, 2, 3], [4, 5, 6]])
embed_x = embed(test_x)
print(f"embed_x.shape:{embed_x.shape}")

# Skip-gram forward pass.
def skip_gram(centers, contexts_and_negatives, embed_v, embed_u):
    """Score every (center, context/negative) pair by inner product.

    centers: (batch, 1) index tensor; contexts_and_negatives: (batch, L)
    index tensor. embed_v embeds centers, embed_u embeds context words.
    Returns a (batch, 1, L) tensor of dot products.
    """
    center_vectors = embed_v(centers)                  # (batch, 1, dim)
    context_vectors = embed_u(contexts_and_negatives)  # (batch, L, dim)
    # Batched matmul: (batch, 1, dim) @ (batch, dim, L) -> (batch, 1, L).
    return torch.bmm(center_vectors, context_vectors.transpose(1, 2))

# Forward-pass shape check: 2 centers x 5 context/negative words -> (2, 1, 5).
test_center = torch.ones((2, 1), dtype=torch.long)
test_contexts_and_negatives = torch.ones((2, 5), dtype=torch.long)
res = skip_gram(test_center, test_contexts_and_negatives, embed, embed)
print(f"res:{res.shape}")

# Masked binary cross-entropy loss.
class SigmoidBCELoss(nn.Module):
    """Sigmoid BCE averaged over the unmasked positions of each row.

    The per-element losses are weighted by `masks` (1 = real token,
    0 = padding) and each row is normalised by its number of real tokens,
    so padding never dilutes the loss.
    """

    def __init__(self):
        super().__init__()

    def forward(self, inputs, target, masks=None):
        """inputs/target: (batch, len) float tensors; masks: same shape or None."""
        output = nn.functional.binary_cross_entropy_with_logits(inputs, target, weight=masks, reduction="none")
        if masks is None:
            # BUG FIX: the original divided by masks.sum(dim=1) unconditionally,
            # crashing (AttributeError on None) when masks kept its default.
            # With no mask, every position counts, so this is the plain mean.
            return output.mean(dim=1)
        return output.sum(dim=1) / masks.sum(dim=1)

loss = SigmoidBCELoss()

# Loss sanity check: second row has only its first two positions unmasked.
pred = torch.tensor([[1.1, -2.2, 3.3, -4.4], [1.1, -2.2, 3.3, -4.4]])
label = torch.tensor([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=torch.float32)
masks = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]], dtype=torch.float32)
loss_res = loss(pred, label, masks)
print(f"loss_res:{loss_res}")

# Initialize model parameters: two embedding tables, net[0] for center
# words and net[1] for context/negative words.
embed_size, vocab_size = 100, len(vocab_1)
net = nn.Sequential(nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size),
                    nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size))

# Training loop.
def train(_net, _data_iter, _lr, num_epochs, device=dltools.try_gpu()):
    """Train the two-embedding skip-gram model with Adam and masked BCE loss.

    Prints the epoch's running average loss every 10 batches.
    BUG FIX: the loss accumulator is now reset at the start of each epoch;
    the original created it once, so the printed value was an average over
    every epoch seen so far and barely moved late in training.
    """
    def init_weights(m):
        # Xavier-uniform init for both embedding tables.
        if type(m) == nn.Embedding:
            nn.init.xavier_uniform_(m.weight)
    _net.apply(init_weights)
    _net = _net.to(device)
    optimizer = torch.optim.Adam(_net.parameters(), lr=_lr)
    for epoch in range(num_epochs):
        timer, num_batches = dltools.Timer(), len(_data_iter)
        metric = dltools.Accumulator(2)  # (summed loss, element count)
        for i, batch in enumerate(_data_iter):
            optimizer.zero_grad()
            centers, contexts_and_negatives, _masks, _labels = [data.to(device) for data in batch]
            pred = skip_gram(centers, contexts_and_negatives, _net[0], _net[1])
            # pred is (B, 1, L); reshape to (B, L) to match labels/masks.
            l = loss(pred.reshape(_labels.shape).float(), _labels.float(), _masks.float())
            l.sum().backward()
            optimizer.step()
            metric.add(l.sum(), l.numel())
            if (i + 1) % 10 == 0:
                print(f"loss:{metric[0]/metric[1]:.3f}")

# Run training. NOTE(review): 50 epochs over the full PTB corpus — confirm
# the runtime budget before running on CPU.
lr, num_epochs = 0.002, 50
train(net, data_iter_1, lr, num_epochs)

# If the nearest neighbours look like synonyms, training worked well.
def get_similar_tokens(query_token, k, embed, vocab):
    """Print the k tokens whose embeddings are closest to `query_token`
    by cosine similarity (the query itself is excluded from the output)."""
    W = embed.weight.data
    print(f"W.shape: {W.shape}")
    x = W[vocab[query_token]]
    print(f"x.shape: {x.shape}")
    # Cosine similarity of x against every row; 1e-9 avoids division by zero.
    norms = torch.sqrt(torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9)
    cos = torch.mv(W, x) / norms
    print(f"cos.shape:{cos.shape}")
    # torch.topk returns (values, indices); keep the indices, move them to
    # the CPU so numpy can handle them.
    top_indices = torch.topk(cos, k=k + 1)[1].cpu().numpy().astype("int32")
    # Skip the first hit: the query token is always its own nearest neighbour.
    for idx in top_indices[1:]:
        print(f"cosine sim={float(cos[idx]):.3f}: {vocab.to_tokens(idx)}")


get_similar_tokens("city", 3, net[0], vocab_1)
