import math
import random
from collections import Counter
from typing import Any, Dict, List, Tuple

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn

def load_corpus(file: str) -> List[str]:
    """Load a whitespace-tokenized corpus from a UTF-8 text file.

    Each line is lower-cased, stripped of surrounding whitespace and split on
    whitespace; all tokens are concatenated into one flat list.

    :param file: path to the corpus text file
    :return: list of lower-cased tokens in file order
    """
    corpus: List[str] = []
    # Bug fix: use a context manager so the file handle is closed
    # (the original iterated an anonymous open() and leaked it).
    with open(file, "r", encoding="utf-8") as fh:
        for line in fh:
            corpus += line.lower().strip().split()
    return corpus

def build_dict(corpus: List[str]) -> Tuple[Dict[str, int], Dict[int, str], Dict[int, int]]:
    """Build vocabulary mappings, with ids assigned by descending frequency.

    Id 0 is the most frequent word; ties keep first-seen order (the stable
    sort behaviour of the original implementation is preserved by
    ``Counter.most_common``, which is also a stable sort).

    :param corpus: list of word tokens
    :return: (word -> id, id -> word, id -> raw count)
    """
    # Bug fix: the original annotated the return type as a list literal
    # ``[Dict, ...]``, which is not a valid type; it is a Tuple.
    word2id: Dict[str, int] = {}
    id2word: Dict[int, str] = {}
    id2count: Dict[int, int] = {}

    for ID, (word, count) in enumerate(Counter(corpus).most_common()):
        word2id[word] = ID
        id2word[ID] = word
        id2count[ID] = count

    return word2id, id2word, id2count

def subsampling(corpus: List[int], id2count: Dict[int, int], t=1e-3) -> List[int]:     # 二次采样，随机得丢弃一些频数较小得单词
    discard = lambda index : bool(random.uniform(0, 1) < 1 - math.sqrt(t / id2count[index] * len(corpus)))
    return [index for index in corpus if not discard(index)]

def build_data(corpus: List[int], vocab_size: int, max_ws: int = 3, nega_samp_num: int = 4):
    """
    :param corpus: 语料库
    :param vocab_size: 词表长度
    :param max_ws: 最大窗口长度
    :param nega_samp_num: 每个单词配对得负样本得数量
    :return: 一个三元组列表，为(中心词索引，正样本单词索引，1)或者(中心词索引，负样本单词索引，0)
    """
    data_set = []
    for index, word_index in enumerate(corpus):
        window_size = random.randint(1, max_ws)

        # 挑选以index为中心,window_size为半径的所有单词索引，不包括index本身，max和min是为了保证边界条件
        p_words = [corpus[i] for i in range(max(0, index - window_size), min(len(corpus) - 1, index + window_size) + 1) if i != index]
        for p_w in p_words:             # 加入正样本
            data_set.append([word_index, p_w, 1])
            # 负采样
            for i in range(nega_samp_num):
                n_w = random.randint(0, vocab_size - 1)
                if n_w not in p_words:                  # 只要是不在候选框中的都是负样本
                    data_set.append([word_index, n_w, 0])
    return data_set

def DataLoader(data_set, vocab_size: int, batch_size: int = 64):
    """Yield mini-batches of (center, target, label, eval) tensors.

    :param data_set: iterable of [center_id, target_id, label] triples
    :param vocab_size: vocabulary size; assumes vocab_size >= 100 so the
        low-frequency eval draw randint(99, vocab_size - 1) is valid —
        TODO confirm with callers
    :param batch_size: number of triples per yielded batch
    :yields: [int64 centers, int64 targets, float32 labels, int64 eval ids]
    """
    center_word_batch: List[int] = []
    target_word_batch: List[int] = []
    label_batch: List[Any] = []
    eval_word_batch: List[int] = []

    for center_word, target_word, label in data_set:
        center_word_batch.append(center_word)
        target_word_batch.append(target_word)
        label_batch.append(label)
        # Collect a one-off evaluation sample: 5 high-frequency ids (0..99)
        # then 5 lower-frequency ids (99..vocab_size-1).  Bug fix: the
        # original ran both appends in the same iteration, producing
        # 3 high / 7 low instead of the intended 5 + 5 split.
        if len(eval_word_batch) < 5:
            eval_word_batch.append(random.randint(0, 99))
        elif len(eval_word_batch) < 10:
            eval_word_batch.append(random.randint(99, vocab_size - 1))

        if len(center_word_batch) == batch_size:
            yield [
                torch.tensor(center_word_batch, dtype=torch.int64),
                torch.tensor(target_word_batch, dtype=torch.int64),
                torch.tensor(label_batch, dtype=torch.float32),
                torch.tensor(eval_word_batch, dtype=torch.int64)
            ]
            # Bug fix: reset the buffers.  The original never cleared them,
            # so after the first full batch the == test could never succeed
            # again and all remaining samples came out in one oversized
            # final batch.
            center_word_batch = []
            target_word_batch = []
            label_batch = []
    if len(center_word_batch) > 0:                              # flush the remainder
        yield [
            torch.tensor(center_word_batch, dtype=torch.int64),
            torch.tensor(target_word_batch, dtype=torch.int64),
            torch.tensor(label_batch, dtype=torch.float32),
            torch.tensor(eval_word_batch, dtype=torch.int64)
        ]

class SkipGram(nn.Module):
    """Skip-gram word2vec model with separate input and output embeddings."""

    def __init__(self, vocab_size, embedding_size, word2id, id2word):
        """
        :param vocab_size: vocabulary size
        :param embedding_size: dimensionality of the word vectors
        :param word2id: word -> id mapping (used by predict)
        :param id2word: id -> word mapping (kept for lookups)
        """
        super(SkipGram, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.word2id = word2id
        self.id2word = id2word

        # Input-side table: embeds center words.
        self.embedding = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embedding_size
        )

        # Output-side table: embeds target/context words.
        self.embedding_out = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=embedding_size
        )

    def forward(self, center_words, target_words):
        """Sigmoid similarity score for each (center, target) pair.

        :param center_words: int64 tensor, shape [batch_size]
        :param target_words: int64 tensor, shape [batch_size]
        :return: float tensor in (0, 1), shape [batch_size]
        """
        # [batch_size] -> [batch_size, embedding_size]
        center_word_emb = self.embedding(center_words)
        # Bug fix: targets must go through the OUTPUT table.  The original
        # used self.embedding here, leaving self.embedding_out entirely
        # unused (dead parameters) and collapsing the two-table design.
        target_word_emb = self.embedding_out(target_words)

        # dot product per row, squashed to a probability-like score
        word_sim = torch.sigmoid(torch.sum(center_word_emb * target_word_emb, dim=1))
        return word_sim

    def predict(self, word1, word2):
        """Similarity of two words using the input-side embeddings.

        Unknown words fall back to the last vocabulary index.
        :return: sigmoid of the dot product, as a Python float
        """
        index1 = self.word2id.get(word1, self.vocab_size - 1)
        index2 = self.word2id.get(word2, self.vocab_size - 1)
        # Both lookups use the input table: word-word similarity is
        # conventionally measured in the input embedding space.
        vec1 = self.embedding(torch.tensor([[index1]], dtype=torch.int64))
        vec2 = self.embedding(torch.tensor([[index2]], dtype=torch.int64))
        return torch.sigmoid(torch.sum(vec1 * vec2)).item()

if __name__ == "__main__":
    # Cap the corpus size so training stays cheap.
    corpus = load_corpus("../data/word2vec.txt")[:10000]
    print(f"选择的语料库的长度为{len(corpus)}个单词")

    # Build the vocabulary, then re-encode the corpus as id sequences.
    word2id, id2word, id2count = build_dict(corpus)
    corpus = [word2id[word] for word in corpus]
    corpus = subsampling(corpus, id2count)
    print(f"二次采样后的语料库长度为{len(corpus)}")
    vocab_size = len(word2id)

    data_set = build_data(corpus, vocab_size)

    net = SkipGram(
        vocab_size=vocab_size,
        embedding_size=512,
        word2id=word2id,
        id2word=id2word
    )

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss_func = nn.BCELoss()

    all_losses = []

    # Plain BCE training loop over 10 passes of the generated triples.
    for epoch in range(10):
        for centers, targets, labels, eval_words in DataLoader(data_set, vocab_size, batch_size=128):
            similarity = net(centers, targets)
            loss = loss_func(similarity.flatten(), labels.flatten())
            all_losses.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # NOTE(review): the "seaborn" style alias was removed in matplotlib >= 3.8
    # (renamed to "seaborn-v0_8"); this call raises on newer versions —
    # confirm the pinned matplotlib version.
    plt.style.use("seaborn")
    plt.plot(all_losses, label="loss")
    plt.legend()
    plt.show()

    torch.save(net, "../dist/skip-gram.pkl")
    print("模型已经保存在dist文件夹下")

    print(net.predict("share", "enjoy"))