import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import jieba
from collections import Counter
import random

# ========== 1. Data preprocessing ==========
corpus = [
    "我们 喜欢 深度 学习",
    "自然 语言 处理 是 有趣 的",
    "人工智能 改变 了 世界",
    "深度 学习 是 人工智能 的 重要 组成部分"
]

# Tokenization: the corpus above is already space-separated, and jieba.cut also
# returns the whitespace as tokens, so filter those out of the result.
tokenized_corpus = [[w for w in jieba.cut(sentence) if w.strip()] for sentence in corpus]


# Build character n-grams (FastText-style subword units) for a single word.
# The original loop iterated over characters, so it always returned an empty list.
def generate_ngrams(word, n=3):
    return [word[i:i + n] for i in range(len(word) - n + 1)]
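
# For example: generate_ngrams("人工智能") -> ['人工智', '工智能'], while
# generate_ngrams("深度") -> [] because the word is shorter than n. Real FastText
# uses a range of n values plus "<"/">" boundary markers so that short words also
# get subwords; this demo keeps a single fixed n for simplicity.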


# Collect all character n-grams that occur in the corpus
all_ngrams = set()
for sentence in tokenized_corpus:
    for word in sentence:
        all_ngrams.update(generate_ngrams(word))

# Build the vocabulary: whole words and character n-grams share one embedding table.
# sorted() makes the word-to-index mapping reproducible across runs.
vocab = set(word for sentence in tokenized_corpus for word in sentence) | all_ngrams
word2idx = {word: idx for idx, word in enumerate(sorted(vocab))}
idx2word = {idx: word for word, idx in word2idx.items()}
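
# Illustrative sketch (hypothetical mapping, not used by the simplified training
# below): real FastText keeps, for every word, the ids of its character n-grams
# so that a word vector can be composed from subword vectors.
word_to_subword_idx = {
    word: [word2idx[g] for g in generate_ngrams(word)]
    for sentence in tokenized_corpus for word in sentence
}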

# Build training data (CBOW-style: predict the center word from its context)
window_size = 2
data = []

for sentence in tokenized_corpus:
    indices = [word2idx[word] for word in sentence]
    for center_idx in range(len(indices)):
        context = []
        for offset in range(-window_size, window_size + 1):
            context_idx = center_idx + offset
            if 0 <= context_idx < len(indices) and context_idx != center_idx:
                context.append(indices[context_idx])
        if context:
            data.append((context, indices[center_idx]))  # (context word ids, target word id)
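
# Sanity check (optional): print one training pair in readable form.
example_context, example_target = data[0]
print("sample context:", [idx2word[i] for i in example_context],
      "-> target:", idx2word[example_target])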


# ========== 2. Define the FastText model ==========
class FastText(nn.Module):
    def __init__(self, vocab_size, embedding_dim):
        super(FastText, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.linear = nn.Linear(embedding_dim, vocab_size)

    def forward(self, context):
        context_vec = self.embeddings(context).mean(dim=1)  # average the context word embeddings
        output = self.linear(context_vec)
        return output
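
# Illustrative sketch (our own helper, not part of the training loop below): in
# real FastText a word's vector is composed from its whole-word embedding plus
# its character n-gram embeddings, which is what allows vectors for unseen words.
def compose_word_vector(model, word):
    idxs = [word2idx[word]] if word in word2idx else []
    idxs += [word2idx[g] for g in generate_ngrams(word) if g in word2idx]
    if not idxs:
        return None  # neither the word nor any of its n-grams is known
    with torch.no_grad():
        return model.embeddings(torch.tensor(idxs, dtype=torch.long)).mean(dim=0)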


# Initialize the model
embedding_dim = 10
model = FastText(len(vocab), embedding_dim)

# ========== 3. Train FastText ==========
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
num_epochs = 100

for epoch in range(num_epochs):
    total_loss = 0
    random.shuffle(data)

    for context, target in data:
        context = torch.tensor([context], dtype=torch.long)
        target = torch.tensor([target], dtype=torch.long)

        optimizer.zero_grad()
        output = model(context)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()

    if (epoch + 1) % 10 == 0:
        print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {total_loss:.4f}")

# ========== 4. Retrieve the word vectors ==========
word_vectors = model.embeddings.weight.detach().numpy()


# ========== 5. Compute similarity ==========
def most_similar(word, top_n=3):
    if word not in word2idx:
        return "word not in vocabulary"

    # Cosine similarity between the query vector and every vector in the table.
    norms = np.linalg.norm(word_vectors, axis=1, keepdims=True) + 1e-8
    normed = word_vectors / norms
    word_vec = normed[word2idx[word]]
    similarities = normed @ word_vec
    # Skip the first hit, which is the query word itself.
    similar_idx = similarities.argsort()[::-1][1:top_n + 1]
    return [(idx2word[idx], float(similarities[idx])) for idx in similar_idx]


# Test
test_words = ["深度", "学习", "人工智能"]
for word in test_words:
    print(f"Most similar to [{word}]:", most_similar(word))
