import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

'''

单词共计 14个,需要转换成10的维度

1个样本 为例:

    input       target
    (1,2,3,4)        5

    nn.Embedding(14,10) 转换后  (4,10)  相当于CBOW中 多个 (1,14) * (14,10) -> (1,10)  4个就是 (4,10)
    [
        [...],
        [...],
        [...],
        [...],

    ]

    求平均值 
    (4,10) -> (1,10)  CBOW中 4个上下文词向量求平均值   


    再经过线性层 转换成(1,14)
    (1,10) * (10,14) -> (1,14)

    softmax和交叉熵损失函数
    nn.CrossEntropyLoss()  交叉熵损失函数

------------------------------------------------------------------


多个样本 为例:
假设有4个样本  batch_size=4

input  target
[4,4]    [4]

nn.Embedding(14,10) 转换后
[4,4] - > [4,4,10]  相当于CBOW中 多个 (1,14) * (14,10) -> (1,10)  4个就是 (4,10)  在加上batch_size = 4  

求平均值
[4,4,10] -> [4,10]

通过线性层转换
[4,10] * [10,14] -> [4,14]


最后 softmax  和交叉熵损失函数
nn.CrossEntropyLoss()    CrossEntropyLoss下 input不需要 softmax  target也不需要 one-hot 编码   






'''


# Prepare the training data: three short sentences sharing a common prefix,
# used below to build CBOW (context, target) training pairs.
corpus = [
    "We are going to study about machine learning",
    "We are going to learn about deep learning",
    "We are going to explore about natural language processing"
]

# Data preprocessing: tokenize the corpus and build word<->index lookup tables.
def preprocess(corpus):
    """Tokenize each sentence and build vocabulary index mappings.

    Args:
        corpus: list of sentence strings.

    Returns:
        Tuple ``(tokenized_corpus, word_to_idx, idx_to_word)`` where
        ``tokenized_corpus`` is a list of lowercase token lists and the two
        dicts map each word to a stable integer id and back.
    """
    tokenized_corpus = [sentence.lower().split() for sentence in corpus]
    # Sort the vocabulary so word indices are deterministic across runs.
    # Iterating a raw set is affected by string-hash randomization, which
    # would make the learned embedding indices irreproducible between runs.
    vocab = sorted(set(word for sentence in tokenized_corpus for word in sentence))
    word_to_idx = {word: idx for idx, word in enumerate(vocab)}
    idx_to_word = {idx: word for word, idx in word_to_idx.items()}
    return tokenized_corpus, word_to_idx, idx_to_word

# Build the vocabulary tables from the raw corpus.
tokenized_corpus, word_to_idx, idx_to_word = preprocess(corpus)
vocab_size = len(word_to_idx)  # number of distinct words in the corpus

# Build the CBOW training set: each center word is predicted from the
# `context_size` words on each side of it.
def create_cbow_data(tokenized_corpus, word_to_idx, context_size=2):
    """Return (context_indices, target_index) pairs for CBOW training.

    Only positions with a full window on both sides are used. Each
    context list holds the left neighbors (nearest first) followed by
    the right neighbors (nearest first).
    """
    pairs = []
    for tokens in tokenized_corpus:
        for center in range(context_size, len(tokens) - context_size):
            # Left neighbors walking outward, then right neighbors walking outward,
            # matching the original window ordering exactly.
            left = range(center - 1, center - context_size - 1, -1)
            right = range(center + 1, center + context_size + 1)
            context = [word_to_idx[tokens[pos]] for pos in list(left) + list(right)]
            pairs.append((context, word_to_idx[tokens[center]]))
    return pairs

# Use a window of 2 words on each side of the target word.
context_size = 2
cbow_data = create_cbow_data(tokenized_corpus, word_to_idx, context_size)

# Custom Dataset exposing the (context, target) pairs as long tensors.
class CBOWDataset(Dataset):
    """Wraps a list of (context_indices, target_index) pairs for a DataLoader."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        context_ids, target_id = self.data[idx]
        context_tensor = torch.tensor(context_ids, dtype=torch.long)
        target_tensor = torch.tensor(target_id, dtype=torch.long)
        return context_tensor, target_tensor

# Batch the CBOW pairs; shuffling each epoch helps SGD convergence.
dataset = CBOWDataset(cbow_data)
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)

# CBOW model: average the context embeddings, then project to vocabulary logits.
class CBOWModel(nn.Module):
    """Predicts a center word from the mean of its context word embeddings."""

    def __init__(self, vocab_size, embedding_dim):
        super(CBOWModel, self).__init__()
        # Lookup table of shape (vocab_size, embedding_dim), e.g. (14, 10).
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Projects the averaged context vector to vocab logits, e.g. (10, 14).
        self.linear = nn.Linear(embedding_dim, vocab_size)

    def forward(self, context):
        # (batch, window) indices -> (batch, window, embedding_dim) vectors.
        vectors = self.embeddings(context)
        # Collapse the window dimension by averaging: (batch, embedding_dim).
        averaged = vectors.mean(dim=1)
        # Raw logits (batch, vocab_size); CrossEntropyLoss applies softmax itself.
        return self.linear(averaged)

# Model, loss function and optimizer.
embedding_dim = 10
model = CBOWModel(vocab_size, embedding_dim)  # embedding table is (vocab_size, 10)
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class targets
optimizer = optim.SGD(model.parameters(), lr=0.01)

# Train the model: repeated full passes over the DataLoader.
epochs = 5000
for epoch in range(epochs):
    total_loss = 0
    for batch_context, batch_target in dataloader:
        # Standard step: clear grads, forward, compute loss, backprop, update.
        optimizer.zero_grad()
        loss = criterion(model(batch_context), batch_target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()

    # Report the summed batch loss every 10 epochs.
    if epoch % 10 == 0:
        print(f"Epoch {epoch + 1}, Loss: {total_loss:.4f}")

# Inspect the trained word vector for one vocabulary word.
word = "about"
word_idx = word_to_idx[word]
# Embedding lookup with a 0-d index tensor returns a 1-D tensor of size embedding_dim.
word_vector = model.embeddings(torch.tensor(word_idx))
print(f"Word vector for '{word}': {word_vector}")