# English word tokenization — CBOW word-embedding demo
import torch
import torch.nn as nn

class CBOW(torch.nn.Module):
    """Continuous Bag-of-Words model: predicts the center word from the
    summed embeddings of its surrounding context words."""

    def __init__(self, word_size, embedding_dim):
        """
        word_size: vocabulary size (number of distinct words).
        embedding_dim: dimensionality of each word embedding.
        """
        super(CBOW, self).__init__()
        self.embeddings = nn.Embedding(word_size, embedding_dim)
        self.linear1 = nn.Linear(embedding_dim, 128)
        # NOTE: attribute names keep the original spelling ("fuction")
        # so external code that references them keeps working.
        self.activation_fuction1 = nn.ReLU()
        self.linear2 = nn.Linear(128, word_size)
        # LogSoftmax output pairs with nn.NLLLoss during training.
        self.activation_fuction2 = nn.LogSoftmax(dim=-1)

    def forward(self, inputs):
        """inputs: 1-D LongTensor of context-word indices.

        Returns a (1, word_size) tensor of log-probabilities over the
        vocabulary.
        """
        # Sum the context embeddings into one bag-of-words vector.
        embeds = self.embeddings(inputs).sum(dim=0).view(1, -1)
        out = self.linear1(embeds)
        out = self.activation_fuction1(out)
        out = self.linear2(out)
        out = self.activation_fuction2(out)
        return out

    def get_word_emdedding(self, word, vocab=None):
        """Return the (1, embedding_dim) embedding vector for `word`.

        vocab: optional word->index mapping. When omitted, falls back to
        the module-level `word_to_ix` global (the original behavior) —
        note that global is only populated if the caller fills it in;
        train_CBOW builds a *local* mapping, so pass `vocab` explicitly.
        """
        mapping = vocab if vocab is not None else word_to_ix
        idx = torch.tensor([mapping[word]])
        return self.embeddings(idx).view(1, -1)
    

# Module-level initialisation: shared state for the helpers below.
# NOTE(review): train_CBOW rebinds `text`, `word`, `word_size`,
# `word_to_ix` and `ix_to_word` as *locals*, so these globals (other
# than `data` and `all_text_vectors`) are never actually updated.
EMDEDDING_DIM=100  # word-vector dimensionality
data=[]  # accumulated (context, target) training pairs
word = set()  # vocabulary: set of distinct words
word_size = 0 # vocabulary size
word_to_ix = {}  # word -> index mapping
ix_to_word = {}  # index -> word mapping
text=""  # the token sequence being trained on
all_text_vectors = []  # model outputs collected after training

# Build an index vector from context words.
def make_context_vector(context, word_to_ix):
    """Map a list of context words to a 1-D LongTensor of vocab indices."""
    return torch.tensor([word_to_ix[token] for token in context],
                        dtype=torch.long)




# Replace `text_data` with your own dataset when training.
def train_CBOW(model, text_data, epochs=100, lr=0.001):
    """Train a CBOW model on `text_data` and return its outputs.

    Builds the vocabulary, constructs (context, target) pairs with a
    window of 2 words on each side, trains a freshly constructed CBOW
    model with SGD + NLLLoss, then returns the model's log-probability
    output for every training pair.

    model: ignored — kept in the signature for backward compatibility;
        a new CBOW is always built because the vocabulary size is only
        known once `text_data` is seen.
    text_data: ordered sequence of word tokens.
    epochs: number of passes over the training pairs (default 100).
    lr: SGD learning rate (default 0.001).

    Side effects (preserved from the original code): appends the
    training pairs to the module-level `data` list and the outputs to
    the module-level `all_text_vectors` list, so repeated calls
    accumulate into both.
    """
    text = text_data
    vocab = set(text_data)
    word_size = len(vocab)
    word_to_ix = {w: ix for ix, w in enumerate(vocab)}

    # (4 context words, 1 target word) pairs over a +/-2 window.
    for i in range(2, len(text) - 2):
        context = [text[i - 2], text[i - 1], text[i + 1], text[i + 2]]
        data.append((context, text[i]))

    model = CBOW(word_size, EMDEDDING_DIM)
    loss_function = nn.NLLLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    # Train: step once per example. (The original accumulated every
    # example's autograd graph and called backward() once per epoch,
    # holding the whole epoch's graph in memory.)
    for epoch in range(epochs):
        for context, target in data:
            context_vector = make_context_vector(context, word_to_ix)
            log_probs = model(context_vector)
            loss = loss_function(log_probs,
                                 torch.tensor([word_to_ix[target]]))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    # Convert the text to vectors; gradients are not needed here.
    with torch.no_grad():
        for context, target in data:
            context_vector = make_context_vector(context, word_to_ix)
            all_text_vectors.append(model(context_vector))
    return all_text_vectors
