import jieba
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from sklearn.decomposition import PCA
from tqdm import tqdm, trange

# Global state and hyperparameters.
torch.manual_seed(1)  # fixed seed so the embedding init is reproducible
data = []
wordList = []       # ordered list of unique tokens collected in train()
count = 0
raw_text = []
word_to_idx = {}    # token -> vocabulary index (rebuilt inside train())
unknown_index = 0   # index reserved for out-of-vocabulary tokens
# Hyperparameters
learning_rate = 0.001
# Context window: the n tokens before and after the target token.
context_size = 2
# Embedding dimension: each token is represented by this many floats,
# e.g. the = [10.2323, 12.132133, 4.1219774, ...]
embedding_dim = 100
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class CBOW(nn.Module):
    """CBOW model: predict a target word from the sum of its context embeddings."""

    def __init__(self, vocab_size, embedding_dim):
        super(CBOW, self).__init__()
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.proj = nn.Linear(embedding_dim, 128)
        self.output = nn.Linear(128, vocab_size)

    def forward(self, inputs):
        """Return log-probabilities over the vocabulary, shape (1, vocab_size).

        inputs: 1-D LongTensor of context-word indices.
        """
        # Sum the context embeddings into one (1, embedding_dim) vector.
        # (.sum(dim=0) replaces the slower builtin sum() over tensor rows.)
        embeds = self.embeddings(inputs).sum(dim=0).view(1, -1)
        out = F.relu(self.proj(embeds))
        out = self.output(out)
        # log_softmax pairs with nn.NLLLoss used by the training loop.
        nll_prob = F.log_softmax(out, dim=-1)
        return nll_prob

    def get_word_emdedding(self, word):
        # NOTE(review): the misspelled name is kept for backward compatibility;
        # relies on the module-level word_to_idx dict being populated.
        word = torch.tensor([word_to_idx[word]])
        return self.embeddings(word).view(1, -1)

    # Correctly spelled alias for new callers.
    get_word_embedding = get_word_emdedding

 
 
# Load the stop-word table (one word per line).
def load_stop_words():
    """Read hit_stopwords.txt and return its newline-separated entries."""
    with open('hit_stopwords.txt', "r", encoding="utf-8") as f:
        content = f.read()
    return content.split("\n")
 
filelocal=""  # placeholder for the corpus path; train() shadows it with its own local
# Load the corpus and tokenize it with jieba, dropping stop words.
def cut_words(filelocal):
    """Tokenize each line of the UTF-8 file at *filelocal* with jieba.

    Returns a list of token lists (one per input line) with stop words,
    bare spaces, and pure-digit tokens filtered out.
    """
    # A set makes the per-token membership test O(1) instead of O(n).
    stop_words = set(load_stop_words())
    with open(filelocal, encoding='utf8') as f:
        allData = f.readlines()
    result = []
    for words in allData:
        c_words = jieba.lcut(words)
        result.append([word for word in c_words
                       if word not in stop_words
                       and word != ' '
                       and not word.isdigit()])
    return result
 
  
def make_context_vector(context, word_to_ix):
    """Map each context word to its index and wrap the result in a LongTensor."""
    return torch.tensor([word_to_ix[word] for word in context], dtype=torch.long)
 
 
def train(local):
    """Train the CBOW model on the corpus at *local*.

    Side effects: populates the module-level wordList, writes
    CBOW_ZH_wordvec.txt (word -> vector) and dic.txt (vocabulary),
    prints progress, and saves the trained model to cbow_model.pth.
    """
    # Tokenize the corpus and collect the ordered set of unique tokens.
    filelocal = local
    data = cut_words(filelocal)
    for words in data:
        for word in words:
            if word not in wordList:
                wordList.append(word)
    print("wordList=", wordList)

    raw_text = wordList
    print("raw_text=", raw_text)

    # Train on GPU when available, otherwise CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Vocabulary: word <-> index maps.
    vocab = set(raw_text)
    vocab_size = len(vocab)

    word_to_idx = {word: i for i, word in enumerate(vocab)}
    idx_to_word = {i: word for i, word in enumerate(vocab)}
    unknown_index = len(word_to_idx)
    # CBOW training pairs: ([w-2, w-1, w+1, w+2], target).
    data = []
    for i in range(2, len(raw_text) - 2):
        context = [raw_text[i - 2], raw_text[i - 1],
                   raw_text[i + 1], raw_text[i + 2]]
        target = raw_text[i]
        data.append((context, target))

    print(data[:5])

    model = CBOW(vocab_size, embedding_dim).to(device)
    # Use the module-level learning_rate instead of a duplicated literal.
    optimizer = optim.SGD(model.parameters(), lr=learning_rate)
    # Per-epoch accumulated losses.
    losses = []
    loss_function = nn.NLLLoss()

    num_epochs = 10  # renamed: the original shadowed `epoch` in its own loop
    for epoch in trange(num_epochs):
        total_loss = 0
        for context, target in tqdm(data):
            context_vector = make_context_vector(context, word_to_idx).to(device)
            # FIX: .to(device) instead of .cuda() so CPU-only machines work.
            target_tensor = torch.tensor([word_to_idx[target]]).to(device)
            # Zero gradients before each step.
            model.zero_grad()
            # Forward pass (output is already on `device`; no extra .cuda()).
            train_predict = model(context_vector)
            loss = loss_function(train_predict, target_tensor)
            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        losses.append(total_loss)
    print("losses-=", losses)

    # Extract the learned embedding matrix as a numpy array.
    print("CBOW embedding'weight=", model.embeddings.weight)
    W = model.embeddings.weight.cpu().detach().numpy()

    # Embedding dictionary: {word1: vector1, word2: vector2, ...}.
    # Row word_to_idx[word] of W is that word's embedding.
    word_2_vec = {word: W[idx, :] for word, idx in word_to_idx.items()}
    print("word2vec=", word_2_vec)

    # PCA down to 2 dims for visualization: {word: (dim1, dim2), ...}.
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(W)
    word2ReduceDimensionVec = {word: principalComponents[idx, :]
                               for word, idx in word_to_idx.items()}

    # Persist the word vectors and the vocabulary lookup file.
    with open("CBOW_ZH_wordvec.txt", 'w', encoding='utf-8') as f:
        for key in word_to_idx.keys():
            f.write('\n')
            f.writelines('"' + str(key) + '":' + str(word_2_vec[key]))
        f.write('\n')
    with open("dic.txt", "w", encoding='utf-8') as f:
        for key in word_to_idx.keys():
            f.writelines(str(key) + " ")
        f.write('\n')

    # Sanity-check prediction. FIX: the sample context below was built but
    # never used (the leftover loop `context` was passed instead); use it
    # when all of its words are in the vocabulary, otherwise fall back to
    # the last training context.
    test = ["锻炼", "全身", "有氧", "运动"]
    if all(w in word_to_idx for w in test):
        context = test
    context_vector = make_context_vector(context, word_to_idx).to(device)
    predict = model(context_vector).data.cpu().numpy()
    print('Raw text: {}\n'.format(' '.join(raw_text)))
    print('Test Context: {}\n'.format(context))
    max_idx = np.argmax(predict)
    print('Prediction: {}'.format(idx_to_word[max_idx]))

    # FIX: save the trained model instance; the original saved the CBOW
    # *class object* (torch.save(CBOW, ...)), which is useless on load.
    torch.save(model, 'cbow_model.pth')
def test(text, model):
    """Run a trained model on a placeholder context and print the result.

    FIX: the original called model(context_vector, embedding_dim) — a
    spurious second argument that CBOW.forward does not accept and which
    raised TypeError; the extra argument is removed here.
    """
    # NOTE(review): `text` is printed but never encoded — the context vector
    # is just [unknown_index] from module scope; confirm intended behavior.
    context_vector = torch.tensor([unknown_index]).to(device)
    predict = model(context_vector).data.cpu().numpy()
    print('Raw text: {}\n'.format(' '.join(raw_text)))
    print('Test Context: {}\n'.format(text))
    max_idx = np.argmax(predict)
    # Print the predicted index (no idx_to_word mapping is available here).
    print('Prediction: {}'.format(max_idx))