import numpy as np


def vectorize_sentence(sentence, vector_table, dim=300):
    """Return the mean word vector of a whitespace-separated sentence.

    Parameters
    ----------
    sentence : str
        Sentence whose words are separated by spaces.
    vector_table : dict[str, np.ndarray]
        Mapping from word to its embedding vector.
    dim : int, optional
        Fallback embedding dimension, used only when ``vector_table`` is
        empty (default 300, matching the original hard-coded size).

    Returns
    -------
    np.ndarray
        Element-wise mean of the word vectors; a zero vector for an
        empty sentence.
    """
    # Infer the embedding dimension from the table so out-of-vocabulary
    # zero-vectors always match the real vectors' length (the original
    # hard-coded 300, which breaks np.mean for any other dimension).
    if vector_table:
        dim = len(next(iter(vector_table.values())))

    words = sentence.split()  # assumes words are space-separated
    if not words:
        # np.mean over an empty list would warn and yield NaN.
        return np.zeros(dim)

    # Unknown words contribute a zero vector of the correct dimension.
    sentence_vectors = [vector_table.get(word, np.zeros(dim)) for word in words]

    # Combine the word vectors by averaging.
    return np.mean(sentence_vectors, axis=0)


# Load word embeddings from a word2vec-style text file into a dict.
vector_table = {}
with open('sgns.zhihu.bigram', 'r', encoding='utf-8') as f:
    for line in f:
        parts = line.strip().split()
        # Skip blank lines and the word2vec-format header line
        # ("vocab_size dim"), which has no word + float-vector payload.
        if len(parts) < 3:
            continue
        word = parts[0]
        try:
            vector = np.array(parts[1:], dtype=np.float32)  # convert to numpy array
        except ValueError:
            continue  # malformed line: non-numeric vector components
        vector_table[word] = vector

# 示例

# Build one embedding vector per non-empty line of the corpus file.
sentences = []
with open('all.words.txt', 'r', encoding='gbk') as f:
    for raw_line in f:
        text = raw_line.strip()  # drop leading/trailing whitespace
        if not text:
            continue  # ignore blank lines
        sentences.append(vectorize_sentence(text, vector_table))

# 现在 sentences 列表中包含了所有句子的向量
# Map textual labels to integers: NEG -> 0, POS -> 1; anything else is dropped.
_LABEL_MAP = {"NEG": 0, "POS": 1}

tags = []
with open('all.labels.txt', 'r', encoding='gbk') as f:
    for raw_line in f:
        label = raw_line.strip()  # drop leading/trailing whitespace
        # Empty lines and unrecognized labels both fall through the lookup.
        # NOTE(review): labels skipped here are not skipped in the sentence
        # loop, so sentences and tags can fall out of alignment — verify
        # that the two input files are strictly parallel.
        if label in _LABEL_MAP:
            tags.append(_LABEL_MAP[label])

# 现在 tags 列表中包含了所有的标签


# sentences is a list of vectors (assumed one consistent length each, e.g.
# 1x300 — TODO confirm); tags is the matching list of 0/1 labels.
sentences_array = np.asarray(sentences)
tags_array = np.asarray(tags)

# Persist both arrays to a single NumPy archive for later training runs.
np.savez('data.npz', sentences=sentences_array, tags=tags_array)
