import numpy as np

# Fixed number of tokens kept per sentence: shorter sentences are
# zero-padded and longer ones truncated inside index_sentence().
sentence_length = 100


def index_sentence(fc_sentence, word_to_index, max_length, embedding_dim=300):
    """Convert a whitespace-separated sentence into a fixed-length list of
    word vectors.

    Despite the name, this does not return integer indices: each in-vocabulary
    word is mapped to its embedding vector from ``word_to_index``, and
    out-of-vocabulary words become zero vectors.

    Args:
        fc_sentence: sentence string with space-separated tokens.
        word_to_index: dict mapping a word to its embedding vector.
        max_length: fixed output length; shorter sentences are padded with
            zero vectors, longer ones truncated.
        embedding_dim: dimensionality of the zero vectors used for OOV words
            and padding (default 300, matching the pretrained embeddings
            loaded elsewhere in this script).

    Returns:
        A list of exactly ``max_length`` vectors.
    """
    words = fc_sentence.split()  # tokens are assumed space-separated
    # Truncate first, then map each word; OOV words fall back to a zero vector.
    matrix = [
        word_to_index.get(word, np.zeros(embedding_dim))
        for word in words[:max_length]
    ]
    # Pad short sentences up to the fixed length with zero vectors.
    if len(matrix) < max_length:
        matrix += [np.zeros(embedding_dim)] * (max_length - len(matrix))
    return matrix


# Word-vector dictionary: maps each word to its pretrained embedding vector.
vector_table = {}
with open('sgns.zhihu.bigram', 'r', encoding='utf-8') as f:
    for line in f:
        parts = line.strip().split()
        # Skip blank lines and the optional word2vec text-format header line
        # ("<vocab_size> <dim>"), which has no word token and would otherwise
        # be stored as a bogus short "vector".
        if len(parts) < 3:
            continue
        word = parts[0]
        try:
            vector = np.array(parts[1:], dtype=np.float32)  # embedding values
        except ValueError:
            # NOTE(review): non-numeric tail — skip the line rather than crash.
            continue
        vector_table[word] = vector

# word_set = set()
# with open('all.words.txt', 'r', encoding='gbk') as f:
#     for line in f:
#         sentence = line.strip()  # 去除行首尾空白字符
#         words = sentence.split()
#         for i in words:
#             word_set.add(i)
# index = 1
# vector_table = {}
# for item in word_set:
#     vector_table[item] = index
#     index += 1


# 计算最大句子长度
# max_len = 0
# with open('all.words.txt', 'r', encoding='gbk') as f:
#     for line in f:
#         count = len(line.split())
#         max_len = max(max_len, count)

# Build the per-sentence embedding matrices from the corpus file.
sentences = []
with open('all.words.txt', 'r', encoding='gbk') as f:
    for line in f:
        sentence = line.strip()  # strip leading/trailing whitespace
        if sentence:  # skip empty lines
            # NOTE(review): the label loop below skips labels that are neither
            # NEG nor POS, but no sentence is skipped here — if the label file
            # contains any other value, sentences and tags fall out of
            # alignment. Confirm the two files are strictly parallel.
            sentence_matrix = index_sentence(sentence, vector_table, sentence_length)
            sentences.append(sentence_matrix)

# Convert text labels to one-hot vectors: NEG -> [1, 0], POS -> [0, 1].
_LABEL_TO_ONEHOT = {"NEG": [1, 0], "POS": [0, 1]}

tags = []
with open('all.labels.txt', 'r', encoding='gbk') as f:
    for raw_line in f:
        label = raw_line.strip()  # strip leading/trailing whitespace
        if not label:
            continue  # skip blank lines
        one_hot = _LABEL_TO_ONEHOT.get(label)
        if one_hot is None:
            continue  # skip labels that are neither NEG nor POS
        tags.append(list(one_hot))  # fresh list per row, as before

# Persist both arrays in one NumPy .npz archive.
# sentences -> shape (num_sentences, sentence_length, 300) after np.array();
# tags -> shape (num_kept_labels, 2). NOTE(review): the first dimensions are
# equal only if every non-empty line of all.labels.txt is NEG or POS — verify.
np.savez('data_matrix.npz', sentences=np.array(sentences), tags=np.array(tags))

print("数据已成功保存到 'data_matrix.npz' 文件中。")
