
import torch
import jieba
from torch import nn
from gensim.models import Word2Vec
import numpy as np
import torch.nn.functional as F


class LSTMModel(nn.Module):
    """Single-layer LSTM classifier.

    Runs the input sequence through an LSTM, takes the hidden state at the
    final time step, projects it to class scores, and returns a softmax
    distribution over the classes.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # batch_first=True -> input shape is (batch, seq_len, input_size)
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        seq_out, _hidden = self.lstm(x)
        last_step = seq_out[:, -1, :]  # hidden state of the final time step
        logits = self.fc(last_step)
        return F.softmax(logits, dim=1)


# Data loading
def load_txt(path):
    """Read a UTF-8 text file and return its lines.

    Each line is stripped of surrounding whitespace and wrapped in its own
    single-item list (callers downstream flatten/iterate this structure).

    Args:
        path: path to the text file.

    Returns:
        list[list[str]]: one ``[stripped_line]`` entry per line in the file.
    """
    with open(path, 'r', encoding='utf-8') as f:
        # Iterate the file object directly instead of readlines():
        # same result, no intermediate list of raw lines.
        return [[line.strip()] for line in f]


# Stop-word removal
def drop_stopword(datas):
    """Remove Chinese stop words from a token sequence.

    Reads the stop-word list from ``chinese_stop_words.txt`` in the current
    working directory (one word per line).

    Args:
        datas: iterable of tokens.

    Returns:
        list: the tokens, in order, with stop words filtered out.
    """
    # NOTE(review): the file is re-read on every call; fine for one-off
    # inference, consider caching at module level for batch use.
    with open('chinese_stop_words.txt', 'r', encoding='UTF8') as f:
        # A set gives O(1) membership tests (the original used a list: O(n)).
        stop_words = {word.strip() for word in f}
    return [token for token in datas if token not in stop_words]


def preprocess_text(text):
    """Tokenize *text* with jieba and strip stop words.

    Args:
        text: raw input string.

    Returns:
        list[str]: the remaining tokens after stop-word removal.
    """
    tokens = list(jieba.cut(text))
    return drop_stopword(tokens)

def load_word2vec_model(model_path):
    """Deserialize a trained gensim Word2Vec model from *model_path*."""
    model = Word2Vec.load(model_path)
    return model

# 将文本转换为Word2Vec向量表示
# Convert a token sequence to its Word2Vec vector representation
def text_to_vector(text, word2vec_model):
    """Map each token to its Word2Vec embedding.

    Tokens found in the model's vocabulary get their embedding; out-of-
    vocabulary tokens get an all-zero vector of the same dimension.

    Args:
        text: iterable of tokens.
        word2vec_model: object exposing ``wv`` (token -> vector mapping
            supporting ``in`` and ``[]``) and ``vector_size``.

    Returns:
        np.ndarray of shape ``(len(text), vector_size)``. An empty input
        returns shape ``(0, vector_size)`` — the original returned the
        ambiguous 1-D shape ``(0,)`` in that case.
    """
    dim = word2vec_model.vector_size
    vectors = [
        word2vec_model.wv[word] if word in word2vec_model.wv else np.zeros(dim)
        for word in text
    ]
    if not vectors:
        # Keep the result two-dimensional even when no tokens were given.
        return np.zeros((0, dim))
    return np.array(vectors)


if __name__ == '__main__':
    input_text = "这个车我开了很多年，棒！"
    # Class index -> human-readable label.
    label = {1: "正面情绪", 0: "负面情绪"}
    # NOTE(review): torch.load unpickles the full model object — only load
    # trusted checkpoints; consider map_location='cpu' on CPU-only hosts.
    model = torch.load('model.pth')
    model.eval()  # inference mode (good hygiene even without dropout/BN)

    '''
    LSTMModel(
  (lstm): LSTM(100, 50, batch_first=True)
  (fc): Linear(in_features=50, out_features=2, bias=True)
)
    '''
    print(model)
    # Tokenize the input and strip stop words.
    input_data = preprocess_text(input_text)
    print(input_text)
    print(input_data)  # e.g. ['车', '开', '很多年', '棒']
    word2vec_model = load_word2vec_model('word2vec.model')
    vectors = text_to_vector(input_data, word2vec_model)
    for vector in vectors:
        print(vector.shape)
        print(vector)
    # Print the shape of the whole array.
    print(vectors.shape)
    # Reuse the vectors computed above (the original recomputed them here)
    # and add a leading batch dimension; float32 matches the model weights.
    input_array = np.array([vectors], dtype=np.float32)
    input_tensor = torch.Tensor(input_array)
    print(input_tensor)
    print(input_tensor.shape)
    # Run the model without tracking gradients.
    with torch.no_grad():
        output = model(input_tensor)
    print(f"Output:{output}")
    print(torch.argmax(output).item())
    predicted_class = label[torch.argmax(output).item()]
    print(f"predicted_text:{input_text}")
    print(f"模型预测的类别: {predicted_class}")
