import pickle
import string

import numpy as np
import torch
import torch.nn as nn
# Optional: stop-word removal requires nltk
# import nltk
# from nltk.corpus import stopwords
# stop_words = set(stopwords.words('english'))

EMBEDDING_DIM = 64
VOCAB_SIZE = 203310

# Placeholder embedding weights; replace the zero matrix with pretrained
# vectors before training or inference.
embedding_matrix = np.zeros((VOCAB_SIZE, EMBEDDING_DIM))
embedding_layer = nn.Embedding.from_pretrained(torch.FloatTensor(embedding_matrix))

# Token-to-integer mapping built during training.
with open('lstm/vocab_to_int.txt', 'rb') as f:
    vocab_to_int = pickle.load(f)


class LSTMClassifier(nn.Module):
    def __init__(self, embedding_dim: int, hidden_size: int = 32) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim  # embedding size
        self.hidden_size = hidden_size      # LSTM hidden size
        # Model layers
        self.embedding = embedding_layer
        self.lstm = nn.LSTM(
            input_size=self.embedding_dim,
            hidden_size=self.hidden_size,
            batch_first=True,
        )
        self.clf = nn.Linear(self.hidden_size, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        embedding = self.embedding(x)
        # h_n is the final hidden state: (num_layers, batch, hidden_size)
        _, (h_n, _) = self.lstm(embedding)
        out = self.clf(h_n.squeeze(0))  # logits, shape (batch, 1)
        return out


def data_preprocessing(text: str) -> str:
    """Lower-case the text and strip punctuation."""
    text = text.lower()
    text = ''.join([c for c in text if c not in string.punctuation])
    # Optional stop-word removal (needs nltk, see imports above):
    # text = ' '.join([word for word in text.split() if word not in stop_words])
    return text


def padding(review_int: list, seq_len: int) -> np.ndarray:
    """Left-pad each encoded review with zeros (or truncate) to seq_len."""
    features = np.zeros((len(review_int), seq_len), dtype=int)
    for i, review in enumerate(review_int):
        if len(review) <= seq_len:
            new = [0] * (seq_len - len(review)) + review
        else:
            new = review[:seq_len]
        features[i, :] = np.array(new)
    return features


def preprocess_single_string(
    input_string: str,
    seq_len: int,
    vocab_to_int: dict = vocab_to_int,
) -> torch.Tensor:
    """Clean a raw string, encode it with the vocabulary, and pad it to seq_len."""
    preprocessed_string = data_preprocessing(input_string)
    result_list = []
    for word in preprocessed_string.split():
        try:
            result_list.append(vocab_to_int[word])
        except KeyError as e:
            # Out-of-vocabulary tokens are skipped
            print(f'{e}: not in dictionary!')
    result_padded = padding([result_list], seq_len)[0]
    return torch.tensor(result_padded)
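

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): shows how the helpers above fit together
# for single-string inference. The checkpoint path 'lstm/lstm_weights.pt' and
# SEQ_LEN value are assumptions, not part of the original module.
if __name__ == '__main__':
    SEQ_LEN = 32  # assumed; should match the sequence length used in training

    model = LSTMClassifier(embedding_dim=EMBEDDING_DIM)
    # model.load_state_dict(torch.load('lstm/lstm_weights.pt'))  # hypothetical checkpoint
    model.eval()

    sample = preprocess_single_string('This movie was great!', seq_len=SEQ_LEN)
    with torch.no_grad():
        logit = model(sample.unsqueeze(0))  # add a batch dimension
        prob = torch.sigmoid(logit).item()  # binary-classification probability
    print(f'positive-class probability: {prob:.3f}')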