import pickle
import string

import numpy as np
import torch
import torch.nn as nn
# Optional stop-word filtering (requires nltk):
# from nltk.corpus import stopwords
# stop_words = set(stopwords.words('english'))



EMBEDDING_DIM = 64
VOCAB_SIZE = 203310

# Placeholder (all-zero) embedding weights; the trained values are presumably
# restored when a trained state dict is loaded into the model below.
embedding_matrix = np.zeros((VOCAB_SIZE, EMBEDDING_DIM))
embedding_layer = nn.Embedding.from_pretrained(torch.FloatTensor(embedding_matrix))
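
# A minimal sketch of restoring trained embedding weights directly, assuming
# they were exported with np.save to a file such as 'lstm/embedding_matrix.npy'
# (the path and file are assumptions, not part of this script):
#
#   embedding_matrix = np.load('lstm/embedding_matrix.npy')
#   embedding_layer = nn.Embedding.from_pretrained(torch.FloatTensor(embedding_matrix))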




# Word-to-index vocabulary built during training (a pickled dict, despite the .txt extension).
with open('lstm/vocab_to_int.txt', 'rb') as f:
    vocab_to_int = pickle.load(f)

class LSTMClassifier(nn.Module):
    def __init__(self, embedding_dim: int, hidden_size: int = 32) -> None:
        super().__init__()

        self.embedding_dim = embedding_dim  # size of the input word embeddings
        self.hidden_size = hidden_size      # size of the LSTM hidden state
        self.embedding = embedding_layer    # frozen pretrained embedding layer

        self.lstm = nn.LSTM(
            input_size=self.embedding_dim,
            hidden_size=self.hidden_size,
            batch_first=True
        )

        self.clf = nn.Linear(self.hidden_size, 1)

    def forward(self, x):
        embedding = self.embedding(x)
        # h_n holds the final hidden state, shape (num_layers, batch, hidden_size).
        _, (h_n, _) = self.lstm(embedding)
        # Squeeze only the num_layers dimension so a batch of size 1 keeps its batch axis.
        out = self.clf(h_n.squeeze(0))
        return out
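
# A minimal sketch of restoring a trained classifier, assuming the weights were
# saved with torch.save(model.state_dict(), ...) to a path such as
# 'lstm/lstm_model.pt' (the checkpoint path is an assumption):
#
#   model = LSTMClassifier(embedding_dim=EMBEDDING_DIM)
#   model.load_state_dict(torch.load('lstm/lstm_model.pt'))
#   model.eval()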


def data_preprocessing(text: str) -> str:
    """Lower-case the text, strip punctuation and normalise whitespace."""
    text = text.lower()
    text = ''.join([c for c in text if c not in string.punctuation])  # remove punctuation
    # Optional stop-word filtering:
    # text = ' '.join([word for word in text.split() if word not in stop_words])
    text = ' '.join(text.split())  # collapse repeated whitespace
    return text
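
# Example: data_preprocessing('Great, movie!!') -> 'great movie'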



def padding(review_int: list, seq_len: int) -> np.ndarray:
    """Left-pad each encoded review with zeros (or truncate it) to seq_len."""
    features = np.zeros((len(review_int), seq_len), dtype=int)
    for i, review in enumerate(review_int):
        if len(review) <= seq_len:
            zeros = list(np.zeros(seq_len - len(review)))
            new = zeros + review
        else:
            new = review[:seq_len]
        features[i, :] = np.array(new)

    return features
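
# Example: padding([[5, 7]], seq_len=4) -> array([[0, 0, 5, 7]])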

def preprocess_single_string(input_string: str, seq_len: int, vocab_to_int: dict = vocab_to_int) -> torch.Tensor:
    """Clean a raw string, encode it with the vocabulary and pad it to seq_len."""
    preprocessed_string = data_preprocessing(input_string)
    result_list = []
    for word in preprocessed_string.split():
        try:
            result_list.append(vocab_to_int[word])
        except KeyError as e:
            # Out-of-vocabulary words are reported and skipped.
            print(f'{e}: not in dictionary!')
    result_padded = padding([result_list], seq_len)[0]

    return torch.tensor(result_padded)
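
# End-to-end inference sketch, reusing the `model` from the restore sketch above
# and assuming a sequence length of 32 matching training (an assumption):
#
#   sample = preprocess_single_string('this film was great', seq_len=32)
#   with torch.no_grad():
#       prob = torch.sigmoid(model(sample.unsqueeze(0)))  # positive-class probability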