menikev committed on
Commit
7e56f3f
1 Parent(s): a8fd9ba

Delete prediction_sinhala.py

Files changed (1)
  1. prediction_sinhala.py +0 -231
prediction_sinhala.py DELETED
@@ -1,231 +0,0 @@
- from typing import List, Optional, Tuple
-
- import torch
- from torch import Tensor
- from torch import nn
- from transformers import RobertaModel
-
- from faknow.model.layers.layer import TextCNNLayer
- from faknow.model.model import AbstractModel
- import pandas as pd
-
-
- class _MLP(nn.Module):
-     def __init__(self,
-                  input_dim: int,
-                  embed_dims: List[int],
-                  dropout_rate: float,
-                  output_layer=True):
-         super().__init__()
-         layers = list()
-         for embed_dim in embed_dims:
-             layers.append(nn.Linear(input_dim, embed_dim))
-             layers.append(nn.BatchNorm1d(embed_dim))
-             layers.append(nn.ReLU())
-             layers.append(nn.Dropout(p=dropout_rate))
-             input_dim = embed_dim
-         if output_layer:
-             layers.append(torch.nn.Linear(input_dim, 1))
-         self.mlp = torch.nn.Sequential(*layers)
-
-     def forward(self, x: Tensor) -> Tensor:
-         """
-
-         Args:
-             x (Tensor): shared feature from domain and text, shape=(batch_size, embed_dim)
-
-         """
-         return self.mlp(x)
-
-
- class _MaskAttentionLayer(torch.nn.Module):
-     """
-     Mask attention layer: computes attention weights over token embeddings and pools them
-     """
-     def __init__(self, input_size: int):
-         super(_MaskAttentionLayer, self).__init__()
-         self.attention_layer = torch.nn.Linear(input_size, 1)
-
-     def forward(self,
-                 inputs: Tensor,
-                 mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
-         weights = self.attention_layer(inputs).view(-1, inputs.size(1))
-         if mask is not None:
-             weights = weights.masked_fill(mask == 0, float("-inf"))
-         weights = torch.softmax(weights, dim=-1).unsqueeze(1)
-         outputs = torch.matmul(weights, inputs).squeeze(1)
-         return outputs, weights
-
-
- class MDFEND(AbstractModel):
-     r"""
-     MDFEND: Multi-domain Fake News Detection, CIKM 2021
-     paper: https://dl.acm.org/doi/10.1145/3459637.3482139
-     code: https://github.com/kennqiang/MDFEND-Weibo21
-     """
-     def __init__(self,
-                  pre_trained_bert_name: str,
-                  domain_num: int,
-                  mlp_dims: Optional[List[int]] = None,
-                  dropout_rate=0.2,
-                  expert_num=5):
-         """
-
-         Args:
-             pre_trained_bert_name (str): name or local path of the pre-trained bert model
-             domain_num (int): total number of domains
-             mlp_dims (List[int]): dimensions of the MLP layers; if None, defaults to [384]
-             dropout_rate (float): rate of the Dropout layer, default=0.2
-             expert_num (int): number of experts (TextCNNLayer), default=5
-         """
-         super(MDFEND, self).__init__()
-         self.domain_num = domain_num
-         self.expert_num = expert_num
-         self.bert = RobertaModel.from_pretrained(
-             pre_trained_bert_name).requires_grad_(False)
-         self.embedding_size = self.bert.config.hidden_size
-         self.loss_func = nn.BCELoss()
-         if mlp_dims is None:
-             mlp_dims = [384]
-
-         filter_num = 64
-         filter_sizes = [1, 2, 3, 5, 10]
-         experts = [
-             TextCNNLayer(self.embedding_size, filter_num, filter_sizes)
-             for _ in range(self.expert_num)
-         ]
-         self.experts = nn.ModuleList(experts)
-
-         self.gate = nn.Sequential(
-             nn.Linear(self.embedding_size * 2, mlp_dims[-1]), nn.ReLU(),
-             nn.Linear(mlp_dims[-1], self.expert_num), nn.Softmax(dim=1))
-
-         self.attention = _MaskAttentionLayer(self.embedding_size)
-
-         self.domain_embedder = nn.Embedding(num_embeddings=self.domain_num,
-                                             embedding_dim=self.embedding_size)
-         # 320 = filter_num * len(filter_sizes), the dimension of each expert's output
-         self.classifier = _MLP(320, mlp_dims, dropout_rate)
-
-     def forward(self, token_id: Tensor, mask: Tensor,
-                 domain: Tensor) -> Tensor:
-         """
-
-         Args:
-             token_id (Tensor): token ids from bert tokenizer, shape=(batch_size, max_len)
-             mask (Tensor): mask from bert tokenizer, shape=(batch_size, max_len)
-             domain (Tensor): domain id, shape=(batch_size,)
-
-         Returns:
-             FloatTensor: the prediction of being fake, shape=(batch_size,)
-         """
-         text_embedding = self.bert(token_id,
-                                    attention_mask=mask).last_hidden_state
-         attention_feature, _ = self.attention(text_embedding, mask)
-
-         domain_embedding = self.domain_embedder(domain.view(-1, 1)).squeeze(1)
-
-         gate_input = torch.cat([domain_embedding, attention_feature], dim=-1)
-         gate_output = self.gate(gate_input)
-
-         shared_feature = 0
-         for i in range(self.expert_num):
-             expert_feature = self.experts[i](text_embedding)
-             shared_feature += (expert_feature * gate_output[:, i].unsqueeze(1))
-
-         label_pred = self.classifier(shared_feature)
-
-         return torch.sigmoid(label_pred.squeeze(1))
-
-     def calculate_loss(self, data) -> Tensor:
-         """
-         calculate loss via BCELoss
-
-         Args:
-             data (dict): batch data dict
-
-         Returns:
-             loss (Tensor): loss value
-         """
-
-         token_ids = data['text']['token_id']
-         masks = data['text']['mask']
-         domains = data['domain']
-         labels = data['label']
-         output = self.forward(token_ids, masks, domains)
-         return self.loss_func(output, labels.float())
-
-     def predict(self, data_without_label) -> Tensor:
-         """
-         predict the probability of being fake news
-
-         Args:
-             data_without_label (Dict[str, Any]): batch data dict
-
-         Returns:
-             Tensor: probability of being fake, shape=(batch_size,)
-         """
-
-         token_ids = data_without_label['text']['token_id']
-         masks = data_without_label['text']['mask']
-         domains = data_without_label['domain']
-
-         output_prob = self.forward(token_ids, masks, domains)
-
-         return output_prob
-
-
- # ---- prediction script for the Sinhala test set ----
- from faknow.data.dataset.text import TextDataset
- from faknow.data.process.text_process import TokenizerFromPreTrained
-
- from torch.utils.data import DataLoader
-
- testing_path = "/content/drive/MyDrive/sinhala-dataset/test_data.json"
-
- df = pd.read_json(testing_path)
- print(df.head())
- df = df[:100]
- df["label"] = 0  # dummy label column; the test data is unlabeled
- print(df.head())
- print(len(df))
- path = '/content/drive/MyDrive/sinhala-dataset'
- testing_json = "/testing.json"
- df.to_json(path + testing_json, orient='records')
-
- MODEL_SAVE_PATH = "/content/drive/MyDrive/models-path-improvement/last-epoch-model-2024-03-08-15_34_03_6.pth"
-
- max_len, bert = 160, 'sinhala-nlp/sinbert-sold-si'
- tokenizer = TokenizerFromPreTrained(max_len, bert)
-
- # dataset
- batch_size = 100
-
- testing_path = path + testing_json
-
- testing_set = TextDataset(testing_path, ['text'], tokenizer)
- testing_loader = DataLoader(testing_set, batch_size, shuffle=False)
-
- # prepare model
- domain_num = 3
-
- model = MDFEND(bert, domain_num, expert_num=18,
-                mlp_dims=[5080, 4020, 3010, 2024, 1012, 606, 400])
- model.load_state_dict(torch.load(f=MODEL_SAVE_PATH, map_location=torch.device('cpu')))
- model.eval()  # disable dropout and use running batch-norm statistics for inference
-
-
- outputs = []
- with torch.no_grad():  # gradients are not needed for prediction
-     for batch_data in testing_loader:
-         outputs.append(model.predict(batch_data))
- print(outputs)
-
- # 1 ====> offensive
- # 0 ====> not offensive
- label = []
- for output in outputs:
-     for out in output:
-         output_prob = out.item()
-         if output_prob >= 0.5:
-             label.append(1)
-         else:
-             label.append(0)
-
- print(label)