menikev committed on
Commit
d2ed505
1 Parent(s): 57a2c61

Upload 9 files

components/data_loading.py ADDED
@@ -0,0 +1,53 @@
+ import pandas as pd
+ import torch
+ 
+ 
+ def preparing_data(text: str, domain: int):
+     """Build a small dataframe pairing the input text with its domain id.
+ 
+     Args:
+         text (str): input text from the user
+         domain (int): output domain from the domain identification pipeline
+ 
+     Returns:
+         pd.DataFrame: dataframe containing the texts and domains
+     """
+     # The model cannot run inference on a single example,
+     # so this dummy example is kept alongside the real one.
+     dict_data = {
+         'text': ['hello world'],
+         'domain': [0],
+     }
+ 
+     dict_data["text"].append(text)
+     dict_data["domain"].append(domain)
+     # Convert the dictionary to a DataFrame
+     df = pd.DataFrame(dict_data)
+ 
+     # return the dataframe
+     return df
+ 
+ 
+ def loading_data(tokenizer, df: pd.DataFrame):
+     """Tokenize every row of the dataframe and stack the results into tensors."""
+     ids = []
+     masks = []
+     domain_list = []
+ 
+     texts = df["text"]
+     domains = df["domain"]
+ 
+     for i in range(len(df)):
+         text = texts[i]
+         token = tokenizer(text)
+         ids.append(token["token_id"])
+         masks.append(token["mask"])
+         domain_list.append(domains[i])
+ 
+     input_ids = torch.cat(ids, dim=0)
+     input_masks = torch.cat(masks, dim=0)
+     input_domains = torch.tensor(domain_list)
+ 
+     return input_ids, input_masks, input_domains
+ 
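Usage (a minimal sketch, assuming this repo's components/ package layout and the tokenizer returned by loading_model_and_tokenizer() in components/multi_lingual_model.py, whose output exposes "token_id" and "mask"):

    from components.data_loading import preparing_data, loading_data
    from components.multi_lingual_model import loading_model_and_tokenizer

    tokenizer, model = loading_model_and_tokenizer()
    df = preparing_data("some user text", domain=2)   # dummy 'hello world' row plus the real one
    input_ids, input_masks, input_domains = loading_data(tokenizer, df)
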
components/english_information_extraction.py ADDED
@@ -0,0 +1,45 @@
+ from transformers import pipeline
+ 
+ zeroshot_classifier = pipeline("zero-shot-classification", model="MoritzLaurer/deberta-v3-large-zeroshot-v1.1-all-33")
+ 
+ 
+ def english_information_extraction(text: str):
+     """Run zero-shot domain, sentiment, and hate-speech classification on an English text."""
+     hypothesis_template_domain = "This text is about {}"
+     domain_classes = ["women", "muslims", "tamil", "sinhala", "other"]
+     domains_output = zeroshot_classifier(text, domain_classes, hypothesis_template=hypothesis_template_domain, multi_label=False)
+ 
+     # the sentiment and discrimination hypotheses are conditioned on the predicted domain
+     sentiment_discrimination_prompt = f"the content of this text about {domains_output['labels'][0]} "
+     hypothesis_template_sentiment = sentiment_discrimination_prompt + "is {} sentiment"
+ 
+     sentiment_classes = ["positive", "neutral", "negative"]
+     sentiment_output = zeroshot_classifier(text, sentiment_classes, hypothesis_template=hypothesis_template_sentiment, multi_label=False)
+ 
+     hypothesis_template_discrimination = sentiment_discrimination_prompt + "is {}"
+     discrimination_classes = ["hateful", "not hateful"]
+     discrimination_output = zeroshot_classifier(text, discrimination_classes, hypothesis_template=hypothesis_template_discrimination, multi_label=False)
+ 
+     domain_label, domain_score = domains_output["labels"][0], domains_output["scores"][0]
+     sentiment_label, sentiment_score = sentiment_output["labels"][0], sentiment_output["scores"][0]
+     discrimination_label, discrimination_score = discrimination_output["labels"][0], discrimination_output["scores"][0]
+ 
+     return {"domain_label": domain_label,
+             "domain_score": domain_score,
+             "sentiment_label": sentiment_label,
+             "sentiment_score": sentiment_score,
+             "discrimination_label": discrimination_label,
+             "discrimination_score": discrimination_score}
+ 
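Example call (a sketch; the input text is invented and the returned labels and scores depend entirely on the zero-shot model):

    result = english_information_extraction("A sample English post about a community.")
    print(result["domain_label"], round(result["domain_score"], 3))
    print(result["sentiment_label"], result["discrimination_label"])
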
components/language_identification.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+ 
+ 
+ def language_identification(texts):
+     """Detect the language of the input text and return {language_code: probability}."""
+     text = [
+         texts,
+     ]
+ 
+     model_ckpt = "papluca/xlm-roberta-base-language-detection"
+     tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
+     model = AutoModelForSequenceClassification.from_pretrained(model_ckpt)
+ 
+     inputs = tokenizer(text, padding=True, truncation=True, return_tensors="pt")
+ 
+     with torch.no_grad():
+         logits = model(**inputs).logits
+ 
+     preds = torch.softmax(logits, dim=-1)
+ 
+     # Map raw predictions to languages
+     id2lang = model.config.id2label
+     vals, idxs = torch.max(preds, dim=1)
+     lang_dict = {id2lang[k.item()]: v.item() for k, v in zip(idxs, vals)}
+     return lang_dict
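
Example (a sketch; the exact score is whatever the detector assigns):

    lang_dict = language_identification("Bonjour tout le monde")
    print(lang_dict)  # e.g. {'fr': 0.99}; keys come from model.config.id2label
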
components/multi_lingual_model.py ADDED
@@ -0,0 +1,216 @@
+ # the model
+ from typing import List, Optional, Tuple
+ 
+ import torch
+ from torch import Tensor
+ from torch import nn
+ from transformers import RobertaModel
+ 
+ from faknow.model.layers.layer import TextCNNLayer
+ from faknow.model.model import AbstractModel
+ from faknow.data.process.text_process import TokenizerFromPreTrained
+ import pandas as pd
+ import gdown
+ import os
+ 
+ 
+ class _MLP(nn.Module):
+     def __init__(self,
+                  input_dim: int,
+                  embed_dims: List[int],
+                  dropout_rate: float,
+                  output_layer=True):
+         super().__init__()
+         layers = list()
+         for embed_dim in embed_dims:
+             layers.append(nn.Linear(input_dim, embed_dim))
+             layers.append(nn.BatchNorm1d(embed_dim))
+             layers.append(nn.ReLU())
+             layers.append(nn.Dropout(p=dropout_rate))
+             input_dim = embed_dim
+         if output_layer:
+             layers.append(torch.nn.Linear(input_dim, 1))
+         self.mlp = torch.nn.Sequential(*layers)
+ 
+     def forward(self, x: Tensor) -> Tensor:
+         """
+         Args:
+             x (Tensor): shared feature from domain and text, shape=(batch_size, embed_dim)
+         """
+         return self.mlp(x)
+ 
+ 
+ class _MaskAttentionLayer(torch.nn.Module):
+     """
+     Compute attention over the token embeddings, respecting the padding mask.
+     """
+     def __init__(self, input_size: int):
+         super(_MaskAttentionLayer, self).__init__()
+         self.attention_layer = torch.nn.Linear(input_size, 1)
+ 
+     def forward(self,
+                 inputs: Tensor,
+                 mask: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
+         weights = self.attention_layer(inputs).view(-1, inputs.size(1))
+         if mask is not None:
+             weights = weights.masked_fill(mask == 0, float("-inf"))
+         weights = torch.softmax(weights, dim=-1).unsqueeze(1)
+         outputs = torch.matmul(weights, inputs).squeeze(1)
+         return outputs, weights
+ 
+ 
+ class MDFEND(AbstractModel):
+     r"""
+     MDFEND: Multi-domain Fake News Detection, CIKM 2021
+     paper: https://dl.acm.org/doi/10.1145/3459637.3482139
+     code: https://github.com/kennqiang/MDFEND-Weibo21
+     """
+     def __init__(self,
+                  pre_trained_bert_name: str,
+                  domain_num: int,
+                  mlp_dims: Optional[List[int]] = None,
+                  dropout_rate=0.2,
+                  expert_num=5):
+         """
+         Args:
+             pre_trained_bert_name (str): name or local path of the pre-trained bert model
+             domain_num (int): total number of domains
+             mlp_dims (List[int]): dimensions of the MLP layers; if None, [384] is used, default=None
+             dropout_rate (float): rate of the Dropout layer, default=0.2
+             expert_num (int): number of experts (TextCNNLayer instances), default=5
+         """
+         super(MDFEND, self).__init__()
+         self.domain_num = domain_num
+         self.expert_num = expert_num
+         self.bert = RobertaModel.from_pretrained(
+             pre_trained_bert_name).requires_grad_(False)
+         self.embedding_size = self.bert.config.hidden_size
+         self.loss_func = nn.BCELoss()
+         if mlp_dims is None:
+             mlp_dims = [384]
+ 
+         filter_num = 64
+         filter_sizes = [1, 2, 3, 5, 10]
+         experts = [
+             TextCNNLayer(self.embedding_size, filter_num, filter_sizes)
+             for _ in range(self.expert_num)
+         ]
+         self.experts = nn.ModuleList(experts)
+ 
+         self.gate = nn.Sequential(
+             nn.Linear(self.embedding_size * 2, mlp_dims[-1]), nn.ReLU(),
+             nn.Linear(mlp_dims[-1], self.expert_num), nn.Softmax(dim=1))
+ 
+         self.attention = _MaskAttentionLayer(self.embedding_size)
+ 
+         self.domain_embedder = nn.Embedding(num_embeddings=self.domain_num,
+                                             embedding_dim=self.embedding_size)
+         # each expert outputs filter_num * len(filter_sizes) = 320 features
+         self.classifier = _MLP(320, mlp_dims, dropout_rate)
+ 
+     def forward(self, token_id: Tensor, mask: Tensor,
+                 domain: Tensor) -> Tensor:
+         """
+         Args:
+             token_id (Tensor): token ids from the bert tokenizer, shape=(batch_size, max_len)
+             mask (Tensor): mask from the bert tokenizer, shape=(batch_size, max_len)
+             domain (Tensor): domain id, shape=(batch_size,)
+ 
+         Returns:
+             FloatTensor: probability of being fake, shape=(batch_size,)
+         """
+         text_embedding = self.bert(token_id,
+                                    attention_mask=mask).last_hidden_state
+         attention_feature, _ = self.attention(text_embedding, mask)
+ 
+         domain_embedding = self.domain_embedder(domain.view(-1, 1)).squeeze(1)
+ 
+         gate_input = torch.cat([domain_embedding, attention_feature], dim=-1)
+         gate_output = self.gate(gate_input)
+ 
+         shared_feature = 0
+         for i in range(self.expert_num):
+             expert_feature = self.experts[i](text_embedding)
+             shared_feature += (expert_feature * gate_output[:, i].unsqueeze(1))
+ 
+         label_pred = self.classifier(shared_feature)
+ 
+         return torch.sigmoid(label_pred.squeeze(1))
+ 
+     def calculate_loss(self, data) -> Tensor:
+         """
+         calculate loss via BCELoss
+ 
+         Args:
+             data (dict): batch data dict
+ 
+         Returns:
+             loss (Tensor): loss value
+         """
+         token_ids = data['text']['token_id']
+         masks = data['text']['mask']
+         domains = data['domain']
+         labels = data['label']
+         output = self.forward(token_ids, masks, domains)
+         return self.loss_func(output, labels.float())
+ 
+     def predict(self, data_without_label) -> Tensor:
+         """
+         predict the probability of being fake news
+ 
+         Args:
+             data_without_label (Dict[str, Any]): batch data dict
+ 
+         Returns:
+             Tensor: one-hot probability, shape=(batch_size, 2)
+         """
+         token_ids = data_without_label['text']['token_id']
+         masks = data_without_label['text']['mask']
+         domains = data_without_label['domain']
+ 
+         # shape=(n,), data = 1 or 0
+         round_pred = torch.round(self.forward(token_ids, masks,
+                                               domains)).long()
+         # after one hot: shape=(n,2), data = [0,1] or [1,0]
+         one_hot_pred = torch.nn.functional.one_hot(round_pred, num_classes=2)
+         return one_hot_pred
+ 
+ 
+ def download_from_gdrive(file_id, output_path):
+     output = os.path.join(output_path)
+ 
+     # Download only if the file does not already exist
+     if not os.path.exists(output):
+         gdown.download(id=file_id, output=output, quiet=False)
+ 
+     return output
+ 
+ 
+ def loading_model_and_tokenizer():
+     max_len, bert = 160, 'FacebookAI/xlm-roberta-base'
+     # https://drive.google.com/file/d/1--6GB3Ff81sILwtuvVTuAW3shGW_5VWC/view
+     file_id = "1--6GB3Ff81sILwtuvVTuAW3shGW_5VWC"
+ 
+     model_path = '/content/drive/MyDrive/models/last-epoch-model-2024-03-17-01_00_32_1.pth'
+ 
+     MODEL_SAVE_PATH = download_from_gdrive(file_id, model_path)
+     domain_num = 4
+ 
+     tokenizer = TokenizerFromPreTrained(max_len, bert)
+ 
+     model = MDFEND(bert, domain_num, expert_num=12, mlp_dims=[3010, 2024, 1012, 606, 400])
+ 
+     model.load_state_dict(torch.load(f=MODEL_SAVE_PATH, map_location=torch.device('cpu')))
+ 
+     model.requires_grad_(False)
+ 
+     return tokenizer, model
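
Loading and running the multilingual model end to end (a minimal sketch; it assumes faknow and gdown are installed, the Google Drive checkpoint is reachable, and it reuses the helpers from components/data_loading.py):

    tokenizer, model = loading_model_and_tokenizer()        # downloads the checkpoint on first call
    df = preparing_data("text to verify", domain=1)         # from components/data_loading.py
    token_ids, masks, domains = loading_data(tokenizer, df)
    fake_probability = model(token_ids, masks, domains)     # forward() returns the probability of being fake
    one_hot = model.predict({"text": {"token_id": token_ids, "mask": masks}, "domain": domains})
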
components/vector_database/7844365b-3b55-4699-9b53-c95468a94487/data_level0.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:165924562a9957be6b9fedbe5da05ba23f5506229e7d066d4d2b26bd1a5b7e20
+ size 375111
components/vector_database/7844365b-3b55-4699-9b53-c95468a94487/header.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ec6df10978b056a10062ed99efeef2702fa4a1301fad702b53dd2517103c746
+ size 100
components/vector_database/7844365b-3b55-4699-9b53-c95468a94487/length.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40b1f157a58b537e5a572e241092364f1dabb1f43af36b944e8124ff6b35954f
+ size 4000
components/vector_database/chroma.sqlite3 ADDED
Binary file (147 kB).
 
components/vector_db_operations.py ADDED
@@ -0,0 +1,135 @@
+ import pandas as pd
+ import os
+ import chromadb
+ from chromadb.utils import embedding_functions
+ import math
+ 
+ 
+ def create_domain_identification_database(vdb_path: str, collection_name: str, df: pd.DataFrame) -> None:
+     """Processes the dataframe into the required format and creates the following collection in a ChromaDB instance:
+     1. domain_identification_collection - contains the input text embeddings, with the other columns as metadata
+ 
+     Args:
+         vdb_path (str): relative path of the location of the ChromaDB instance.
+         collection_name (str): name of the database collection.
+         df (pd.DataFrame): domain identification dataset.
+     """
+     # identify the saving location of the ChromaDB instance
+     chroma_client = chromadb.PersistentClient(path=vdb_path)
+ 
+     # load the embedding function from Hugging Face
+     embedding_function = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="sentence-transformers/LaBSE")
+ 
+     # create the collection
+     domain_identification_collection = chroma_client.create_collection(
+         name=collection_name,
+         embedding_function=embedding_function,
+     )
+ 
+     # the main text "query" that will be embedded
+     domain_identification_documents = [row.query for row in df.itertuples()]
+ 
+     # the metadata
+     domain_identification_metadata = [
+         {"domain": row.domain, "label": row.label}
+         for row in df.itertuples()
+     ]
+ 
+     # the ids
+     domain_ids = ["domain_id " + str(row.Index) for row in df.itertuples()]
+ 
+     # add the vectors in batches of 166
+     length = len(df)
+     num_iteration = math.ceil(length / 166)
+ 
+     start = 0
+     for i in range(num_iteration):
+         if i == num_iteration - 1:
+             domain_identification_collection.add(documents=domain_identification_documents[start:], metadatas=domain_identification_metadata[start:], ids=domain_ids[start:])
+         else:
+             end = start + 166
+             domain_identification_collection.add(documents=domain_identification_documents[start:end], metadatas=domain_identification_metadata[start:end], ids=domain_ids[start:end])
+             start = end
+     return None
+ 
+ 
+ def delete_collection_from_vector_db(vdb_path: str, collection_name: str) -> None:
+     """Deletes a particular collection from the persistent ChromaDB instance.
+ 
+     Args:
+         vdb_path (str): path of the persistent ChromaDB instance.
+         collection_name (str): name of the collection to be deleted.
+     """
+     chroma_client = chromadb.PersistentClient(path=vdb_path)
+     chroma_client.delete_collection(collection_name)
+     return None
+ 
+ 
+ def list_collections_from_vector_db(vdb_path: str) -> None:
+     """Lists all the available collections from the persistent ChromaDB instance.
+ 
+     Args:
+         vdb_path (str): path of the persistent ChromaDB instance.
+     """
+     chroma_client = chromadb.PersistentClient(path=vdb_path)
+     print(chroma_client.list_collections())
+ 
+ 
+ def get_collection_from_vector_db(
+         vdb_path: str, collection_name: str
+ ) -> chromadb.Collection:
+     """Fetches a particular ChromaDB collection object from the persistent ChromaDB instance.
+ 
+     Args:
+         vdb_path (str): path of the persistent ChromaDB instance.
+         collection_name (str): name of the collection to be retrieved.
+     """
+     chroma_client = chromadb.PersistentClient(path=vdb_path)
+ 
+     huggingface_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="sentence-transformers/LaBSE")
+ 
+     collection = chroma_client.get_collection(
+         name=collection_name, embedding_function=huggingface_ef
+     )
+ 
+     return collection
+ 
+ 
+ def retrieval(input_text: str,
+               num_results: int,
+               collection: chromadb.Collection):
+     """Fetches the domain name from the collection based on semantic similarity.
+ 
+     Args:
+         input_text: the received text, which can be news, posts, or tweets
+         num_results: number of examples to fetch from the collection
+         collection: the collection extracted from the database to fetch examples from
+     """
+     fetched_domain = collection.query(
+         query_texts=[input_text],
+         n_results=num_results,
+     )
+ 
+     # extract the domain name, label, and distance from the fetched domains
+     domain = fetched_domain["metadatas"][0][0]["domain"]
+     label = fetched_domain["metadatas"][0][0]["label"]
+     distance = fetched_domain["distances"][0][0]
+ 
+     return domain, label, distance
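
Putting the vector-database helpers together (a sketch; the collection name is illustrative, the path points at the components/vector_database folder added in this commit, and df must have "query", "domain", and "label" columns as the create function expects):

    # build the collection once from the domain identification dataframe
    create_domain_identification_database("components/vector_database", "domain_identification", df)

    # later, at inference time
    collection = get_collection_from_vector_db("components/vector_database", "domain_identification")
    domain, label, distance = retrieval("incoming post or news text", num_results=1, collection=collection)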