mohamedemam committed on
Commit
80718d0
1 Parent(s): c0532c6

Upload 2 files

Files changed (2)
  1. bert_gradio.py +160 -0
  2. bmtss.py +189 -0
bert_gradio.py ADDED
@@ -0,0 +1,160 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[2]:
+
+
+ import pandas as pd
+ import streamlit as st
+ import torch
+ from torch.utils.data import DataLoader, Dataset
+ from transformers import AutoTokenizer, BertForQuestionAnswering, AutoModel
+
+
+ # In[3]:
+
+
+ model_checkpoint = "bert-base-uncased"
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+
+
+ # In[4]:
+
+
+ from transformers import DataCollatorWithPadding
+
+
+ # In[5]:
+
+
+ torch.set_default_device('cpu')
+
+
+ # In[6]:
+
+
+ from transformers import BertTokenizer, BertModel
+
+
+ # In[7]:
+
+
+ class bert_compare(torch.nn.Module):
+     """BERT encoder followed by two 1-D convolutions and a linear scoring head."""
+     def __init__(self):
+         super(bert_compare, self).__init__()
+         self.bert = BertModel.from_pretrained("bert-base-uncased")
+
+         self.Linear = torch.nn.Linear(768, 30)
+         self.elu = torch.nn.ELU()
+         # With max_length=30 and two kernel-size-2 convolutions the sequence
+         # length shrinks to 28, so the flattened feature size is 10 * 28 = 280.
+         self.Linear2 = torch.nn.Linear(280, 1)
+         self.cnn1 = torch.nn.Conv1d(768, 256, kernel_size=2)
+         self.cnn2 = torch.nn.Conv1d(256, 10, kernel_size=2)
+
+         self.relu = torch.nn.ReLU()
+
+     def forward(self, x):
+         x = self.bert(**x).last_hidden_state  # (batch, seq_len, 768)
+         x = x.permute(0, 2, 1)                # channels first for Conv1d
+         x = self.cnn1(x)
+         x = self.relu(x)
+         x = self.cnn2(x)
+         x = torch.nn.Flatten()(x)
+         x = self.Linear2(x)
+         return x                              # raw logit; apply sigmoid outside
+
+
+ # In[8]:
+
+
+ model = bert_compare()
+ optim = torch.optim.AdamW(model.parameters(), lr=5e-5)
+ loss = torch.nn.BCEWithLogitsLoss()
+
+
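+ # A minimal sketch of a single update step with the `optim` and `loss` objects
+ # defined above (no training loop appears in this file); `pair` and `labels`
+ # are placeholder names, and the assumption that label 1.0 marks a matching
+ # pair is illustrative only.
+ #
+ # pair = tokenizer("first sentence", "second sentence", truncation=True,
+ #                  max_length=30, padding='max_length', return_tensors="pt")
+ # labels = torch.tensor([[1.0]])
+ # optim.zero_grad()
+ # logits = model(pair)                     # raw logit, shape (1, 1)
+ # step_loss = loss(logits, labels)         # BCEWithLogitsLoss works on raw logits
+ # step_loss.backward()
+ # optim.step()
+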
+ # In[9]:
+
+
+ def tok(x, y):
+     """Tokenize a sentence pair to a fixed length of 30 tokens."""
+     out = tokenizer(x, y, truncation=True, max_length=30,
+                     padding='max_length', return_tensors="pt")
+     return dict(out)
+
+ h = tok('my name is mohamed', 'what is your name')
+ model(h)
+
+
+ # In[10]:
+
+
+ # Load the previously saved model onto the CPU.
+ model = torch.load('Downloads/model9.pth', map_location=torch.device('cpu'))
+
+
+ # In[11]:
+
+
+ word = ['my name is mohamed ',
+         "How do I read and find my YouTube comments?",
+         "How can I see all my Youtube comments?",
+         "How can Internet speed be increased by hacking through DNS?",
+         "What is the step by step guide to invest in share market in india?",
+         "where is capital of egypt?",
+         'when did you born ',
+         'what is your name',
+         "what is capital of egypt",
+         'how old are you']
+
+
+ # In[19]:
+
+
+ import gradio as gr
+
+
+ # In[12]:
+
+
+ # Sanity check: score a few random sentence pairs from the list above.
+ for i in range(9):
+     r = int(torch.randint(len(word), size=(1,)))
+     r2 = int(torch.randint(len(word), size=(1,)))
+     h = tok(word[r], word[r2])
+     e = model(h)
+     ans = 'the same' if int(torch.sigmoid(e) >= .5) else 'not the same'
+     print(f'{word[r]} is {ans} as {word[r2]}')
+
+
+ # In[32]:
+
+
+ def sentence_calculate(sentence1, sentence2) -> (float, str):
+     """Return the similarity probability and a readable verdict for a pair."""
+     h = tok(sentence1, sentence2)
+     e = model(h)
+     ans = torch.sigmoid(e)
+     ans2 = 'Same' if ans >= .5 else 'Not same'
+     return float(ans), ans2
+
+
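+ # A quick, optional check of the function wired into the Gradio interface
+ # below; the two sentences are arbitrary examples of the returned
+ # (probability, verdict) pair.
+ # print(sentence_calculate("what is capital of egypt", "where is capital of egypt?"))
+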
+ # In[46]:
+
+
+ iface = gr.Interface(
+     fn=sentence_calculate,
+     inputs=["text", "text"],
+     outputs=["number", "text"],
+     title="Sentence Similarity Checker",
+     description="Enter two sentences to check their similarity.",
+     examples=[
+         ["The sun is in the west.", "The sun goes down in the west."],
+         ["Why is biodiversity important for ecosystems?", "She is extremely joyful."],
+         ["The cat is sleeping on the chair.", "The cat is napping on the chair."],
+         ["Why is biodiversity important for ecosystems?", "When did the Renaissance period begin?"],
+     ],
+ )
+
+ # Launch the interface
+ iface.launch()
+
+
+ # In[ ]:
+
+
bmtss.py ADDED
@@ -0,0 +1,189 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ # In[1]:
+
+
+ # Notebook-style install; get_ipython() is only available under an IPython kernel.
+ get_ipython().system(' pip install transformers')
+
+
+ # In[2]:
+
+
+ import pandas as pd
+ import streamlit as st
+ import torch
+ from torch.utils.data import DataLoader, Dataset
+ from transformers import AutoTokenizer, BertForQuestionAnswering, AutoModel
+
+
+ # In[3]:
+
+
+ model_checkpoint = "bert-base-uncased"
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
+
+
+ # In[4]:
+
+
+ from transformers import DataCollatorWithPadding
+
+
+ # In[5]:
+
+
+ torch.set_default_device('cpu')
+
+
+ # In[6]:
+
+
+ from transformers import BertTokenizer, BertModel
+
+
+ # In[7]:
+
+
+ class bert_compare(torch.nn.Module):
+     """BERT encoder followed by two 1-D convolutions and a linear scoring head."""
+     def __init__(self):
+         super(bert_compare, self).__init__()
+         self.bert = BertModel.from_pretrained("bert-base-uncased")
+
+         self.Linear = torch.nn.Linear(768, 30)
+         self.elu = torch.nn.ELU()
+         # With max_length=30 and two kernel-size-2 convolutions the sequence
+         # length shrinks to 28, so the flattened feature size is 10 * 28 = 280.
+         self.Linear2 = torch.nn.Linear(280, 1)
+         self.cnn1 = torch.nn.Conv1d(768, 256, kernel_size=2)
+         self.cnn2 = torch.nn.Conv1d(256, 10, kernel_size=2)
+
+         self.relu = torch.nn.ReLU()
+
+     def forward(self, x):
+         x = self.bert(**x).last_hidden_state  # (batch, seq_len, 768)
+         x = x.permute(0, 2, 1)                # channels first for Conv1d
+         x = self.cnn1(x)
+         x = self.relu(x)
+         x = self.cnn2(x)
+         x = torch.nn.Flatten()(x)
+         x = self.Linear2(x)
+         return x                              # raw logit; apply sigmoid outside
+
+
+ # In[8]:
+
+
+ model = bert_compare()
+ optim = torch.optim.AdamW(model.parameters(), lr=5e-5)
+ loss = torch.nn.BCEWithLogitsLoss()
+
+
+ # In[9]:
+
+
+ def tok(x, y):
+     """Tokenize a sentence pair to a fixed length of 30 tokens."""
+     out = tokenizer(x, y, truncation=True, max_length=30,
+                     padding='max_length', return_tensors="pt")
+     return dict(out)
+
+ h = tok('my name is mohamed', 'what is your name')
+ model(h)
+
+
+ # In[10]:
+
+
+ get_ipython().system(' pip install tqdm')
+
+
+ # In[11]:
+
+
+ from tqdm import tqdm
+
+
+ # In[12]:
+
+
+ # Put the model (and its BERT encoder) into training mode.
+ model.train()
+
+
+ # In[13]:
+
+
+ model.bert.train()
+
+
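+ # Minimal sketch of how the unused pieces above (DataLoader, tqdm, `optim`,
+ # `loss`) could be combined into a fine-tuning loop; no actual training loop
+ # appears in this file. `train_pairs` and `train_labels` are hypothetical
+ # lists of (sentence1, sentence2) tuples and 0/1 float labels, not defined here.
+ #
+ # loader = DataLoader(list(zip(train_pairs, train_labels)), batch_size=8, shuffle=True)
+ # for (s1, s2), y in tqdm(loader):
+ #     batch = tok(list(s1), list(s2))          # tokenize the sentence pairs
+ #     logits = model(batch)                    # raw logits, shape (batch, 1)
+ #     step_loss = loss(logits, y.float().unsqueeze(1))
+ #     optim.zero_grad()
+ #     step_loss.backward()
+ #     optim.step()
+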
+ # In[14]:
+
+
+ # Load the previously saved model onto the CPU.
+ model = torch.load('Downloads/model9.pth', map_location=torch.device('cpu'))
+
+
+ # In[15]:
+
+
+ word = ['my name is mohamed ',
+         "How do I read and find my YouTube comments?",
+         "How can I see all my Youtube comments?",
+         "How can Internet speed be increased by hacking through DNS?",
+         "What is the step by step guide to invest in share market in india?",
+         "where is capital of egypt?",
+         'when did you born ',
+         'what is your name',
+         "what is capital of egypt",
+         'how old are you']
+
+
+ # In[16]:
+
+
+ # Sanity check: score a few random sentence pairs from the list above.
+ for i in range(9):
+     r = int(torch.randint(len(word), size=(1,)))
+     r2 = int(torch.randint(len(word), size=(1,)))
+     h = tok(word[r], word[r2])
+     e = model(h)
+     ans = 'the same' if int(torch.sigmoid(e) >= .5) else 'not the same'
+     print(f'{word[r]} is {ans} as {word[r2]}')
+
+
+ # In[17]:
+
+
+ h = tok("what is capital of egypt", "when is capital of egypt")
+ e = model(h)
+ ans = 'the same' if int(torch.sigmoid(e) >= .5) else 'not the same'
+ print(f' {ans} ')
+
+
+ # In[19]:
+
+
+ def are_sentences_same(sentence1, sentence2):
+     """Return the model's similarity probability for a pair of sentences."""
+     doc = tok(sentence1, sentence2)
+     out_model = model(doc)
+     return float(torch.sigmoid(out_model))
+
+ def main():
+     st.title('Sentence Similarity Checker')
+     st.write('Enter two sentences to check if they are the same.')
+
+     # Input sentences
+     sentence1 = st.text_input('Enter the first sentence:')
+     sentence2 = st.text_input('Enter the second sentence:')
+
+     # Check if both sentences are provided
+     if sentence1 and sentence2:
+         similarity_score = are_sentences_same(sentence1, sentence2)
+         st.write(f'Similarity Score: {similarity_score:.2f}')
+
+         if similarity_score >= 0.5:
+             st.write('The sentences are very similar.')
+         else:
+             st.write('The sentences are different.')
+
+ # Run with: streamlit run bmtss.py
+ if __name__ == '__main__':
+     main()
+
+
+ # In[ ]:
+
+