joyinning committed on
Commit a23be62
1 Parent(s): 132117d

Delete README.md and Upload files

Files changed (5)
  1. README.md +0 -13
  2. app.py +34 -59
  3. model_utils.py +256 -0
  4. models.zip +3 -0
  5. requirements.txt +4 -1
README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Chatbot Info Extraction
- emoji: 💬
- colorFrom: yellow
- colorTo: purple
- sdk: gradio
- sdk_version: 4.36.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
app.py CHANGED
@@ -1,63 +1,38 @@
- import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
 
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
 
  """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
 
-
- if __name__ == "__main__":
-     demo.launch()
+ # -*- coding: utf-8 -*-
+ """app
 
+ Automatically generated by Colab.
 
+ Original file is located at
+     https://colab.research.google.com/drive/1Glbl7TT2ZahRqXHGYp9J3zH5U4ZB0Dsd
  """
 
+ import gradio as gr
+ from model_utils import load_models, extract_information, predict_tags, extract_4w_qa, generate_why_or_how_question_and_answer
+
+ bert_model, bilstm_model, ner_tokenizer, id2label_ner = load_models()
+
+ def extract_and_display_info(user_input):
+     if user_input:
+         ner_tags = predict_tags(user_input, bilstm_model, ner_tokenizer, id2label_ner)
+         extracted_info = extract_4w_qa(user_input, ner_tags)
+         qa_result = generate_why_or_how_question_and_answer(extracted_info, user_input)
+
+         if qa_result:
+             extracted_info["Generated Question"] = qa_result["question"]
+             extracted_info["Answer"] = qa_result["answer"]
+
+         output_text = "Extracted Information:\n"
+         for question, answer in extracted_info.items():
+             output_text += f"- **{question}:** {answer}\n"
+         return output_text
+     else:
+         return "Please enter some text."
+
+ iface = gr.Interface(
+     fn=extract_and_display_info,
+     inputs="text",
+     outputs="text",
+     title="Information Extraction Chatbot"
+ )
+ iface.launch()
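For reference, the pipeline that the new app.py exposes through gr.Interface can also be exercised without launching the web UI. Below is a minimal sketch, assuming models.zip has been unpacked so the paths in model_utils.py resolve, an OpenAI key is configured, and load_models() returns the four objects app.py unpacks; the sample sentence and printed result are illustrative only.

# Hypothetical smoke test, not part of this commit.
from model_utils import load_models, predict_tags, extract_4w_qa

bert_model, bilstm_model, ner_tokenizer, id2label_ner = load_models()

sentence = "Barack Obama visited Paris in 2015."  # illustrative input
tags = predict_tags(sentence, bilstm_model, ner_tokenizer, id2label_ner)  # token-level NER tags
print(extract_4w_qa(sentence, tags))  # e.g. {"Who": "Barack Obama", "Where": "Paris", "When": "2015"}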
model_utils.py ADDED
@@ -0,0 +1,256 @@
+ import torch
+ from transformers import BertTokenizer, BertForTokenClassification, pipeline
+ import pickle  # for saving and loading Python objects
+ from openai import OpenAI
+ import tiktoken
+ from transformers import AutoConfig, AutoTokenizer
+ import os
+ import torch.nn as nn
+ from transformers import AutoModel, AutoConfig
+
+ client = OpenAI(api_key="sk-proj-K2n4UpzlAKfw464kITLHT3BlbkFJfXtLIl4Ejhn1KHQOjnTq")
+
+ # Define BiLSTMForTokenClassification Class
+
+
+ class BiLSTMForTokenClassification(nn.Module):
+     """
+     This model combines BERT embeddings with a Bidirectional LSTM (BiLSTM) for token-level classification
+     tasks like Named Entity Recognition (NER).
+
+     Args:
+         model_name: Name of the pre-trained BERT model to use (e.g., "bert-base-cased").
+         num_labels: Number of different labels to predict.
+         hidden_size: Dimension of the hidden states in the BiLSTM (default: 128).
+         num_lstm_layers: Number of stacked BiLSTM layers (default: 1).
+     """
+     def __init__(self, model_name, num_labels, hidden_size=128, num_lstm_layers=1):
+         super().__init__()
+         self.num_labels = num_labels
+         self.config = AutoConfig.from_pretrained(model_name)
+         self.bert = AutoModel.from_pretrained(model_name)
+
+         # Freeze BERT embeddings
+         for name, param in self.bert.named_parameters():
+             if name.startswith("embeddings"):
+                 param.requires_grad = False
+
+         self.bilstm = nn.LSTM(self.bert.config.hidden_size, hidden_size, num_layers=num_lstm_layers, bidirectional=True, batch_first=True)
+         self.dropout = nn.Dropout(0.1)
+         self.classifier = nn.Linear(hidden_size * 2, num_labels)
+
+     def forward(self, input_ids, attention_mask=None, labels=None):
+         if attention_mask is None:
+             attention_mask = torch.ones_like(input_ids)
+
+         outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
+         sequence_output = outputs[0]
+         lstm_output, _ = self.bilstm(sequence_output)
+         lstm_output = self.dropout(lstm_output)
+
+         logits = self.classifier(lstm_output)
+         loss = None
+
+         if labels is not None:
+             loss_fct = nn.CrossEntropyLoss()
+             active_loss = attention_mask.view(-1) == 1
+             active_logits = logits.view(-1, self.num_labels)
+             active_labels = torch.where(active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels))
+             valid_mask = (active_labels >= 0) & (active_labels < self.num_labels)
+             active_logits = active_logits[valid_mask]
+             active_labels = active_labels[valid_mask]
+             loss = loss_fct(active_logits, active_labels)
+
+         return {'loss': loss, 'logits': logits}
+
+ # Load custom BiLSTM and pre-trained BERT
+ def load_models():
+     bert_model = BertForTokenClassification.from_pretrained("joyinning/chatbot-info-extraction/models/bert-model.pkl")
+     bert_model.eval()
+
+     with open('joyinning/chatbot-info-extraction/models/bilstm-model.pkl', 'rb') as f:
+         bilstm_model = pickle.load(f)
+
+     return bert_model, bilstm_model, ner_tokenizer, id2label_ner  # matches the unpacking in app.py
+
+ def load_custom_model(model_dir, tokenizer_dir, id2label):
+     config = AutoConfig.from_pretrained(model_dir, local_files_only=True)
+     config.id2label = id2label
+     config.num_labels = len(id2label)
+
+     model = BiLSTMForTokenClassification(model_name=config._name_or_path, num_labels=config.num_labels)
+     model.config.id2label = id2label
+     model.load_state_dict(torch.load(os.path.join(model_dir, 'pytorch_model.bin'), map_location=torch.device('cpu')))
+     tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir, local_files_only=True)
+
+     return model, tokenizer
+
+ ner_model_dir = "models/bilstm_ner"
+ tokenizer_dir = "models/tokenizer"
+ id2label_ner = {0: 'O', 1: 'I-art', 2: 'B-org', 3: 'B-geo', 4: 'I-per', 5: 'B-eve', 6: 'I-geo', 7: 'B-per', 8: 'I-nat', 9: 'B-art', 10: 'B-tim', 11: 'I-gpe', 12: 'I-tim', 13: 'B-nat', 14: 'B-gpe', 15: 'I-org', 16: 'I-eve'}
+ ner_model, ner_tokenizer = load_custom_model(ner_model_dir, tokenizer_dir, id2label_ner)
+
+ # QA model
+ qa_model = pipeline('question-answering', model='deepset/bert-base-cased-squad2')
+
+ # Function to extract information
+ def extract_information(text, bert_model, bilstm_model, ner_tokenizer, id2label_ner):
+     extracted_info = {}
+
+     ner_tags = predict_tags(text, bilstm_model, ner_tokenizer, id2label_ner)
+
+     extracted_info.update(extract_4w_qa(text, ner_tags))
+
+     qa_result = generate_why_or_how_question_and_answer(extracted_info, text)
+     if qa_result:
+         extracted_info.update(qa_result)
+         prompt = f"Question: {qa_result['question']}\nContext: {text}\nAnswer:"
+         extracted_info["Token Count"] = count_tokens(prompt)
+
+     return extracted_info
+
+
+ def predict_tags(sentence, model, tokenizer, label_map):
+     """
+     Predicts NER tags for a given sentence using the specified model and tokenizer.
+
+     Args:
+         sentence: The input sentence as a string.
+         model: The pre-trained model (BiLSTM) for tag prediction.
+         tokenizer: The tokenizer used for converting the sentence into tokens.
+         label_map: A dictionary mapping numerical label indices to their corresponding tags.
+
+     Returns:
+         A list of predicted tags for each token in the sentence.
+     """
+     tokens = tokenizer.tokenize(tokenizer.decode(tokenizer.encode(sentence)))
+     inputs = tokenizer.encode(sentence, return_tensors='pt')
+
+     outputs = model(inputs)
+     logits = outputs['logits']
+     predictions = torch.argmax(logits, dim=2)
+
+     labels = [label_map.get(prediction.item(), "O") for prediction in predictions[0][1:-1]]
+     return labels
+
+ def extract_4w_qa(sentence, ner_tags):
+     """
+     Extracts 4W (Who, What, When, Where) information from a sentence
+     using NER tags and a question-answering model.
+
+     Args:
+         sentence: The input sentence as a string.
+         ner_tags: A list of predicted NER tags for each token in the sentence.
+
+     Returns:
+         A dictionary where keys are 4W question words and values are the corresponding
+         answers extracted from the sentence.
+     """
+     result = {}
+     questions = {
+         "B-per": "Who",
+         "I-per": "Who",
+         "B-geo": "Where",
+         "I-geo": "Where",
+         "B-org": "What organization",
+         "I-org": "What organization",
+         "B-tim": "When",
+         "I-tim": "When",
+         "B-art": "What art",
+         "I-art": "What art",
+         "B-eve": "What event",
+         "I-eve": "What event",
+         "B-nat": "What natural phenomenon",
+         "I-nat": "What natural phenomenon",
+     }
+
+     for ner_tag, entity in zip(ner_tags, sentence.split()):  # Removed pos_tags
+         if ner_tag in questions:
+             question = f"{questions[ner_tag]} is {entity}?"  # Removed pos_tag
+             answer = qa_model(question=question, context=sentence)["answer"]
+             result[questions[ner_tag]] = answer
+
+     return result
+
+ def count_tokens(text):
+     """
+     Counts the number of tokens in a text string using the tiktoken encoding for GPT-3.5 Turbo.
+
+     Args:
+         text: The input text string.
+
+     Returns:
+         The number of tokens in the text.
+     """
+     encoding = tiktoken.encoding_for_model("gpt-3.5-turbo-instruct")
+     return len(encoding.encode(text))
+
+ def generate_why_or_how_question_and_answer(extracted_info, sentence):
+     """
+     Generates a "Why" or "How" question based on the extracted 4W information and gets the answer using GPT-3.5.
+
+     Args:
+         extracted_info: A dictionary containing the extracted 4W information.
+         sentence: The original sentence.
+
+     Returns:
+         A dictionary containing the generated question and its answer, or None if no relevant question can be generated.
+     """
+
+     prompt_template = """
+     Given the following extracted information and the original sentence, generate a relevant "Why" or "How" question and provide a concise answer based on the given context.
+
+     Extracted Information: {extracted_info}
+     Sentence: {sentence}
+
+     Question and Answer:
+     """
+
+     prompt = prompt_template.format(extracted_info=extracted_info, sentence=sentence)
+     response = client.chat.completions.create(
+         model="gpt-3.5-turbo",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": prompt},
+         ],
+         max_tokens=150,
+         stop=None,
+         temperature=0.5,
+     )
+
+     question_and_answer = response.choices[0].message.content.strip()
+
+     if question_and_answer:
+         try:
+             question, answer = question_and_answer.split("\n", 1)
+             return {"question": question, "answer": answer}
+         except ValueError:
+             return None
+     else:
+         return None
+
+ def get_why_or_how_answer(question, context):
+     """
+     Queries OpenAI's GPT-3.5 model to generate an answer for a given question based on the provided context.
+
+     Args:
+         question (str): The question to be answered.
+         context (str): The text context from which the answer should be extracted.
+
+     Returns:
+         str: The generated answer from GPT-3.5.
+     """
+     prompt = f"Question: {question}\nContext: {context}\nAnswer:"
+
+     response = client.chat.completions.create(
+         model="gpt-3.5-turbo",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": prompt},
+         ],
+         max_tokens=150,
+         stop=None,
+         temperature=0.5,
+     )
+
+     return response.choices[0].message.content.strip()
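Taken together, model_utils.py loads the BiLSTM tagger, tokenizer, and QA pipeline at import time and chains them in extract_information. A minimal end-to-end sketch follows, assuming the models/ directory from models.zip is in place and the OpenAI client is authorized; the input text and printed keys are illustrative.

# Hypothetical end-to-end run, not part of this commit.
from model_utils import load_models, extract_information

bert_model, bilstm_model, ner_tokenizer, id2label_ner = load_models()

text = "NASA launched Artemis I from Florida in November 2022."  # illustrative input
info = extract_information(text, bert_model, bilstm_model, ner_tokenizer, id2label_ner)
for key, value in info.items():
    print(f"{key}: {value}")
# Expected keys include "What organization", "Where", "When", plus "question",
# "answer", and "Token Count" when the GPT-3.5 call returns a usable pair.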
models.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfe8ec2516acda102b287fb506e72cfc626e39cde319f454153f3fb3ad01145a
+ size 1450014492
requirements.txt CHANGED
@@ -1 +1,4 @@
- huggingface_hub==0.22.2
+ streamlit
+ torch
+ transformers
+ pickle
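Note that the committed requirements.txt lists pickle, which ships with the Python standard library and cannot be installed from PyPI, lists streamlit even though the app uses Gradio, and omits several packages the new code imports. A sketch of a requirements set matching the imports in app.py and model_utils.py, offered as an assumption rather than as part of the commit:

# Hypothetical requirements matching the imports; pickle needs no entry (stdlib).
gradio
torch
transformers
openai
tiktoken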