william4416 committed on
Commit
6e93ad1
1 Parent(s): 3fd0f5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -31
app.py CHANGED
@@ -1,43 +1,54 @@
1
- import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
 
3
  import torch
4
  import json
5
 
6
- title = "AI ChatBot"
7
- description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
8
- examples = [["How are you?"]]
9
 
 
10
  tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
11
  model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
12
 
13
def predict(input, history=[], file_path=None):
    """Generate a chatbot reply with DialoGPT.

    Args:
        input: the user's message (name shadows the builtin ``input``;
            kept to preserve the existing call signature).
        history: flat list of token ids for the conversation so far.
            NOTE(review): the mutable default is tolerated here because
            Gradio passes the state explicitly on every call after the
            first — confirm no other caller relies on it.
        file_path: optional path to a JSON file whose contents are
            printed to stdout as a debugging aid.

    Returns:
        Tuple ``(response_text, updated_history)`` where ``updated_history``
        is the full token-id transcript produced by ``model.generate``.
    """
    if file_path:
        # Debug aid only: dump the uploaded JSON file to stdout.
        json_data = read_json_file(file_path)
        print(f"Contents of {file_path}:")
        print(json_data)
        print()
    # Encode the new user turn, terminated by the EOS token DialoGPT
    # uses as a turn separator.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
    # Prepend prior conversation tokens so the model sees full context.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    history = model.generate(bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id).tolist()
    # BUG FIX: decode only the tokens generated *after* the prompt and skip
    # special tokens. The old code decoded the whole transcript and split on
    # "\n", but DialoGPT separates turns with EOS tokens, not newlines, so
    # the "response" echoed the entire conversation.
    response = tokenizer.decode(history[0][bot_input_ids.shape[-1]:], skip_special_tokens=True)
    return response, history
24
 
25
def read_json_file(file_path):
    """Parse a JSON file and return the resulting Python object.

    Args:
        file_path: path to a UTF-8 encoded JSON document.

    Returns:
        The deserialized object (dict, list, str, number, ...).

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the contents are not valid JSON.
    """
    # Explicit encoding avoids platform-dependent default-codec surprises
    # (e.g. cp1252 on Windows) when the JSON contains non-ASCII text.
    with open(file_path, 'r', encoding='utf-8') as file:
        return json.load(file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
def main():
    """Build and launch the Gradio chat interface.

    Wires ``predict`` to a textbox plus an optional JSON file upload, and
    keeps the token-id conversation history in a ``state`` component so
    context survives between turns.
    """
    gr.Interface(
        fn=predict,
        title=title,
        description=description,
        examples=examples,
        # BUG FIX: `gr.inputs.*` was removed in Gradio 3+; use the
        # top-level components instead. A "state" slot is also added
        # (matching predict's `history` parameter) in both inputs and
        # outputs — the old wiring never round-tripped `history`, so the
        # conversation context was silently dropped every turn.
        inputs=[gr.Textbox(label="User Input"), "state", gr.File(label="JSON File")],
        outputs=["text", "state"],
        theme="finlaymacklon/boxy_violet",
    ).launch()


if __name__ == "__main__":
    main()
 
 
 
 
 
 
 
 
 
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import json

# UI metadata displayed by the Gradio interface.
title = "Smart AI ChatBot"
description = "A conversational model capable of intelligently answering questions (DialoGPT)"
examples = [["How are you?"], ["What's the weather like?"]]

# Load DialoGPT model and tokenizer
# NOTE: this runs at import time and downloads the (large) pretrained
# weights on first use — importing this module is not side-effect free.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")

# Known question-answer pairs, you can add more as per your requirement
# `predict` checks this table first (exact string match on the user input)
# and only falls back to model generation when no canned answer exists.
known_questions_answers = {
    "How are you?": "I'm fine, thank you for asking.",
    "What's the weather like?": "The weather is nice today, sunny and warm.",
    "What's your name?": "I am Smart AI ChatBot.",
    "Do you speak English?": "I can understand and respond to English questions.",
}
 
 
 
 
21
 
22
def predict(input, history=[]):
    """Answer a user message, preferring canned answers over generation.

    Args:
        input: the user's message (name shadows the builtin ``input``;
            kept to preserve the existing call signature).
        history: flat list of token ids for the conversation so far.
            Gradio supplies this via the "state" component on each call,
            so the mutable default is only hit on the first turn.

    Returns:
        Tuple ``(response_text, updated_history)``. ``history`` is
        unchanged when a canned answer is used.
    """
    # Exact-match lookup against the hand-curated Q/A table first; this
    # bypasses the model entirely for known questions.
    if input in known_questions_answers:
        return known_questions_answers[input], history

    # Encode the new user turn, terminated by the EOS token DialoGPT
    # uses as a turn separator.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors="pt")
    # Prepend prior conversation tokens so the model sees full context.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    # Generate a response (max_length counts prompt + new tokens).
    history = model.generate(
        bot_input_ids, max_length=400, pad_token_id=tokenizer.eos_token_id
    ).tolist()
    # BUG FIX: decode only the tokens generated *after* the prompt.
    # Decoding history[0] in full would echo the whole transcript (every
    # past user and bot turn, concatenated) as the "response".
    response = tokenizer.decode(history[0][bot_input_ids.shape[-1]:], skip_special_tokens=True)
    return response, history
41
 
42
def main():
    """Build and launch the Gradio interface for the chatbot.

    TODO: load additional known question-answer pairs (e.g. from a JSON
    file) into ``known_questions_answers`` before launching.
    """
    gr.Interface(
        fn=predict,
        title=title,
        description=description,
        examples=examples,
        # "state" round-trips `history` between calls so context is kept.
        inputs=["text", "state"],
        # BUG FIX: `predict` returns a plain string, which the "chatbot"
        # component cannot render (it expects a list of message pairs);
        # a "text" output matches the actual return type.
        outputs=["text", "state"],
        theme="finlaymacklon/boxy_violet",
    ).launch()


# Guarding the launch lets the module be imported (e.g. for tests)
# without starting a web server as an import side effect.
if __name__ == "__main__":
    main()