william4416 committed
Commit • e6d1128
1 Parent(s): 743067f
Update app.py
app.py
CHANGED
@@ -1,68 +1,58 @@
-from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
-import
+from transformers import AutoModelForCausalLM, AutoTokenizer
 import json

-[old lines 6-19 removed; content not captured in this view]
+# Load pre-trained model and tokenizer (replace with desired model if needed)
+model_name = "microsoft/DialoGPT-large"
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# Function to process user input and generate response
+def chat(message, history):
+    # Preprocess user input
+    input_ids = tokenizer(message, return_tensors="pt")["input_ids"]
+
+    # Generate response with beam search to improve fluency
+    generated_outputs = model.generate(
+        input_ids,
+        max_length=512,  # Adjust max_length as needed for response length
+        num_beams=5,  # Experiment with num_beams for better phrasing
+        no_repeat_ngram_size=2,  # Prevent repetition in responses
+        early_stopping=True,  # Stop generation if response seems complete
     )

-[old lines 22-59 removed; content not captured in this view]
-gr.Interface(
-    fn=predict,
-    title=title,
-    description=description,
-    examples=examples,
-    inputs=["text", "state"],
-    outputs=["chatbot", "state"],
-    theme="finlaymacklon/boxy_violet",
-).launch()
+    # Decode generated tokens to text
+    response = tokenizer.batch_decode(generated_outputs, skip_special_tokens=True)[0]
+
+    # Access and process JSON files (improved structure)
+    json_files = {
+        "fileone.json": "your_key_in_fileone",
+        "filesecond.json": "your_key_in_filesecond",
+        "filethird.json": "your_key_in_filethird",
+        "filefourth.json": "your_key_in_filefourth",
+        "filefifth.json": "your_key_in_filefifth",
+    }
+
+    if any(word in message.lower() for word in ["file", "data", "information"]):
+        try:
+            # Find the relevant JSON file based on keywords in message
+            relevant_file = next(
+                file for file, key in json_files.items() if key.lower() in message.lower()
+            )
+            with open(relevant_file, "r") as f:
+                data = json.load(f)
+            relevant_info = data.get(json_files[relevant_file], "No relevant information found")
+            response += f"\nHere's some information I found in {relevant_file}: {relevant_info}"
+        except (FileNotFoundError, StopIteration):
+            response += "\nCouldn't find the requested file or information."
+        except json.JSONDecodeError:
+            response += "\nError processing the JSON data."
+
+    # Update history with current conversation (optional)
+    history.append([message, response])
+
+    return response
+
+# Create Gradio interface for the chatbot
+interface = gr.Interface(chat, inputs="textbox", outputs="textbox", catch_exceptions=True)
+interface.launch(share=True)  # Launch the Gradio app and share link
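As committed, the last two lines are fragile: chat() expects (message, history), but a gr.Interface built from a single textbox passes only the message, and catch_exceptions is not a documented gr.Interface parameter. A minimal working hookup, assuming a recent Gradio release that provides gr.ChatInterface (which calls fn(message, history) on every turn and manages the conversation log itself), might look like this:

import gradio as gr

# Sketch, not the committed code: gr.ChatInterface supplies (message, history)
# each turn and keeps the chat log, so chat() needs no manual history list.
demo = gr.ChatInterface(fn=chat, title="DialoGPT chatbot")  # title is illustrative
demo.launch(share=True)  # share=True requests a temporary public link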
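Separately, DialoGPT's generate() returns the prompt tokens followed by the reply, so running batch_decode over the full output echoes the user's message back, and the model card conditions each turn on an EOS separator. A hedged sketch of that model-card pattern, reusing the committed model, tokenizer, and generation settings:

# Sketch of the DialoGPT model-card usage: append the EOS token to the user
# turn, then decode only the newly generated tokens so the reply does not
# repeat the prompt.
input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
output_ids = model.generate(
    input_ids,
    max_length=512,
    num_beams=5,
    no_repeat_ngram_size=2,
    early_stopping=True,
    pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
)
response = tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)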
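The JSON branch fires only when the message contains "file", "data", or "information", and it picks the first file whose configured key string also appears in the message, then reuses that same string as the lookup key inside the file. A self-contained demo of that dispatch, using a hypothetical fileone.json written on the spot (the key name is the committed placeholder, not a real schema):

import json

# Hypothetical fixture so the lookup has something to hit.
with open("fileone.json", "w") as fh:
    json.dump({"your_key_in_fileone": "a stored fact"}, fh)

json_files = {"fileone.json": "your_key_in_fileone"}
message = "show me data about your_key_in_fileone"

# Same selection logic as chat(): the configured key must appear in the message.
relevant_file = next(
    f for f, key in json_files.items() if key.lower() in message.lower()
)
with open(relevant_file) as fh:
    info = json.load(fh).get(json_files[relevant_file], "No relevant information found")
print(f"Here's some information I found in {relevant_file}: {info}")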