Mishal23 commited on
Commit
a68dee6
·
verified ·
1 Parent(s): 2c164ed

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
+
5
# Hub repo id of the fine-tuned DialoGPT model.
# NOTE(review): "username" looks like a placeholder — replace with the real
# repo owner or the app will fail to load; TODO confirm the actual repo id.
DEFAULT_MODEL_ID = "username/fine-tuned-dialoGPT-crm-chatbot"


@st.cache_resource  # Cache the model so it is loaded once per process, not on every Streamlit rerun
def load_model(model_id: str = DEFAULT_MODEL_ID):
    """Load a fine-tuned causal LM and its tokenizer from the Hugging Face Hub.

    Args:
        model_id: Hub repo id of the model/tokenizer pair to load.
            Defaults to the app's CRM chatbot model.

    Returns:
        A ``(model, tokenizer)`` pair on success, or ``(None, None)`` if
        loading failed (the error is shown in the Streamlit UI instead of
        being raised, so the app can degrade gracefully).
    """
    try:
        st.write("Loading model and tokenizer...")
        model = AutoModelForCausalLM.from_pretrained(model_id)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        return model, tokenizer
    except Exception as e:  # UI boundary: surface any load failure to the user
        st.error(f"Failed to load the model or tokenizer: {e}")
        return None, None
17
+
18
# Generate the chatbot's reply for a single user message.
def generate_response(model, tokenizer, input_text, max_length=100):
    """Generate a reply to *input_text* with a causal LM (e.g. DialoGPT).

    Args:
        model: Hugging Face causal language model.
        tokenizer: Tokenizer matching *model*.
        input_text: The user's message.
        max_length: Upper bound on total sequence length (prompt + reply).

    Returns:
        The generated reply text, or a fallback apology string if generation
        failed (the error is shown in the Streamlit UI).
    """
    try:
        # Tokenize with the attention mask included: DialoGPT reuses the EOS
        # token as padding, and generate() needs the mask to distinguish
        # real tokens from padding in that case.
        encoded = tokenizer(input_text, return_tensors="pt")
        input_ids = encoded["input_ids"]

        with torch.no_grad():  # inference only — skip gradient bookkeeping
            outputs = model.generate(
                input_ids,
                attention_mask=encoded["attention_mask"],
                max_length=max_length,
                pad_token_id=tokenizer.eos_token_id,
            )

        # BUG FIX: generate() returns the prompt followed by the reply, so
        # decoding outputs[0] in full would echo the user's own message back
        # as part of the bot's answer. Decode only the newly generated tokens.
        new_tokens = outputs[0][input_ids.shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)
    except Exception as e:  # UI boundary: surface generation failures to the user
        st.error(f"Error during text generation: {e}")
        return "Sorry, something went wrong while generating the response."
32
+
33
# Main Streamlit app function
def chatbot_app():
    """Render the CRM chatbot UI and handle one request/response turn."""
    # Fetch the (cached) model/tokenizer pair.
    model, tokenizer = load_model()

    # Guard clause: nothing to render without a working model.
    if tokenizer is None or model is None:
        st.error("Unable to load the chatbot model. Please check API or model availability.")
        return

    st.title("CRM Chatbot")
    st.write("This chatbot helps with customer service inquiries. Feel free to ask anything!")

    # Per-session transcript so the conversation survives Streamlit reruns.
    st.session_state.setdefault('chat_history', [])

    # One text box for the user's message plus an explicit send button.
    user_input = st.text_input("You:", value="", key="input")
    send_clicked = st.button("Send")

    if send_clicked and user_input:
        # Record the user's turn, then ask the model for its reply.
        st.session_state.chat_history.append(f"You: {user_input}")
        bot_response = generate_response(model, tokenizer, user_input)
        st.session_state.chat_history.append(f"Chatbot: {bot_response}")

    # Replay the full transcript on every rerun.
    for message in st.session_state.chat_history:
        st.write(message)
64
+
65
# Run the chatbot app only when this file is executed as a script
# (importing the module elsewhere stays side-effect free).
if __name__ == "__main__":
    chatbot_app()