devfire committed
Commit 5a6df5d · verified · 1 Parent(s): 0f24ab1

Update app.py

Files changed (1)
  1. app.py +68 -53
app.py CHANGED
@@ -1,86 +1,101 @@
  import os
  import streamlit as st
- import torch
  from groq import Groq
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
-
- # ✅ Ensure set_page_config() is the first Streamlit command
- st.set_page_config(page_title="AI Study Assistant", page_icon="🤖", layout="wide")

  # Set up the Groq API Key
- GROQ_API_KEY = "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"  # Replace with your actual key
+ GROQ_API_KEY = "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"
  os.environ["GROQ_API_KEY"] = GROQ_API_KEY

  # Initialize the Groq client
  client = Groq(api_key=GROQ_API_KEY)

- # ✅ Ensure Accelerate is installed
- try:
-     import accelerate  # noqa: F401
- except ImportError:
-     st.error("⚠️ `accelerate` library is required. Install it with: `pip install accelerate`")
-
- # ✅ Initialize Hugging Face DeepSeek R1 model correctly
- MODEL_NAME = "deepseek-ai/DeepSeek-R1"
-
- try:
-     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
-
-     model = AutoModelForCausalLM.from_pretrained(
-         MODEL_NAME,
-         trust_remote_code=True,
-         torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,  # ✅ Use FP16 on GPU, FP32 on CPU
-         device_map="auto" if torch.cuda.is_available() else None,  # ✅ Enable auto GPU usage
-         quantization_config=None  # ✅ Disable unsupported FP8 quantization
-     )
-
-     def generate_response_hf(user_message):
-         inputs = tokenizer(user_message, return_tensors="pt").to(model.device)
-         outputs = model.generate(**inputs, max_length=200)
-         return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- except Exception as e:
-     st.error(f"❌ Error loading DeepSeek-R1: {str(e)}")
-     generate_response_hf = lambda x: "⚠️ Error: Model not loaded."
-
- # Streamlit UI setup
+ # Streamlit user interface setup
+ st.set_page_config(page_title="AI Study Assistant", page_icon="🤖", layout="wide")
  st.title("📚 Subject-specific AI Chatbot")
  st.write("Hello! I'm your AI Study Assistant. You can ask me any questions related to your subjects, and I'll try to help.")

- # Sidebar settings
+ # Add sidebar with styling options
  st.sidebar.header("⚙️ Settings")
- chat_model = st.sidebar.radio("Choose AI Model:", ["Groq API", "DeepSeek R1 (Hugging Face)"])
-
- # Initialize session state for conversation
+ st.sidebar.write("Customize your chatbot experience!")
+ chat_theme = st.sidebar.radio("Choose a theme:", ["Light", "Dark", "Blue", "Green"])
+
+ # Apply theme
+ if chat_theme == "Dark":
+     st.markdown("""
+     <style>
+     body {background-color: #1e1e1e; color: white;}
+     .stButton>button {background-color: #4CAF50; color: white;}
+     .chat-bubble {background-color: #2c2c2c; border-radius: 10px; padding: 10px;}
+     </style>
+     """, unsafe_allow_html=True)
+ elif chat_theme == "Blue":
+     st.markdown("""
+     <style>
+     body {background-color: #e3f2fd; color: black;}
+     .stButton>button {background-color: #2196F3; color: white;}
+     .chat-bubble {background-color: #bbdefb; border-radius: 10px; padding: 10px;}
+     </style>
+     """, unsafe_allow_html=True)
+ elif chat_theme == "Green":
+     st.markdown("""
+     <style>
+     body {background-color: #e8f5e9; color: black;}
+     .stButton>button {background-color: #4CAF50; color: white;}
+     .chat-bubble {background-color: #c8e6c9; border-radius: 10px; padding: 10px;}
+     </style>
+     """, unsafe_allow_html=True)
+ else:
+     st.markdown("""
+     <style>
+     body {background-color: #ffffff; color: black;}
+     .stButton>button {background-color: #008CBA; color: white;}
+     .chat-bubble {background-color: #f1f1f1; border-radius: 10px; padding: 10px;}
+     </style>
+     """, unsafe_allow_html=True)
+
+ # Initialize session state for maintaining conversation
  if 'conversation_history' not in st.session_state:
      st.session_state.conversation_history = []

- # Subjects list
+ # Define a list of subjects for which the chatbot will answer
  subjects = ["Chemistry", "Computer", "English", "Islamiat", "Mathematics", "Physics", "Urdu"]

+ # Function to generate chatbot response based on subject-specific user input
  def generate_chatbot_response(user_message):
-     related_subject = next((subject for subject in subjects if subject.lower() in user_message.lower()), None)
+     # Check if the user's question is related to any subject
+     related_subject = None
+     for subject in subjects:
+         if subject.lower() in user_message.lower():
+             related_subject = subject
+             break

+     # Custom response for "who created you" type of questions
      if "kisne banaya" in user_message.lower() or "who created you" in user_message.lower():
-         return "I was created by Abdul Basit 😊"
+         return "I Created by Abdul Basit 😊"

-     prompt = f"You are a helpful AI chatbot for studying {related_subject if related_subject else 'general knowledge'}. The user is asking: {user_message}. Provide a detailed, helpful response."
-
-     if chat_model == "Groq API":
-         chat_completion = client.chat.completions.create(
-             messages=[{"role": "user", "content": prompt}],
-             model="deepseek-chat"
-         )
-         return chat_completion.choices[0].message.content
+     if related_subject:
+         prompt = f"You are a helpful AI chatbot for studying {related_subject}. The user is asking: {user_message}. Provide a detailed, helpful response related to {related_subject}."
      else:
-         return generate_response_hf(prompt)
+         prompt = f"You are a helpful AI chatbot. The user is asking: {user_message}. If the question is not related to any of the specified subjects (Chemistry, Computer, English, Islamiat, Mathematics, Physics, Urdu), politely let them know."

- # Chat input
+     # Generate response using Groq API
+     chat_completion = client.chat.completions.create(
+         messages=[{"role": "user", "content": prompt}],
+         model="llama3-8b-8192",  # You can replace with the appropriate model name
+     )
+
+     response = chat_completion.choices[0].message.content
+     return response
+
+ # User input for conversation (now placed at the bottom)
  st.markdown("### 💬 Chat with me")
  user_input = st.chat_input("Ask me a subject-related question:")

+ # Handle user input and display conversation
  if user_input:
      chatbot_response = generate_chatbot_response(user_input)
+
+     # Save the conversation history
      st.session_state.conversation_history.append(("User: " + user_input, "Chatbot: " + chatbot_response))

  # Display chat history