vsj0702 committed on
Commit
20eef02
Β·
verified Β·
1 Parent(s): 1a89634

Updating chatbot.py

Browse files
Files changed (1) hide show
  1. chatbot.py +90 -98
chatbot.py CHANGED
@@ -1,120 +1,116 @@
 
 
 
1
  import streamlit as st
 
 
 
2
  from groq import Groq
3
  from langchain_groq import ChatGroq
4
  from langchain_core.prompts import ChatPromptTemplate
5
  from langchain_core.output_parsers import StrOutputParser
6
- from html import escape
7
- import edge_tts
8
- import asyncio
9
- import os
10
- import uuid
11
 
 
12
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
13
 
 
 
14
  class CodeAssistantBot:
15
  def __init__(self):
16
  self.client = Groq(api_key=GROQ_API_KEY)
17
  self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)
 
18
  self.analysis_prompt = ChatPromptTemplate.from_messages([
19
- ("system",
20
- "You are a skilled coding assistant. Use the following context and user input to help."
21
- " Refer to previous summary and recent interactions to make answers accurate."
22
- " Keep your response short, relevant, and conversational."),
23
- ("user",
24
- "Code: {code}\nOutput: {output}\nError: {error}\n"
25
- "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
26
  ])
 
27
  self.summary_prompt = ChatPromptTemplate.from_messages([
28
  ("system", "Summarize key technical points from the conversation so far."),
29
  ("user", "Conversation: {conversation}")
30
  ])
 
31
  self.voice_prompt = ChatPromptTemplate.from_messages([
32
- ("system",
33
- "You are a friendly narrator voice bot. Given a technical answer and its context,"
34
- " explain it aloud like you're helping someone understand the topic clearly and confidently."
35
- " Keep your response conversational and short not too long, but not over short."),
36
- ("user",
37
- "Code: {code}\nOutput: {output}\nError: {error}\n"
38
- "Conversation so far: {summary}\nAnswer to explain: {answer}")
39
  ])
40
 
41
  def analyze_code(self, code, output, error, question, summary="", history=None):
42
  parser = StrOutputParser()
43
  recent = "\n".join([f"User: {q}\nBot: {a}" for q, a in (history or [])[-4:]])
44
- chain = self.analysis_prompt | self.model | parser
45
- return chain.invoke({
46
- 'code': code,
47
- 'output': output,
48
- 'error': error,
49
- 'summary': summary,
50
- 'recent': recent,
51
- 'question': question
52
  })
53
 
54
  def narrate_response(self, code, output, error, answer, summary=""):
55
  parser = StrOutputParser()
56
- narration_chain = self.voice_prompt | self.model | parser
57
- return narration_chain.invoke({
58
- 'code': code,
59
- 'output': output,
60
- 'error': error,
61
- 'summary': summary,
62
- 'answer': answer
63
  })
64
 
 
 
65
  async def text_to_speech(text, filename):
66
  voice = "fr-FR-VivienneMultilingualNeural"
67
- communicate = edge_tts.Communicate(text, voice)
68
- await communicate.save(filename)
69
 
 
 
70
  def render_chatbot(code, output, error):
71
  st.markdown("""
72
  <style>
73
- .chat-container {
74
- max-height: 60vh;
75
- overflow-y: auto;
76
- padding-right: 0.5rem;
77
- border: 1px solid #ddd;
78
- border-radius: 8px;
79
- margin-top: 1rem;
80
- padding: 1rem;
81
- background-color: #f9f9f9;
82
- }
83
- .chat-message {
84
- margin-bottom: 1rem;
85
- word-wrap: break-word;
86
- }
87
- .user-message {
88
- font-weight: bold;
89
- color: #1a73e8;
90
- }
91
- .bot-message pre {
92
- background-color: #f0f0f0;
93
- padding: 0.5rem;
94
- border-radius: 5px;
95
- overflow-x: auto;
96
- }
97
  </style>
98
  """, unsafe_allow_html=True)
99
 
 
100
  st.session_state.setdefault('conversation', [])
101
  st.session_state.setdefault('chat_summary', "")
102
  st.session_state.setdefault('chat_display_count', 5)
103
  st.session_state.setdefault('narrated_audio', {})
104
 
105
- c1, c2 = st.columns([4, 1], gap='small')
106
- with c1:
 
107
  question = st.text_input("Ask something about your code...", key="chat_input")
108
- with c2:
109
  send = st.button("πŸš€")
110
 
 
111
  if send and question:
112
  bot = CodeAssistantBot()
113
  history = st.session_state.conversation[-4:]
114
  summary = st.session_state.chat_summary
115
- response = bot.analyze_code(code, output, error, question, summary, history)
116
- st.session_state.conversation.append((question, response))
117
  st.session_state.chat_display_count = 5
 
118
  if len(st.session_state.conversation) >= 3:
119
  try:
120
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
@@ -123,65 +119,61 @@ def render_chatbot(code, output, error):
123
  except:
124
  pass
125
 
126
- total = len(st.session_state.conversation)
127
- start = max(0, total - st.session_state.chat_display_count)
128
- visible = list(reversed(st.session_state.conversation[start:]))
129
 
130
  for idx, (q, a) in enumerate(visible):
131
  st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
132
 
133
- def format_response(txt):
134
- parts = txt.split('```')
135
- result = ''
136
- for j, part in enumerate(parts):
137
- if j % 2 == 1:
138
  lines = part.splitlines()
139
  if lines and lines[0].isalpha():
140
  lines = lines[1:]
141
- code_html = escape("\n".join(lines))
142
- result += f'<pre><code>{code_html}</code></pre>'
143
  else:
144
  result += escape(part)
145
  return result
146
 
147
- formatted = format_response(a)
148
- st.markdown(f'<div class="chat-message bot-message">{formatted}</div>', unsafe_allow_html=True)
149
 
150
- # Check if already narrated
151
  audio_file = st.session_state.narrated_audio.get((q, a))
152
-
153
  if not audio_file:
154
  if st.button("πŸ”Š Narrate", key=f"narrate_{idx}"):
155
- status_placeholder = st.empty()
156
- status_placeholder.info("🧠 Generating narration...")
157
  bot = CodeAssistantBot()
158
  narration = bot.narrate_response(code, output, error, a, st.session_state.chat_summary)
159
- status_placeholder.info("πŸŽ™οΈ Converting to audio...")
160
  audio_file = f"audio_{uuid.uuid4().hex}.mp3"
161
  asyncio.run(text_to_speech(narration, audio_file))
162
  st.session_state.narrated_audio[(q, a)] = audio_file
163
- status_placeholder.success("πŸ”Š Narration ready!")
164
  st.audio(audio_file, format="audio/mp3", autoplay=True)
165
  else:
166
  st.audio(audio_file, format="audio/mp3", autoplay=False)
167
 
168
- if start > 0 and st.button("πŸ”½ Show more"):
169
- st.session_state.chat_display_count += 5
170
- st.rerun()
 
171
 
172
- st.markdown("""
173
- <script>
174
- const c = window.parent.document.querySelector('.chat-container');
175
- if (c) c.scrollTop = c.scrollHeight;
176
- </script>
177
 
178
- <script>
179
- document.querySelectorAll('audio').forEach(audio => {
180
- audio.addEventListener('play', function () {
181
  document.querySelectorAll('audio').forEach(a => {
182
- if (a !== this) a.pause();
183
  });
184
- });
185
  });
186
- </script>
187
- """, unsafe_allow_html=True)
 
 
1
+ import os
2
+ import uuid
3
+ import asyncio
4
  import streamlit as st
5
+ from html import escape
6
+
7
+ import edge_tts
8
  from groq import Groq
9
  from langchain_groq import ChatGroq
10
  from langchain_core.prompts import ChatPromptTemplate
11
  from langchain_core.output_parsers import StrOutputParser
 
 
 
 
 
12
 
13
+ # Load API key from environment
14
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
15
 
16
+
17
+ # ── Core Bot Logic ─────────────────────────────
18
class CodeAssistantBot:
    """Groq-backed assistant: answers questions about code and drafts narration text.

    Holds three prompt templates (analysis, conversation summary, voice
    narration) and a ChatGroq model; each public method composes a
    prompt | model | parser chain and invokes it synchronously.
    """

    def __init__(self):
        # Raw Groq client is kept for interface parity; the chains below
        # go through the LangChain wrapper instead.
        self.client = Groq(api_key=GROQ_API_KEY)
        self.model = ChatGroq(model="llama-3.3-70b-versatile", temperature=0.6)

        self.analysis_prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a skilled coding assistant. Keep answers relevant and concise."),
            ("user", "Code: {code}\nOutput: {output}\nError: {error}\n"
                     "Summary: {summary}\nRecent: {recent}\nQuestion: {question}")
        ])

        self.summary_prompt = ChatPromptTemplate.from_messages([
            ("system", "Summarize key technical points from the conversation so far."),
            ("user", "Conversation: {conversation}")
        ])

        self.voice_prompt = ChatPromptTemplate.from_messages([
            ("system", "You are a friendly narrator. Explain the answer clearly and casually."),
            ("user", "Code: {code}\nOutput: {output}\nError: {error}\n"
                     "Conversation so far: {summary}\nAnswer to explain: {answer}")
        ])

    def analyze_code(self, code, output, error, question, summary="", history=None):
        """Answer *question* about *code*, using *summary* plus the last 4 turns of *history*."""
        turns = history or []
        recent = "\n".join(f"User: {q}\nBot: {a}" for q, a in turns[-4:])
        chain = self.analysis_prompt | self.model | StrOutputParser()
        payload = {
            'code': code,
            'output': output,
            'error': error,
            'summary': summary,
            'recent': recent,
            'question': question,
        }
        return chain.invoke(payload)

    def narrate_response(self, code, output, error, answer, summary=""):
        """Produce a spoken-style explanation of *answer* for the TTS step."""
        chain = self.voice_prompt | self.model | StrOutputParser()
        payload = {
            'code': code,
            'output': output,
            'error': error,
            'summary': summary,
            'answer': answer,
        }
        return chain.invoke(payload)
54
 
55
+
56
+ # ── Text to Speech ─────────────────────────────
57
async def text_to_speech(text, filename):
    """Synthesize *text* with edge-tts and save the audio as an mp3 at *filename*."""
    tts = edge_tts.Communicate(text, "fr-FR-VivienneMultilingualNeural")
    await tts.save(filename)
61
 
62
+
63
+ # ── Chat UI Logic ──────────────────────────────
64
  def render_chatbot(code, output, error):
65
  st.markdown("""
66
  <style>
67
+ .chat-container {
68
+ max-height: 60vh;
69
+ overflow-y: auto;
70
+ padding: 1rem 0.5rem 1rem 1rem;
71
+ border: 1px solid #ddd;
72
+ border-radius: 8px;
73
+ background-color: #f9f9f9;
74
+ }
75
+ .chat-message {
76
+ margin-bottom: 1rem;
77
+ word-wrap: break-word;
78
+ }
79
+ .user-message {
80
+ font-weight: bold;
81
+ color: #1a73e8;
82
+ }
83
+ .bot-message pre {
84
+ background-color: #f0f0f0;
85
+ padding: 0.5rem;
86
+ border-radius: 5px;
87
+ overflow-x: auto;
88
+ }
 
 
89
  </style>
90
  """, unsafe_allow_html=True)
91
 
92
+ # Session setup
93
  st.session_state.setdefault('conversation', [])
94
  st.session_state.setdefault('chat_summary', "")
95
  st.session_state.setdefault('chat_display_count', 5)
96
  st.session_state.setdefault('narrated_audio', {})
97
 
98
+ # Input row
99
+ col1, col2 = st.columns([4, 1])
100
+ with col1:
101
  question = st.text_input("Ask something about your code...", key="chat_input")
102
+ with col2:
103
  send = st.button("πŸš€")
104
 
105
+ # Bot response
106
  if send and question:
107
  bot = CodeAssistantBot()
108
  history = st.session_state.conversation[-4:]
109
  summary = st.session_state.chat_summary
110
+ answer = bot.analyze_code(code, output, error, question, summary, history)
111
+ st.session_state.conversation.append((question, answer))
112
  st.session_state.chat_display_count = 5
113
+
114
  if len(st.session_state.conversation) >= 3:
115
  try:
116
  full_chat = "\n".join([f"User: {q}\nBot: {a}" for q, a in st.session_state.conversation[-10:]])
 
119
  except:
120
  pass
121
 
122
+ # Display messages
123
+ visible = list(reversed(st.session_state.conversation[-st.session_state.chat_display_count:]))
 
124
 
125
  for idx, (q, a) in enumerate(visible):
126
  st.markdown(f'<div class="chat-message user-message">{escape(q)}</div>', unsafe_allow_html=True)
127
 
128
+ def format_response(text):
129
+ parts = text.split("```")
130
+ result = ""
131
+ for i, part in enumerate(parts):
132
+ if i % 2 == 1:
133
  lines = part.splitlines()
134
  if lines and lines[0].isalpha():
135
  lines = lines[1:]
136
+ result += f'<pre><code>{escape("\\n".join(lines))}</code></pre>'
 
137
  else:
138
  result += escape(part)
139
  return result
140
 
141
+ st.markdown(f'<div class="chat-message bot-message">{format_response(a)}</div>', unsafe_allow_html=True)
 
142
 
143
+ # Narration logic
144
  audio_file = st.session_state.narrated_audio.get((q, a))
 
145
  if not audio_file:
146
  if st.button("πŸ”Š Narrate", key=f"narrate_{idx}"):
147
+ status = st.empty()
148
+ status.info("🧠 Generating narration...")
149
  bot = CodeAssistantBot()
150
  narration = bot.narrate_response(code, output, error, a, st.session_state.chat_summary)
151
+ status.info("πŸŽ™οΈ Converting to audio...")
152
  audio_file = f"audio_{uuid.uuid4().hex}.mp3"
153
  asyncio.run(text_to_speech(narration, audio_file))
154
  st.session_state.narrated_audio[(q, a)] = audio_file
155
+ status.success("πŸ”Š Narration ready!")
156
  st.audio(audio_file, format="audio/mp3", autoplay=True)
157
  else:
158
  st.audio(audio_file, format="audio/mp3", autoplay=False)
159
 
160
+ if len(visible) < len(st.session_state.conversation):
161
+ if st.button("πŸ”½ Show more"):
162
+ st.session_state.chat_display_count += 5
163
+ st.rerun()
164
 
165
+ # Auto-scroll & pause others on audio play
166
+ st.markdown("""
167
+ <script>
168
+ const container = window.parent.document.querySelector('.chat-container');
169
+ if (container) container.scrollTop = container.scrollHeight;
170
 
171
+ document.querySelectorAll('audio').forEach(audio => {
172
+ audio.addEventListener('play', function () {
 
173
  document.querySelectorAll('audio').forEach(a => {
174
+ if (a !== this) a.pause();
175
  });
 
176
  });
177
+ });
178
+ </script>
179
+ """, unsafe_allow_html=True)