nihalaninihal commited on
Commit
a60b4de
·
verified ·
1 Parent(s): 0916430

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +577 -59
app.py CHANGED
@@ -1,64 +1,582 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, given prior (user, assistant) turns.

    Builds an OpenAI-style message list (system + history + new user turn),
    then yields the progressively accumulated assistant response so the
    Gradio ChatInterface can render it token by token.
    """
    messages = [{"role": "system", "content": system_message}]

    # Flatten history pairs into alternating user/assistant messages,
    # skipping empty turns.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # NOTE: loop variable renamed from `message` — the original shadowed the
    # function's `message` parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # BUG FIX: the final streamed delta can carry content=None; the
        # original did `response += token` unconditionally, raising TypeError.
        if token is not None:
            response += token
        yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
# Wire the streaming `respond` generator into Gradio's ChatInterface.
# The additional inputs appear as controls beneath the chat box and are
# passed positionally to `respond` after (message, history).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # Passed to `respond` as system_message.
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        # Passed as max_tokens.
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        # Passed as temperature.
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        # Passed as top_p.
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
 
 
 
 
 
 
 
 
 
 
 
62
 
 
63
# Start the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
 
 
 
 
 
 
1
import gradio as gr
import google.generativeai as genai
import os
from dotenv import load_dotenv
from github import Github
import json
from pathlib import Path
from datetime import datetime  # NOTE(review): unused in the visible code
from collections import defaultdict
import base64
from typing import Dict, List, Any, Optional, Tuple, Iterator
from dataclasses import dataclass
import tempfile  # NOTE(review): unused in the visible code
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
import time  # NOTE(review): unused in the visible code

# Load environment variables from a local .env file, if present.
load_dotenv()

# Configure API keys.
# NOTE(review): the env var names are lowercase "github_api" / "gemini_api" —
# confirm they match the secrets configured for this deployment.
GITHUB_TOKEN = os.getenv("github_api")
GEMINI_API_KEY = os.getenv("gemini_api")

# Fail fast at import time if either credential is missing.
if not GITHUB_TOKEN or not GEMINI_API_KEY:
    raise ValueError("Both GITHUB_TOKEN and GEMINI_API_KEY must be set in environment")

# Initialize API clients shared by the rest of the module.
gh = Github(GITHUB_TOKEN)
genai.configure(api_key=GEMINI_API_KEY)
model = genai.GenerativeModel(
    # Experimental "thinking" model; ThinkingAnalyzer relies on its
    # two-part (thought, answer) streaming output.
    model_name="gemini-2.0-flash-thinking-exp-01-21",
    generation_config={
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 40,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    },
    safety_settings=[
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
    ]
)

# File extensions treated as source code during structure/pattern analysis.
RELEVANT_EXTENSIONS = {
    ".py", ".js", ".ts", ".jsx", ".tsx", ".java", ".cpp", ".c", ".h",
    ".hpp", ".rb", ".php", ".go", ".rs", ".swift", ".kt"
}
63
+
64
@dataclass
class ChatMessage:
    """A single chat message rendered in the Gradio Chatbot."""
    # "user" or "assistant".
    role: str
    # Message text (markdown).
    content: str
    # Optional Gradio message metadata, e.g. {"title": ...} for collapsible
    # "thinking" sections. Annotation corrected to Optional: default is None.
    metadata: Optional[Dict[str, Any]] = None
69
+
70
class ThinkingAnalyzer:
    """Handles streaming thoughts and responses from the Gemini model.

    The "thinking" model streams chunks whose candidate content holds one
    part while the model is reasoning and two parts once the final answer
    begins; this class turns that stream into a growing list of ChatMessage
    objects (a metadata-titled "thought" message followed by the answer).
    """

    def __init__(self, model):
        # Any object exposing generate_content(prompt, stream=True).
        self.model = model

    def stream_analysis(self, analysis_data: Dict[str, Any], system_prompt: str) -> Iterator[List[ChatMessage]]:
        """Stream an analysis of *analysis_data*, yielding the message list after each chunk."""

        # The analysis data is appended exactly once, here.
        prompt = f"{system_prompt}\n\nRepository Analysis Data:\n{json.dumps(analysis_data, indent=2)}"

        response = self.model.generate_content(prompt, stream=True)

        # Seed with an empty "thinking" message so the UI shows progress
        # immediately.
        messages = [
            ChatMessage(
                role="assistant",
                content="",
                metadata={"title": "⏳ Analyzing Repository: Thought Process"}
            )
        ]
        thought_buffer = ""
        response_buffer = ""
        thinking_complete = False

        for chunk in response:
            # Defensive: skip chunks with no candidates/parts (e.g. safety
            # blocks) instead of raising IndexError.
            if not chunk.candidates or not chunk.candidates[0].content.parts:
                continue
            parts = chunk.candidates[0].content.parts
            current_chunk = parts[0].text or ""

            if len(parts) == 2 and not thinking_complete:
                # Thought is complete; the second part starts the answer.
                thought_buffer += current_chunk
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⏳ Analysis Thought Process"}
                )

                # BUG FIX: the original left response_buffer empty here, so
                # the next chunk's overwrite of messages[-1] dropped this
                # first slice of the answer. Keep buffer and message in sync.
                response_buffer = parts[1].text or ""
                messages.append(
                    ChatMessage(role="assistant", content=response_buffer)
                )
                thinking_complete = True

            elif thinking_complete:
                # Continue streaming the final answer.
                response_buffer += current_chunk
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=response_buffer
                )

            else:
                # Continue streaming the thought process.
                thought_buffer += current_chunk
                messages[-1] = ChatMessage(
                    role="assistant",
                    content=thought_buffer,
                    metadata={"title": "⏳ Analysis Thought Process"}
                )

            yield messages

    def stream_question_response(self, question: str, analysis_data: Dict[str, Any],
                                 chat_history: List[Tuple[str, str]]) -> Iterator[List[ChatMessage]]:
        """Stream the answer to a follow-up *question* with the thinking process visible."""

        # Build conversational context. BUG FIX: the original also embedded
        # the analysis JSON here AND passed it to stream_analysis, which
        # appended it a second time — doubling the prompt size.
        context = "You are an expert code analyst helping users understand repository analysis results.\n\n"

        if chat_history:
            context += "Previous conversation:\n"
            for entry in chat_history:
                # BUG FIX: the UI stores bare question strings in its history
                # state; unpacking those as (user, assistant) pairs raised
                # ValueError. Accept both shapes.
                if isinstance(entry, (tuple, list)) and len(entry) == 2:
                    user_msg, assistant_msg = entry
                    context += f"User: {user_msg}\nAssistant: {assistant_msg}\n"
                else:
                    context += f"User: {entry}\n"

        context += f"\nUser: {question}\nPlease think through your analysis:"

        # stream_analysis appends the analysis data exactly once.
        yield from self.stream_analysis(analysis_data, context)
158
class RepositoryAnalyzer:
    """Collects statistics about a GitHub repository via the PyGithub API.

    Gathers basic metadata, file/directory structure, lightweight code
    metrics, recent commit history, and contributor statistics into a
    JSON-serializable dict (``analysis_data``).
    """

    def __init__(self, repo_url: str):
        """Parse *repo_url* (e.g. https://github.com/owner/repo) and bind the repo.

        Raises:
            ValueError: if the URL does not contain an owner/repo pair.
        """
        # Tolerate a trailing slash; split on "/" and take the last two parts.
        parts = repo_url.rstrip('/').split('/')
        if len(parts) < 2:
            raise ValueError("Invalid repository URL format")

        self.repo_name = parts[-1]
        # Accept clone-style URLs ending in ".git".
        if self.repo_name.endswith(".git"):
            self.repo_name = self.repo_name[:-4]
        self.owner = parts[-2]
        self.repo = gh.get_repo(f"{self.owner}/{self.repo_name}")
        self.analysis_data: Dict[str, Any] = {}

    def analyze(self) -> Dict[str, Any]:
        """Perform the complete repository analysis and return the result dict."""
        try:
            # Basic repository information.
            self.analysis_data["basic_info"] = {
                "name": self.repo.name,
                "owner": self.repo.owner.login,
                "description": self.repo.description or "No description available",
                "stars": self.repo.stargazers_count,
                "forks": self.repo.forks_count,
                "created_at": self.repo.created_at.isoformat(),
                "last_updated": self.repo.updated_at.isoformat(),
                "primary_language": self.repo.language or "Not specified",
            }

            # Repository structure (file types, directory count, sizes).
            self.analysis_data["structure"] = self._analyze_structure()

            # Lightweight code metrics from a sample of files.
            self.analysis_data["code_patterns"] = self._analyze_code_patterns()

            # Commit history and activity patterns.
            self.analysis_data["commit_history"] = self._analyze_commits()

            # Contributor statistics.
            self.analysis_data["contributors"] = self._analyze_contributors()

            return self.analysis_data

        except Exception as e:
            # Chain the cause so the original traceback is preserved
            # (the original `raise Exception(...)` discarded it).
            raise Exception(f"Error analyzing repository: {str(e)}") from e

    def _analyze_structure(self) -> Dict[str, Any]:
        """Walk the repository tree and tally file types, directories and sizes.

        Note: each directory costs one API call (get_contents), so large
        repositories consume rate limit quickly.
        """
        structure = {
            "files": defaultdict(int),
            "directories": set(),
            "total_size": 0,
        }

        try:
            # Breadth-style walk using a work list seeded with the repo root.
            contents = self.repo.get_contents("")
            while contents:
                content = contents.pop(0)
                if content.type == "dir":
                    structure["directories"].add(content.path)
                    contents.extend(self.repo.get_contents(content.path))
                else:
                    ext = Path(content.path).suffix.lower()
                    if ext in RELEVANT_EXTENSIONS:
                        structure["files"][ext] += 1
                        structure["total_size"] += content.size
        except Exception as e:
            # Best-effort: return whatever was gathered before the failure.
            print(f"Error analyzing structure: {str(e)}")

        return {
            "file_types": dict(structure["files"]),
            "directory_count": len(structure["directories"]),
            "total_size": structure["total_size"],
            "file_count": sum(structure["files"].values())
        }

    def _analyze_code_patterns(self) -> Dict[str, Any]:
        """Sample up to 5 source files and compute simple size/style metrics."""
        patterns = {
            "samples": [],
            "languages": defaultdict(int),
            "complexity_metrics": defaultdict(list)
        }

        try:
            files = self.repo.get_contents("")
            analyzed = 0

            # Stop after 5 analyzed files to bound API usage.
            while files and analyzed < 5:
                file = files.pop(0)
                if file.type == "dir":
                    files.extend(self.repo.get_contents(file.path))
                elif Path(file.path).suffix.lower() in RELEVANT_EXTENSIONS:
                    try:
                        # PyGithub returns base64-encoded file content.
                        content = base64.b64decode(file.content).decode('utf-8')
                        lines = content.splitlines()

                        if not lines:
                            continue

                        # Non-blank lines of code and mean raw line length.
                        loc = len([line for line in lines if line.strip()])
                        avg_line_length = sum(len(line) for line in lines) / len(lines)

                        patterns["samples"].append({
                            "path": file.path,
                            "language": Path(file.path).suffix[1:],
                            "loc": loc,
                            "avg_line_length": round(avg_line_length, 2)
                        })

                        patterns["languages"][Path(file.path).suffix[1:]] += loc
                        patterns["complexity_metrics"]["loc"].append(loc)
                        patterns["complexity_metrics"]["avg_line_length"].append(avg_line_length)

                        analyzed += 1

                    except Exception as e:
                        # Binary or undecodable files are skipped, not fatal.
                        print(f"Error analyzing file {file.path}: {str(e)}")
                        continue

        except Exception as e:
            print(f"Error in code pattern analysis: {str(e)}")

        # Return plain dicts (the original leaked defaultdicts).
        return {
            "samples": patterns["samples"],
            "languages": dict(patterns["languages"]),
            "complexity_metrics": dict(patterns["complexity_metrics"]),
        }

    def _analyze_commits(self) -> Dict[str, Any]:
        """Summarize the last 100 commits: authors, sizes, and hour-of-day pattern."""
        commit_data = []
        commit_times = []

        try:
            commits = list(self.repo.get_commits()[:100])  # Get last 100 commits

            for commit in commits:
                try:
                    # NOTE: commit.stats triggers an extra API request per
                    # commit, so this loop is rate-limit heavy.
                    commit_info = {
                        "sha": commit.sha,
                        "author": commit.author.login if commit.author else "Unknown",
                        "date": commit.commit.author.date.isoformat(),
                        "message": commit.commit.message,
                        "changes": {
                            "additions": commit.stats.additions,
                            "deletions": commit.stats.deletions,
                        }
                    }
                    commit_data.append(commit_info)
                    commit_times.append(commit.commit.author.date.hour)
                except Exception as e:
                    print(f"Error processing commit {commit.sha}: {str(e)}")
                    continue

            # Histogram of commit hours (local to each author's recorded date).
            commit_hours = defaultdict(int)
            for hour in commit_times:
                commit_hours[hour] += 1

            total_commits = len(commit_data)
            return {
                "commits": commit_data,
                "total_commits": total_commits,
                "commit_hours": dict(commit_hours),
                # Guard the division when no commits were readable.
                "avg_additions": sum(c["changes"]["additions"] for c in commit_data) / total_commits if total_commits else 0,
                "avg_deletions": sum(c["changes"]["deletions"] for c in commit_data) / total_commits if total_commits else 0,
            }

        except Exception as e:
            print(f"Error in commit analysis: {str(e)}")
            # Empty but well-shaped result so downstream JSON stays valid.
            return {
                "commits": [],
                "total_commits": 0,
                "commit_hours": {},
                "avg_additions": 0,
                "avg_deletions": 0
            }

    def _analyze_contributors(self) -> Dict[str, Any]:
        """Collect login/contribution-count/type for each contributor."""
        contributor_data = []

        try:
            contributors = list(self.repo.get_contributors())
            for contributor in contributors:
                contributor_data.append({
                    "login": contributor.login,
                    "contributions": contributor.contributions,
                    "type": contributor.type,
                })
        except Exception as e:
            print(f"Error analyzing contributors: {str(e)}")

        return {
            "total_contributors": len(contributor_data),
            "contributors": contributor_data
        }
352
+
353
@retry(
    retry=retry_if_exception_type(Exception),
    stop=stop_after_attempt(3),
    wait=wait_exponential(multiplier=1, min=4, max=10)
)
def process_analysis(repo_url: str, progress=gr.Progress()):
    """Analyze *repo_url* and generate the AI write-up with visible thinking.

    Returns:
        (messages, analysis_data): the final ChatMessage list for the
        Chatbot output and the raw analysis dict for the State output.

    NOTE: because the body catches Exception, the @retry decorator only
    re-runs on errors raised outside the try (effectively never); it is
    kept for interface stability.
    """
    try:
        # Initialize analysis.
        progress(0, desc="Initializing repository analysis...")
        analyzer = RepositoryAnalyzer(repo_url)
        analysis_data = analyzer.analyze()

        # Initialize thinking analyzer.
        thinking_analyzer = ThinkingAnalyzer(model)

        # System prompt for analysis.
        system_prompt = """You are an expert code analyst with deep experience in software architecture, development practices, and team dynamics. Analyze the provided repository data and create a detailed, insightful analysis using the following markdown template:

# Repository Analysis

## 📊 Project Overview
[Provide a comprehensive overview including:
- Project purpose and scope
- Age and maturity of the project
- Current activity level and maintenance status
- Key metrics (stars, forks, etc.)
- Primary technologies and languages used]

## 🏗️ Architecture and Code Organization
[Analyze in detail:
- Repository structure and organization
- Code distribution across different technologies
- File and directory organization patterns
- Project size and complexity metrics
- Code modularity and component structure
- Presence of key architectural patterns]

## 💻 Development Practices & Code Quality
[Evaluate:
- Coding standards and consistency
- Code complexity and maintainability metrics
- Documentation practices
- Testing approach and coverage (if visible)
- Error handling and logging practices
- Use of design patterns and best practices]

## 📈 Development Workflow & History
[Analyze:
- Commit patterns and frequency
- Release cycles and versioning
- Branch management strategy
- Code review practices
- Continuous integration/deployment indicators
- Peak development periods and cycles]

## 👥 Team Dynamics & Collaboration
[Examine:
- Team size and composition
- Contribution patterns
- Core maintainer identification
- Community engagement level
- Communication patterns
- Collaboration efficiency]

## 🔧 Technical Depth & Innovation
[Assess:
- Technical sophistication level
- Innovative approaches or solutions
- Complex problem-solving examples
- Performance optimization efforts
- Security considerations
- Scalability approach]

## 🚀 Project Health & Sustainability
[Evaluate:
- Project momentum and growth trends
- Maintenance patterns
- Community health indicators
- Documentation completeness
- Onboarding friendliness
- Long-term viability indicators]

## 💡 Key Insights & Recommendations
[Provide:
- 3-5 key strengths identified
- 3-5 potential improvement areas
- Notable patterns or practices
- Unique characteristics
- Strategic recommendations]"""

        # Stream thinking and analysis; keep only the final message list.
        progress(0.5, desc="Generating analysis with thinking process...")
        messages = []
        for msg_update in thinking_analyzer.stream_analysis(
            analysis_data,
            system_prompt
        ):
            messages = msg_update

        return messages, analysis_data

    except Exception as e:
        # BUG FIX: the original did a bare `return` (None), but the Gradio
        # wiring expects two outputs [chatbot, analysis_data] — that made
        # every failure crash the UI instead of reporting the error.
        return (
            [ChatMessage(
                role="assistant",
                content=f"Error analyzing repository: {str(e)}"
            )],
            {},
        )
457
+
458
def process_question(question: str, analysis_data: Dict[str, Any], chat_history: List[str]):
    """Answer a follow-up *question* about a prior analysis, thinking visibly.

    Returns the final ChatMessage list after the stream has been consumed;
    if no analysis has run yet, returns a single prompt-the-user message.
    """
    # Guard: questions only make sense after an analysis exists.
    if not analysis_data:
        return [ChatMessage(role="assistant", content="Please analyze a repository first before asking questions.")]

    analyzer = ThinkingAnalyzer(model)
    final_messages: list = []

    # Drain the streaming generator, retaining only its last snapshot.
    stream = analyzer.stream_question_response(question, analysis_data, chat_history)
    for snapshot in stream:
        final_messages = snapshot

    return final_messages
472
+
473
# Create Gradio interface with thinking visualization.
with gr.Blocks(theme=gr.themes.Soft()) as app:
    gr.Markdown("""
    # 🔍 GitHub Repository Analyzer with Thinking Process

    Analyze any public GitHub repository using AI. Watch the AI's thought process as it:
    1. 📊 Analyzes repository structure and patterns
    2. 💡 Generates insights about development practices
    3. 💭 Shows its thinking while answering your follow-up questions

    Enter a GitHub repository URL (e.g., `https://github.com/owner/repo`)
    """)

    # URL input + analyze trigger.
    with gr.Row():
        repo_url = gr.Textbox(
            label="GitHub Repository URL",
            placeholder="https://github.com/owner/repo",
            scale=4
        )
        analyze_btn = gr.Button("🔍 Analyze", variant="primary", scale=1)

    # Status message shown above the chat while work is in progress.
    status_msg = gr.Markdown("", elem_id="status_message")

    with gr.Row():
        # NOTE(review): type="messages" expects message dicts/gr.ChatMessage;
        # the handlers return this module's dataclass ChatMessage — confirm
        # Gradio renders these correctly.
        chatbot = gr.Chatbot(
            label="Analysis & Discussion",
            height=500,
            show_label=True,
            render_markdown=True,
            type="messages"
        )

    # Follow-up question input and controls.
    with gr.Row():
        question = gr.Textbox(
            label="Your Question",
            placeholder="Ask about the analysis...",
            scale=4
        )
        ask_btn = gr.Button("💭 Ask", variant="primary", scale=1)
        clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary", scale=1)

    # Hidden states: last analysis dict, question history, and a staging
    # slot for the question being processed.
    analysis_data = gr.State({})
    chat_history = gr.State([])
    msg_store = gr.State("")

    def clear_outputs():
        # Reset chat, analysis state, history, and status banner.
        return [], {}, [], ""

    # Analyze: show a busy banner, run the analysis, then show completion.
    analyze_btn.click(
        fn=lambda: "⏳ Analysis in progress... Watch the thinking process below!",
        inputs=None,
        outputs=status_msg,
        queue=False
    ).then(
        process_analysis,
        inputs=[repo_url],
        outputs=[chatbot, analysis_data]
    ).success(
        lambda: "✅ Analysis complete! You can now ask questions about the repository.",
        inputs=None,
        outputs=status_msg
    )

    def update_chat(question, history):
        """Stage the question, record it in history, and clear the textbox."""
        # NOTE(review): history stores bare question strings, while the
        # analyzer's history parameter is typed as (user, assistant) pairs —
        # verify the consumer tolerates this shape.
        history = history or []
        history.append(question)
        return question, history, ""

    # Ask button: stage the question, then stream the answer into the chat.
    ask_btn.click(
        update_chat,
        inputs=[question, chat_history],
        outputs=[msg_store, chat_history, question],
        queue=False
    ).then(
        process_question,
        inputs=[msg_store, analysis_data, chat_history],
        outputs=chatbot
    )

    # Clear button resets every piece of UI state.
    clear_btn.click(
        clear_outputs,
        inputs=None,
        outputs=[chatbot, analysis_data, chat_history, status_msg],
        queue=False
    )

    # Pressing Enter in the question box behaves like the Ask button.
    question.submit(
        update_chat,
        inputs=[question, chat_history],
        outputs=[msg_store, chat_history, question],
        queue=False
    ).then(
        process_question,
        inputs=[msg_store, analysis_data, chat_history],
        outputs=chatbot
    )
574
 
575
# Launch the app when run as a script.
if __name__ == "__main__":
    app.launch(
        server_name="0.0.0.0",  # listen on all interfaces (needed in containers)
        server_port=7860,       # conventional Hugging Face Spaces port
        share=True,             # NOTE(review): share links are unnecessary/ignored on Spaces — confirm
        debug=True              # verbose logging; consider disabling in production
    )