import spaces

# Configure ZeroGPU
@spaces.GPU  # ZeroGPU allocation for this call (decorator assumed; implied by the comment above and the docstring)
def process_video_with_gpu(video, resize_option):
    """ZeroGPU-accelerated video processing"""
    # Create assessor inside the GPU function to avoid pickling issues
    from google import genai
    client = genai.Client(api_key=GOOGLE_API_KEY)
    assessor = CICE_Assessment(client)
    return process_video_core(video, resize_option, assessor)
import gradio as gr
from google import genai
from google.genai import types
import os
import time
from datetime import datetime
import re
from gtts import gTTS
import tempfile
import numpy as np
from PIL import Image
import cv2
from reportlab.lib.pagesizes import letter
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak
from reportlab.lib.units import inch
from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER
from reportlab.lib.colors import HexColor
import subprocess
import shutil
# Configure Google API Key from environment variable or Hugging Face secrets
print("Setting up Google API Key...")
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
if not GOOGLE_API_KEY:
    raise ValueError("GOOGLE_API_KEY environment variable is not set. Please set it in Hugging Face Spaces secrets.")

client = genai.Client(api_key=GOOGLE_API_KEY)
print("Google Generative AI configured successfully!")
# Define the CICE Assessment Class
class CICE_Assessment:
    def __init__(self, client):
        self.client = client
        self.model_name = "gemini-2.0-flash-exp"

    def analyze_video(self, video_path):
        """Analyze video using the 18-point CICE 2.0 assessment with specific behavioral cues"""
        try:
            # Determine mime type based on file extension
            import mimetypes
            mime_type, _ = mimetypes.guess_type(video_path)
            if mime_type is None:
                # Default to mp4 if the type cannot be determined
                mime_type = 'video/mp4'

            # Upload video to Gemini
            print(f"Uploading video to Gemini AI (type: {mime_type})...")
            with open(video_path, 'rb') as f:
                video_file = self.client.files.upload(file=f, config={'mime_type': mime_type})

            # Wait for processing
            print("Processing video (this may take 30-60 seconds)...")
            max_wait = 300
            wait_time = 0
            while video_file.state == "PROCESSING" and wait_time < max_wait:
                time.sleep(3)
                wait_time += 3
                video_file = self.client.files.get(name=video_file.name)

            if video_file.state == "FAILED":
                raise Exception("Video processing failed")

            print("Analyzing team interactions...")

            # CICE 2.0 Assessment Prompt
| prompt = """Analyze this healthcare team interaction video and provide a comprehensive assessment based on the CICE 2.0 instrument's 18 interprofessional competencies, looking for these SPECIFIC BEHAVIORAL CUES: | |
| For EACH competency, clearly state whether it was "OBSERVED" or "NOT OBSERVED" based on these specific behaviors: | |
| 1. IDENTIFIES FACTORS INFLUENCING HEALTH STATUS | |
| LOOK FOR: Team mentions allergy bracelet, fall-related trauma, multiple injuries, or states airway/breathing/circulation concerns out loud | |
| 2. IDENTIFIES TEAM GOALS FOR THE PATIENT | |
| LOOK FOR: Team verbalizes goals like: stabilize airway, CPR/AED, give epinephrine, control bleeding, preserve tooth, prepare EMS handoff | |
| 3. PRIORITIZES GOALS FOCUSED ON IMPROVING HEALTH OUTCOMES | |
| LOOK FOR: CPR/AED prioritized before bleeding/dental injury, EpiPen administered before addressing secondary injuries | |
| 4. VERBALIZES DISCIPLINE-SPECIFIC ROLE (PRE-BRIEF) | |
| LOOK FOR: Students acknowledge interprofessional communication expectations and scene safety review before scenario begins | |
| 5. OFFERS TO SEEK GUIDANCE FROM COLLEAGUES | |
| LOOK FOR: Peer-to-peer checks (e.g., dental to dental: confirm tooth storage; nursing to nursing: confirm CPR quality) | |
| 6. COMMUNICATES ABOUT COST-EFFECTIVE AND TIMELY CARE | |
| LOOK FOR: Team chooses readily available supplies (AED, saline, tourniquet) without delay, states need for rapid EMS transfer | |
| 7. DIRECTS QUESTIONS TO OTHER HEALTH PROFESSIONALS BASED ON EXPERTISE | |
| LOOK FOR: Asks discipline-specific expertise (e.g., "Dentalβwhat do we do with the tooth?"), invites pharmacy/medical input on epinephrine use | |
| 8. AVOIDS DISCIPLINE-SPECIFIC TERMINOLOGY | |
| LOOK FOR: Uses plain language like "no pulse" instead of "asystole" | |
| 9. EXPLAINS DISCIPLINE-SPECIFIC TERMINOLOGY WHEN NECESSARY | |
| LOOK FOR: Clarifies medical/dental terms for others when necessary | |
| 10. COMMUNICATES ROLES AND RESPONSIBILITIES CLEARLY | |
| LOOK FOR: Announces assignments out loud: "I'll do compressions," "I'll call 911," "I'll document" | |
| 11. ENGAGES IN ACTIVE LISTENING | |
| LOOK FOR: Repeats back instructions ("Everyone clear for shock"), pauses to hear teammates' updates | |
| 12. SOLICITS AND ACKNOWLEDGES PERSPECTIVES | |
| LOOK FOR: Leader asks "Anything else we need to address?", responds to peer input respectfully | |
| 13. RECOGNIZES APPROPRIATE CONTRIBUTIONS | |
| LOOK FOR: Affirms correct actions verbally ("Good catch on allergy bracelet"), non-verbal acknowledgment (nodding, thumbs up) | |
| 14. RESPECTFUL OF OTHER TEAM MEMBERS | |
| LOOK FOR: Listens without interrupting, values input across professions | |
| 15. COLLABORATIVELY WORKS THROUGH INTERPROFESSIONAL CONFLICTS | |
| LOOK FOR: Negotiates intervention priorities (airway vs. bleeding) respectfully | |
| 16. REFLECTS ON STRENGTHS OF TEAM INTERACTIONS (POST-BRIEF) | |
| LOOK FOR: Notes strong teamwork, communication, or role clarity after the scenario | |
| 17. REFLECTS ON CHALLENGES OF TEAM INTERACTIONS (POST-BRIEF) | |
| LOOK FOR: Identifies confusion, delays, or role overlap in debriefing | |
| 18. IDENTIFIES HOW TO IMPROVE TEAM EFFECTIVENESS (POST-BRIEF) | |
| LOOK FOR: Suggests faster role assignment, consistent closed-loop communication, earlier epi use | |
| STRUCTURE YOUR RESPONSE AS FOLLOWS: | |
| ## OVERALL ASSESSMENT | |
| Brief overview of the team interaction quality. | |
| ## DETAILED COMPETENCY EVALUATION | |
| For each of the 18 competencies, format as: | |
| Competency [number]: [name] | |
| Status: [OBSERVED/NOT OBSERVED] | |
| Evidence: [Specific behavioral cue observed or explanation of absence] | |
| ## STRENGTHS | |
| Top 3-5 key strengths with specific examples | |
| ## AREAS FOR IMPROVEMENT | |
| Top 3-5 areas needing work with specific suggestions | |
| ## AUDIO SUMMARY | |
| [Create a concise 60-second spoken summary focusing on: overall performance level, top 3 strengths, top 3 areas for improvement, and 2 key actionable recommendations. Write this in a natural, conversational tone suitable for text-to-speech narration.] | |
| ## FINAL SCORE | |
| Competencies Observed: X/18 | |
| Overall Performance Level: [Exemplary (85-100%)/Proficient (70-84%)/Developing (50-69%)/Needs Improvement (0-49%)]""" | |
            response = self.client.models.generate_content(
                model=self.model_name,
                contents=[
                    types.Part.from_uri(file_uri=video_file.uri, mime_type=video_file.mime_type),
                    prompt
                ]
            )

            print("Analysis complete!")
            return response.text

        except Exception as e:
            return f"Error during analysis: {str(e)}"
    def generate_audio_feedback(self, text):
        """Generate a concise 1-minute audio feedback summary"""
        # Extract the audio summary section from the assessment
        audio_summary_match = re.search(r'## AUDIO SUMMARY\s*(.*?)(?=##|\Z)', text, re.DOTALL)
        if audio_summary_match:
            summary_text = audio_summary_match.group(1).strip()
        else:
            # Fallback: Create a brief summary from the assessment
            summary_text = self.create_brief_summary(text)

        # Clean text for speech
        clean_text = re.sub(r'[#*_\[\]()]', ' ', summary_text)
        clean_text = re.sub(r'\s+', ' ', clean_text)
        clean_text = re.sub(r'[-•·]\s+', '', clean_text)

        # Add introduction and conclusion for better audio experience
        audio_script = f"""CICE Healthcare Team Assessment Summary.
{clean_text}
Please refer to the detailed written report for complete competency evaluation and specific recommendations.
End of audio summary."""

        # Generate audio with gTTS
        try:
            tts = gTTS(text=audio_script, lang='en', slow=False, tld='com')
            # Create a proper temporary file
            temp_audio = tempfile.NamedTemporaryFile(delete=False, suffix='.mp3')
            tts.save(temp_audio.name)
            temp_audio.close()
            return temp_audio.name
        except Exception as e:
            print(f"Audio generation failed: {str(e)}")
            return None
    def create_brief_summary(self, text):
        """Create a brief summary if the AUDIO SUMMARY section is not found"""
        # Parse scores (rough heuristic: every "not observed" also contains "observed",
        # so subtracting the "not observed" count leaves the standalone "observed" mentions)
        observed_count = text.lower().count("observed") - text.lower().count("not observed")
        total = 18
        percentage = (observed_count / total) * 100

        # Determine performance level
        if percentage >= 85:
            level = "Exemplary"
        elif percentage >= 70:
            level = "Proficient"
        elif percentage >= 50:
            level = "Developing"
        else:
            level = "Needs Improvement"

        summary = f"""The team demonstrated {level} performance with {observed_count} out of {total} competencies observed,
achieving {percentage:.0f} percent overall.
Key strengths included strong team communication and role clarity.
Areas for improvement include enhancing active listening and conflict resolution skills.
The team should focus on pre-briefing protocols and post-scenario debriefing to enhance future performance.
Emphasis should be placed on clear role assignment and closed-loop communication during critical interventions."""
        return summary
    def parse_assessment_scores(self, assessment_text):
        """Parse assessment text to extract scores"""
        # Method 1: Look for "Status: OBSERVED" vs "Status: NOT OBSERVED" patterns
        import re

        # Find all status lines
        status_pattern = r'Status:\s*(OBSERVED|NOT OBSERVED)'
        matches = re.findall(status_pattern, assessment_text, re.IGNORECASE)

        # Count only "OBSERVED" (not "NOT OBSERVED")
        observed_count = sum(1 for match in matches if match.upper() == "OBSERVED")

        # If no matches found with the Status: pattern, try alternative parsing
        if len(matches) == 0:
            # Alternative: Look for competency lines with OBSERVED/NOT OBSERVED
            lines = assessment_text.split('\n')
            observed_count = 0
            for i, line in enumerate(lines):
                # Look for competency indicators followed by status
                if 'Competency' in line and i + 1 < len(lines):
                    next_line = lines[i + 1]
                    # Check if the status line indicates OBSERVED (not NOT OBSERVED)
                    if 'OBSERVED' in next_line.upper() and 'NOT OBSERVED' not in next_line.upper():
                        observed_count += 1

            # If still no matches, use a more robust pattern
            if observed_count == 0:
                # Count lines that say "OBSERVED" but not "NOT OBSERVED"
                for line in lines:
                    # Clean line for better matching
                    clean_line = line.strip().upper()
                    if clean_line.startswith('STATUS:'):
                        if 'NOT OBSERVED' in clean_line:
                            continue
                        elif 'OBSERVED' in clean_line:
                            observed_count += 1

        total_competencies = 18
        percentage = (observed_count / total_competencies) * 100 if total_competencies > 0 else 0

        # Professional color scheme with better contrast
        if percentage >= 85:
            level = "Exemplary"
            color = "#0F766E"  # Deep teal
        elif percentage >= 70:
            level = "Proficient"
            color = "#1E40AF"  # Professional blue
        elif percentage >= 50:
            level = "Developing"
            color = "#EA580C"  # Professional orange
        else:
            level = "Needs Improvement"
            color = "#B91C1C"  # Deep red

        return observed_count, total_competencies, percentage, level, color
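    # Illustration of the parser on a hypothetical three-line assessment snippet:
    #   parse_assessment_scores("Status: OBSERVED\nStatus: NOT OBSERVED\nStatus: OBSERVED")
    #   returns (2, 18, 11.1..., "Needs Improvement", "#B91C1C")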
    def generate_pdf_report(self, assessment_text):
        """Generate a PDF report from the assessment text"""
        try:
            # Create a temporary file for the PDF
            temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix='.pdf')

            # Create the PDF document
            doc = SimpleDocTemplate(
                temp_pdf.name,
                pagesize=letter,
                rightMargin=72,
                leftMargin=72,
                topMargin=72,
                bottomMargin=18,
            )

            # Container for the 'Flowable' objects
            elements = []

            # Define styles with professional colors
            styles = getSampleStyleSheet()
            title_style = ParagraphStyle(
                'CustomTitle',
                parent=styles['Heading1'],
                fontSize=24,
                textColor=HexColor('#111827'),  # Darker gray for better readability
                spaceAfter=30,
                alignment=TA_CENTER
            )
            heading_style = ParagraphStyle(
                'CustomHeading',
                parent=styles['Heading2'],
                fontSize=14,
                textColor=HexColor('#1E40AF'),  # Professional blue
                spaceAfter=12,
                spaceBefore=12,
                fontName='Helvetica-Bold'  # ParagraphStyle has no 'bold' flag; set the bold font explicitly
            )
            body_style = ParagraphStyle(
                'CustomBody',
                parent=styles['BodyText'],
                fontSize=11,
                alignment=TA_JUSTIFY,
                spaceAfter=12
            )

            # Add title
            elements.append(Paragraph("CICE 2.0 Healthcare Team Assessment Report", title_style))
            elements.append(Spacer(1, 12))

            # Add timestamp
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            elements.append(Paragraph(f"<b>Assessment Date:</b> {timestamp}", body_style))
            elements.append(Spacer(1, 20))

            # Process the assessment text into PDF-friendly format
            lines = assessment_text.split('\n')
            for line in lines:
                line = line.strip()
                if not line:
                    elements.append(Spacer(1, 6))
                elif line.startswith('##'):
                    # Major heading
                    heading_text = line.replace('##', '').strip()
                    elements.append(Paragraph(heading_text, heading_style))
                elif line.startswith('Competency'):
                    # Competency item
                    elements.append(Paragraph(f"<b>{line}</b>", body_style))
                elif line.startswith('Status:') or line.startswith('Evidence:'):
                    # Sub-items
                    elements.append(Paragraph(line, body_style))
                else:
                    # Regular text: escape characters that ReportLab treats as markup
                    line = line.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
                    elements.append(Paragraph(line, body_style))

            # Build PDF
            doc.build(elements)
            temp_pdf.close()
            return temp_pdf.name

        except Exception as e:
            print(f"PDF generation failed: {str(e)}")
            # Fallback to text file
            temp_txt = tempfile.NamedTemporaryFile(delete=False, suffix='.txt', mode='w')
            temp_txt.write("CICE 2.0 Healthcare Team Interaction Assessment\n")
            temp_txt.write("=" * 60 + "\n")
            temp_txt.write(f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            temp_txt.write("=" * 60 + "\n\n")
            temp_txt.write(assessment_text)
            temp_txt.close()
            return temp_txt.name
# Initialize the assessment tool
assessor = CICE_Assessment(client)
# Add video processing helper functions
def resize_video(input_path, target_width, target_height):
    """Resize video to target dimensions to speed up processing"""
    try:
        # Open the video
        cap = cv2.VideoCapture(input_path)

        # Get original video properties
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        # Create temporary output file
        temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
        temp_output.close()

        # Create video writer with new dimensions
        out = cv2.VideoWriter(temp_output.name, fourcc, fps, (target_width, target_height))

        print(f"Resizing video to {target_width}x{target_height}...")
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            # Resize frame
            resized_frame = cv2.resize(frame, (target_width, target_height))
            out.write(resized_frame)
            frame_count += 1

        cap.release()
        out.release()

        print(f"Video resized successfully ({frame_count} frames)")
        return temp_output.name
    except Exception as e:
        print(f"Video resize failed: {str(e)}")
        return input_path  # Return original if resize fails
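
# Note: cv2.VideoWriter writes video frames only, so the resize above drops the audio
# track, while the CICE analysis depends on hearing the team's verbal communication.
# The helper below is an optional, unwired sketch of an audio-preserving resize. It
# assumes an ffmpeg binary with an H.264 encoder is available on PATH; the name
# resize_video_ffmpeg is new here, not part of the original app.
def resize_video_ffmpeg(input_path, target_width, target_height):
    """Sketch: resize with ffmpeg while copying the original audio stream."""
    if shutil.which('ffmpeg') is None:
        # Fall back to the OpenCV path (audio will be dropped)
        return resize_video(input_path, target_width, target_height)
    temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
    temp_output.close()
    cmd = [
        'ffmpeg', '-y', '-i', input_path,
        '-vf', f'scale={target_width}:{target_height}',
        '-c:a', 'copy',  # keep the original audio untouched
        temp_output.name,
    ]
    result = subprocess.run(cmd, capture_output=True)
    if result.returncode != 0:
        print("ffmpeg resize failed; returning the original video")
        return input_path
    return temp_output.name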
def get_video_info(video_path):
    """Get video dimensions and other info"""
    try:
        cap = cv2.VideoCapture(video_path)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        return width, height, fps, frame_count
    except Exception:
        return None, None, None, None
# Function to show immediate status when recording stops
def show_saving_status(video):
    """Show immediate status bar when recording stops"""
    if video is None:
        return gr.update(visible=False), None

    # Create animated status HTML
    status_html = """
    <div style="background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); padding: 20px; border-radius: 10px; margin: 20px 0; animation: pulse 1.5s ease-in-out infinite;">
        <style>
            @keyframes pulse {
                0%, 100% { opacity: 1; }
                50% { opacity: 0.8; }
            }
            @keyframes slide {
                0% { transform: translateX(-100%); }
                100% { transform: translateX(100%); }
            }
            .progress-bar {
                position: relative;
                height: 6px;
                background: rgba(255, 255, 255, 0.3);
                border-radius: 3px;
                overflow: hidden;
                margin-top: 15px;
            }
            .progress-bar::after {
                content: '';
                position: absolute;
                top: 0;
                left: 0;
                width: 40%;
                height: 100%;
                background: white;
                animation: slide 1.5s ease-in-out infinite;
            }
        </style>
        <div style="text-align: center; color: white;">
            <div style="font-size: 24px; font-weight: bold; margin-bottom: 10px;">
                Processing Your Recording...
            </div>
            <div style="font-size: 16px; opacity: 0.95;">
                Saving video file • Preparing for download
            </div>
            <div class="progress-bar"></div>
        </div>
    </div>
    """
    return gr.update(value=status_html, visible=True), video
# Enhanced save function with status updates
def save_recorded_video_with_status(video):
    """Save the recorded video with status updates"""
    if video is None:
        return None, gr.update(value="", visible=False)

    try:
        # Create a copy of the video file with a timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_filename = f"recorded_video_{timestamp}.mp4"
        temp_output = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4', prefix=f"recorded_{timestamp}_")

        # Copy the video file
        shutil.copy2(video, temp_output.name)
        temp_output.close()

        # Success status
        success_html = """
        <div style="background: linear-gradient(135deg, #10b981 0%, #059669 100%); padding: 15px; border-radius: 10px; margin: 20px 0;">
            <div style="text-align: center; color: white;">
                <div style="font-size: 20px; font-weight: bold;">
                    Video Saved Successfully!
                </div>
                <div style="font-size: 14px; margin-top: 5px; opacity: 0.95;">
                    Ready for download • Click "Analyze Video" to assess
                </div>
            </div>
        </div>
        """
        print(f"Video saved: {output_filename}")
        return temp_output.name, gr.update(value=success_html, visible=True)

    except Exception as e:
        print(f"Failed to save video: {str(e)}")
        error_html = """
        <div style="background: linear-gradient(135deg, #ef4444 0%, #dc2626 100%); padding: 15px; border-radius: 10px; margin: 20px 0;">
            <div style="text-align: center; color: white;">
                <div style="font-size: 20px; font-weight: bold;">
                    Error Saving Video
                </div>
                <div style="font-size: 14px; margin-top: 5px;">
                    Please try recording again
                </div>
            </div>
        </div>
        """
        return None, gr.update(value=error_html, visible=True)
# Function to hide status after a delay
def hide_status_after_delay():
    """Hide the status bar after showing success"""
    time.sleep(3)  # Wait 3 seconds
    return gr.update(value="", visible=False)
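# Note: the stop_recording chain further below performs this same delayed hide with
# inline lambdas, so hide_status_after_delay is defined but not wired to any event.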
# Define the core processing function (separate from the GPU wrapper)
def process_video_core(video, resize_option, assessor):
    """Process uploaded or recorded video"""
    if video is None:
        return "Please upload or record a video first.", None, None, None

    try:
        # Get original video info
        orig_width, orig_height, fps, frame_count = get_video_info(video)
        if orig_width and orig_height:
            print(f"Original video: {orig_width}x{orig_height} @ {fps}fps ({frame_count} frames)")

        # Get file size
        file_size_mb = os.path.getsize(video) / (1024 * 1024)
        print(f"Processing video ({file_size_mb:.1f}MB)...")

        # Apply resizing based on user selection
        video_to_process = video
        temp_resized_file = None

        if resize_option != "Original (No Resize)":
            # Parse the resolution from the option string
            if "640x480" in resize_option:
                target_width, target_height = 640, 480
            elif "800x600" in resize_option:
                target_width, target_height = 800, 600
            elif "1280x720" in resize_option:
                target_width, target_height = 1280, 720
            else:
                target_width, target_height = orig_width, orig_height

            # Only resize if different from original
            if orig_width and orig_height and (orig_width != target_width or orig_height != target_height):
                temp_resized_file = resize_video(video, target_width, target_height)
                video_to_process = temp_resized_file

                # Check new file size
                new_file_size_mb = os.path.getsize(video_to_process) / (1024 * 1024)
                print(f"Resized video: {new_file_size_mb:.1f}MB (saved {file_size_mb - new_file_size_mb:.1f}MB)")

        # Start assessment
        print("Starting CICE 2.0 Healthcare Team Assessment...")
        assessment_result = assessor.analyze_video(video_to_process)

        # Clean up temporary resized file if created
        if temp_resized_file and temp_resized_file != video:
            try:
                os.unlink(temp_resized_file)
            except OSError:
                pass

        if "Error" in assessment_result:
            return assessment_result, None, None, None

        # Generate 1-minute audio feedback
        print("Generating 1-minute audio summary...")
        audio_path = assessor.generate_audio_feedback(assessment_result)

        # Generate PDF report
        print("Generating PDF report...")
        pdf_path = assessor.generate_pdf_report(assessment_result)

        # Parse scores for visual summary
        observed, total, percentage, level, color = assessor.parse_assessment_scores(assessment_result)
        # Create enhanced visual summary HTML with professional colors
        summary_html = f"""
        <div style="max-width:800px; margin:20px auto; padding:30px; border-radius:15px; box-shadow:0 2px 10px rgba(0,0,0,0.08); background:white;">
            <h2 style="text-align:center; color:#111827; margin-bottom:30px; font-weight:600;">CICE 2.0 Assessment Summary</h2>

            <div style="display:flex; justify-content:space-around; margin:30px 0;">
                <div style="text-align:center;">
                    <div style="font-size:48px; font-weight:bold; color:{color};">{observed}/{total}</div>
                    <div style="color:#4B5563; margin-top:10px; font-weight:500;">Competencies Observed</div>
                </div>
                <div style="text-align:center;">
                    <div style="font-size:48px; font-weight:bold; color:{color};">{percentage:.0f}%</div>
                    <div style="color:#4B5563; margin-top:10px; font-weight:500;">Overall Score</div>
                </div>
            </div>

            <div style="text-align:center; padding:20px; background:#F8FAFC; border-radius:10px; margin:20px 0; border:1px solid #E2E8F0;">
                <div style="font-size:24px; font-weight:bold; color:{color};">Performance Level: {level}</div>
            </div>

            <div style="margin-top:30px;">
                <h3 style="color:#111827; margin-bottom:20px; font-weight:600;">Key Behavioral Indicators Assessed:</h3>

                <div style="background:#F8FAFC; padding:15px; border-radius:10px; margin:15px 0; border:1px solid #E2E8F0;">
                    <h4 style="color:#0F766E; margin-top:0; font-weight:600;">Critical Actions</h4>
                    <ul style="line-height:1.8; margin:10px 0;">
                        <li style="color:#374151;">CPR/AED prioritization</li>
                        <li style="color:#374151;">Epinephrine administration timing</li>
                        <li style="color:#374151;">Clear role assignments ("I'll do compressions")</li>
                        <li style="color:#374151;">Closed-loop communication</li>
                    </ul>
                </div>

                <div style="background:#F8FAFC; padding:15px; border-radius:10px; margin:15px 0; border:1px solid #E2E8F0;">
                    <h4 style="color:#1E40AF; margin-top:0; font-weight:600;">Communication Markers</h4>
                    <ul style="line-height:1.8; margin:10px 0;">
                        <li style="color:#374151;">Plain language use (avoiding medical jargon)</li>
                        <li style="color:#374151;">Active listening (repeating back instructions)</li>
                        <li style="color:#374151;">Soliciting input ("Anything else we need?")</li>
                        <li style="color:#374151;">Recognizing contributions ("Good catch!")</li>
                    </ul>
                </div>

                <div style="background:#F8FAFC; padding:15px; border-radius:10px; margin:15px 0; border:1px solid #E2E8F0;">
                    <h4 style="color:#6B21A8; margin-top:0; font-weight:600;">Team Dynamics</h4>
                    <ul style="line-height:1.8; margin:10px 0;">
                        <li style="color:#374151;">Pre-brief safety review</li>
                        <li style="color:#374151;">Peer-to-peer verification</li>
                        <li style="color:#374151;">Respectful conflict resolution</li>
                        <li style="color:#374151;">Post-brief reflection on strengths/challenges</li>
                    </ul>
                </div>
            </div>

            <div style="margin-top:30px; padding:20px; background:#FFF7ED; border-radius:10px; border-left:4px solid #EA580C;">
                <p style="text-align:center; color:#431407; margin:0; font-weight:600;">
                    Listen to the 1-minute audio summary for key findings<br>
                    Download the PDF report for complete documentation
                </p>
            </div>
        </div>
        """
        return assessment_result, summary_html, audio_path, pdf_path

    except Exception as e:
        error_msg = f"Error during processing: {str(e)}"
        print(error_msg)
        return error_msg, None, None, None
# Wrapper function that calls the GPU-accelerated version
def process_video(video, resize_option):
    """Wrapper function to call GPU-accelerated processing"""
    return process_video_with_gpu(video, resize_option)
# Create and launch the Gradio interface
print("Launching CICE 2.0 Healthcare Assessment Tool...")

with gr.Blocks(title="CICE 2.0 Healthcare Assessment Tool", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # CICE 2.0 Healthcare Team Assessment Tool

    **Analyze healthcare team interactions using specific behavioral cues from the 18-point CICE 2.0 framework**

    This tool evaluates critical team behaviors including:
    - Values/ethics for interprofessional practice
    - Roles/responsibilities
    - Interprofessional communication
    - Teams and teamwork

    ---
    """)
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Video Input")

            # Video resolution dropdown
            resize_dropdown = gr.Dropdown(
                choices=[
                    "Original (No Resize)",
                    "640x480 (Fastest - Recommended for quick tests)",
                    "800x600 (Fast - Good balance)",
                    "1280x720 (HD - Best quality, slower)"
                ],
                value="800x600 (Fast - Good balance)",
                label="Video Resolution",
                info="Lower resolutions process faster and use less API quota"
            )

            video_input = gr.Video(
                label="Upload or Record Video",
                sources=["upload", "webcam"],
                format="mp4",
                include_audio=True,
                interactive=True,
                webcam_constraints={"video": {"width": 800, "height": 600}},
                autoplay=False,  # Disable autoplay for faster loading
                show_download_button=True  # Show download button immediately
            )

            # Status bar for immediate feedback
            status_bar = gr.HTML(
                value="",
                visible=False,
                elem_id="status-bar"
            )

            # Add download component for recorded videos
            recorded_video_download = gr.File(
                label="Download Recorded Video",
                interactive=False,
                visible=False
            )
| gr.Markdown(""" | |
| ### π Instructions: | |
| 1. **Select video resolution** (lower = faster processing) | |
| 2. **Upload** a pre-recorded video or **Record** using your webcam | |
| 3. Video will be saved and downloadable immediately after recording stops | |
| 4. Click **Analyze Video** (on the right) to start the assessment | |
| 5. Wait for the AI to process (1-2 minutes) | |
| 6. Listen to the **1-minute audio summary** | |
| 7. Download the **PDF report** for documentation | |
| **Video Resolution Guide:** | |
| - **640x480**: Fastest processing, uses least quota | |
| - **800x600**: Recommended balance (default) | |
| - **1280x720**: Best quality, takes longer | |
| - **Original**: No resizing (slowest) | |
| **Key Behaviors Assessed:** | |
| - Allergy/medical history identification | |
| - CPR/AED prioritization | |
| - Clear role assignments | |
| - Plain language communication | |
| - Active listening behaviors | |
| - Team respect and conflict resolution | |
| """) | |
        with gr.Column(scale=2):
            gr.Markdown("### Assessment Results")

            # Analyze button (placed in the right column)
            analyze_btn = gr.Button("Analyze Video", variant="primary", size="lg")

            # Visual summary
            summary_output = gr.HTML(
                label="Visual Summary",
                value="<p style='text-align:center; color:#6b7280; padding:40px;'>Results will appear here after analysis...</p>"
            )

            # Audio feedback - downloadable
            audio_output = gr.Audio(
                label="1-Minute Audio Summary (Downloadable)",
                type="filepath",
                interactive=False
            )

            # PDF report - downloadable
            pdf_output = gr.File(
                label="Download Full PDF Report",
                interactive=False,
                file_types=[".pdf", ".txt"]
            )

            # Detailed assessment text
            assessment_output = gr.Textbox(
                label="Detailed CICE 2.0 Assessment (Text View)",
                lines=20,
                max_lines=30,
                interactive=False,
                placeholder="Detailed assessment will appear here..."
            )
    # Footer
    gr.Markdown("""
    ---
    ### About This Assessment
    This tool uses Google's Gemini AI to identify specific behavioral markers that indicate effective interprofessional collaboration
    in healthcare settings. The assessment focuses on observable actions such as:
    - Verbal role assignments ("I'll do compressions")
    - Recognition phrases ("Good catch on the allergy bracelet")
    - Plain language use instead of medical jargon
    - Pre-brief and post-brief team discussions

    **Output Files:**
    - 1-minute audio summary (MP3 format)
    - Complete PDF assessment report

    **Powered by Google Gemini 2.0 Flash | ZeroGPU on Hugging Face Spaces**
    """)
    # Auto-save video when recording stops, with immediate status feedback
    video_input.stop_recording(
        fn=show_saving_status,
        inputs=[video_input],
        outputs=[status_bar, video_input],
        api_name="show_status"
    ).then(
        fn=save_recorded_video_with_status,
        inputs=[video_input],
        outputs=[recorded_video_download, status_bar],
        api_name="save_video"
    ).then(
        fn=lambda x: gr.update(visible=bool(x)),
        inputs=[recorded_video_download],
        outputs=[recorded_video_download]
    ).then(
        fn=lambda: time.sleep(3),
        inputs=[],
        outputs=[]
    ).then(
        fn=lambda: gr.update(value="", visible=False),
        inputs=[],
        outputs=[status_bar]
    )
    # Connect the analyze button
    analyze_btn.click(
        fn=process_video,
        inputs=[video_input, resize_dropdown],
        outputs=[assessment_output, summary_output, audio_output, pdf_output],
        api_name="analyze"
    )

# Launch the app
if __name__ == "__main__":
    demo.launch()