File size: 6,076 Bytes
6e4e485
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
 
 
 
ecf85ea
 
 
 
 
 
6e4e485
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
ecf85ea
6e4e485
 
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
 
ecf85ea
 
6e4e485
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
ecf85ea
6e4e485
 
 
 
ecf85ea
6e4e485
 
 
 
 
 
 
 
 
 
 
ecf85ea
6e4e485
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
import os
from dotenv import load_dotenv
from groq import Groq
import json
from datetime import datetime
import logging

# Set up logging: timestamped INFO-level messages for the whole module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Load environment variables from a local .env file, if present.
# GORQ_API_KEY_1..GORQ_API_KEY_4 are expected (see GorQClient below).
load_dotenv()

class GorQClient:
    """Thin wrapper around the Groq chat API that rotates through a pool of
    API keys (environment variables GORQ_API_KEY_1..GORQ_API_KEY_N) when a
    request fails, retrying once per key before giving up."""

    # Characters stripped from model output by clean_response (markdown artifacts).
    _ARTIFACT_TABLE = str.maketrans("", "", "*•#`")

    def __init__(self):
        # Index into the key pool; get_api_key() maps it to GORQ_API_KEY_{i+1}.
        self.current_api_index = 0
        self.client = Groq(api_key=self.get_api_key())
        # Log only the key *index* — never the secret key material itself.
        logging.info("GorQClient initialized with API key index: %d", self.current_api_index)

    def switch_api_key(self):
        """Advance to the next key in the pool (wrapping) and rebuild the client."""
        self.current_api_index = (self.current_api_index + 1) % self.number_of_api_keys()
        logging.info("Switched to API key index: %d", self.current_api_index)
        self.client = Groq(api_key=self.get_api_key())

    def get_api_key(self):
        """Return the current API key from the environment (None if unset)."""
        return os.getenv(f"GORQ_API_KEY_{self.current_api_index + 1}")

    def number_of_api_keys(self):
        """Return the number of API keys in the rotation pool."""
        return 4  # Change this if you add more keys

    def call_llama_model(self, user_message):
        """Call the LLaMA model with *user_message* embedded in a fracture-analysis
        prompt and return the cleaned, streamed response text.

        On any API error the client switches to the next key and retries, at most
        once per key; when every key has failed, a fixed apology string is returned.
        """
        max_retries = self.number_of_api_keys()
        retries = 0

        # The prompt does not change between retries — build it once, outside the loop.
        prompt = f"""You are a medical AI assistant specializing in fracture analysis. 
                Provide a detailed, empathetic response including:
                1. A formal report header with current date and time
                2. Detailed analysis of detected fractures
                3. Confidence assessment
                4. Immediate recommendations with explanations (Why and How)
                5. Pain management guidelines
                6. Follow-up care instructions
                7. Important precautions and warnings

                Analyze the following fracture detection data and provide recommendations:
                {user_message}
                """

        while retries < max_retries:
            try:
                logging.info("Calling LLaMA model with user message.")
                completion = self.client.chat.completions.create(
                    model="llama-3.2-11b-text-preview",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.7,
                    max_tokens=3290,
                    top_p=1,
                    stream=True,
                    stop=None
                )

                # Streamed response: accumulate the delta chunks into one string.
                response = ""
                for chunk in completion:
                    response += chunk.choices[0].delta.content or ""

                # Clean up the response to remove formatting artifacts
                response = self.clean_response(response)

                logging.info("Response received from LLaMA model.")
                return response

            except Exception as e:
                logging.error(f"API error: {e}")
                self.switch_api_key()
                retries += 1
                logging.info("Retrying with new API key.")

        return "All API keys have been exhausted. Please try again later."

    def clean_response(self, response):
        """Remove markdown artifacts (*, •, #, `) and surrounding whitespace."""
        # Single C-level pass instead of four chained .replace() calls.
        return response.translate(self._ARTIFACT_TABLE).strip()

def format_confidence_level(confidence):
    """Map a numeric confidence score onto a coarse descriptive label.

    Scores above 0.85 are "High", above 0.65 "Moderate", otherwise "Low".
    """
    for floor, label in ((0.85, "High"), (0.65, "Moderate")):
        if confidence > floor:
            return label
    return "Low"

def generate_response_based_on_yolo(yolo_output):
    """Generate a detailed, personalized report based on YOLO model output.

    Parameters
    ----------
    yolo_output : list of dict (or falsy)
        Detections, each carrying 'coordinates' [x1, y1, x2, y2], 'class',
        'confidence', and optionally 'name'.

    Returns
    -------
    str
        An LLM-generated fracture report, or a fixed no-fracture message
        when *yolo_output* is empty or None.
    """
    # Handle the empty case BEFORE building the client: constructing GorQClient
    # creates a Groq connection and reads an API key, which is wasted work
    # when there is nothing to analyze.
    if not yolo_output:
        return "Fracture Analysis Report: No fractures detected in the provided image. However, if you're experiencing pain or discomfort, please consult a healthcare professional for a thorough evaluation."

    gorq_client = GorQClient()

    # Process YOLO output to create a structured per-detection analysis.
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    details = []

    for detection in yolo_output:
        x1, y1, x2, y2 = detection['coordinates']
        cls = detection['class']
        conf = detection['confidence']
        name = detection.get('name', 'unspecified fracture')

        details.append({
            "fracture_type": name,
            "location": cls,
            "confidence_level": format_confidence_level(conf),
            "numerical_confidence": f"{conf:.4f}",
            "position": {
                "x1": x1, "y1": y1,
                "x2": x2, "y2": y2
            },
            # Static placeholders: severity cannot be judged from bounding boxes alone.
            "severity_indicators": {
                "displacement": "Requires professional assessment",
                "surrounding_tissue": "Potential soft tissue involvement"
            }
        })

    # Construct a JSON payload for the LLM prompt.
    user_message = json.dumps({
        "timestamp": current_time,
        "analysis_type": "Fracture Detection and Analysis",
        "detected_fractures": details,
        "request": "Provide a comprehensive analysis including immediate care recommendations, pain management strategies, and follow-up care instructions. Include specific details about each detected fracture and potential complications to watch for."
    }, indent=2)

    # Get the detailed narrative response from the LLM.
    return gorq_client.call_llama_model(user_message)

# Test the functionality (This part should be executed in a separate test file or interactive environment)
# NOTE: running this as a script performs a LIVE Groq API call and requires
# GORQ_API_KEY_* to be set in the environment / .env file.
if __name__ == "__main__":
    # Example YOLO output for testing: one high-confidence radius fracture.
    example_yolo_output = [
        {
            "coordinates": [100, 150, 200, 300],
            "class": "fracture",
            "confidence": 0.87,
            "name": "radius fracture"
        }
    ]

    # Generate a response based on the example YOLO output
    response = generate_response_based_on_yolo(example_yolo_output)
    print(response)