Files changed (1)
  1. backend.py +113 -660
backend.py CHANGED
@@ -1,683 +1,136 @@
1
- from ast import List
2
- from fastapi import FastAPI, UploadFile, File, Form, HTTPException,APIRouter, Request
3
- from fastapi.middleware.cors import CORSMiddleware
4
- from pydantic import BaseModel
5
- from typing import Optional
6
- import pytesseract
7
- from PIL import Image
8
- import io
9
- import fitz
10
  import base64
11
- import traceback
12
- import pandas as pd
13
  import re
14
- import os
15
- import google.generativeai as genai
16
- from dotenv import load_dotenv
17
- from fastapi.responses import RedirectResponse
18
- from fastapi.staticfiles import StaticFiles
19
- import firebase_admin
20
- from firebase_admin import credentials, firestore
21
- from google.generativeai import generative_models
22
-
23
- from api_key import GEMINI_API_KEY
24
- from bert import analyze_with_clinicalBert, classify_disease_and_severity, extract_non_negated_keywords, analyze_measurements, detect_past_diseases
25
- from disease_links import diseases as disease_links
26
- from disease_steps import disease_next_steps
27
- from disease_support import disease_doctor_specialty, disease_home_care
28
- from past_reports import router as reports_router, db_fetch_reports
29
-
30
- model = genai.GenerativeModel('gemini-1.5-flash')
31
- df = pd.read_csv("measurement.csv")
32
- df.columns = df.columns.str.lower()
33
- df['measurement'] = df['measurement'].str.lower()
34
-
35
- app = FastAPI()
36
- api = APIRouter(prefix="/api")
37
- app.include_router(api)
38
-
39
- EXTRACTED_TEXT_CACHE = ""
40
-
41
- app.mount("/app", StaticFiles(directory="web", html=True), name="web")
42
- app.include_router(reports_router)
43
-
44
- app.add_middleware(
45
- CORSMiddleware,
46
- allow_origins=["*"],
47
- allow_credentials=True,
48
- allow_methods=["*"],
49
- allow_headers=["*"],
50
- )
51
 
52
- @app.get("/")
53
- def root():
54
- return RedirectResponse(url="/app/")
 
55
 
56
- try:
57
- gemini_api_key = os.environ.get("GEMINI_API_KEY", GEMINI_API_KEY)
58
- if not gemini_api_key:
59
- raise ValueError("No Gemini API key found in environment or api_key.py")
60
- genai.configure(api_key=gemini_api_key)
61
- except Exception as e:
62
- raise RuntimeError(f"Failed to configure Gemini API: {e}")
63
 
 
64
  try:
65
- cred_path = os.environ.get("FIREBASE_SERVICE_ACCOUNT_KEY_PATH", "firebase_key.json")
66
-
67
- if not os.path.exists(cred_path):
68
- raise ValueError(
69
- f"Firebase service account key not found. Looked for: {cred_path}. "
70
- "Set FIREBASE_SERVICE_ACCOUNT_KEY_PATH or place firebase_key.json in project root."
71
- )
72
-
73
- cred = credentials.Certificate(cred_path)
74
- firebase_admin.initialize_app(cred)
75
- db = firestore.client()
76
- except Exception as e:
77
- raise RuntimeError(f"Failed to configure Firebase: {e}")
78
-
79
-
80
- # --- Pydantic Models for API Endpoints ---
81
- class ChatRequest(BaseModel):
82
- user_id: Optional[str] = "anonymous"
83
- question: str
84
-
85
- class ChatResponse(BaseModel):
86
- answer: str
87
-
88
- class TextRequest(BaseModel):
89
- text: str
90
-
91
- # --- System Prompt for Gemini ---
92
- system_prompt_chat = """
93
- *** Role: Medical Guidance Facilitator
94
- *** Objective:
95
- Analyze medical data, provide concise, evidence-based insights, and recommend actionable next steps for patient care. This includes suggesting local physicians or specialists within a user-specified mile radius, prioritizing in-network options when insurance information is available, and maintaining strict safety compliance with appropriate disclaimers.
96
- *** Capabilities:
97
- 1. Report Analysis – Review and interpret findings in uploaded medical reports.
98
- 2. Historical Context – Compare current findings with any available previous reports.
99
- 3. Medical Q&A – Answer specific questions about the report using trusted medical sources.
100
- 4. Specialist Matching – Recommend relevant physician specialties for identified conditions.
101
- 5. Safety Protocols – Include a brief disclaimer encouraging users to verify information, confirm insurance coverage, and consult providers directly.
102
- *** Response Structure:
103
- Start with a direct answer to the user’s primary question (maximum 4 concise sentences, each on a new line).
104
- If a physician/specialist is needed, recommend at least two local providers within the requested radius (include name, specialty, address, distance, and contact info).
105
- If insurance details are available, indicate which physicians are in-network.
106
- End with a short safety disclaimer.
107
- ***Input Fields:
108
- Provided Document Text: {document_text}
109
- User Question: {user_question}
110
- Assistant Answer:
111
- """
112
-
113
- def extract_images_from_pdf_bytes(pdf_bytes: bytes) -> list:
114
- print("***Start of Code***")
115
- doc = fitz.open(stream=pdf_bytes, filetype="pdf")
116
- images = []
117
- for page in doc:
118
- pix = page.get_pixmap()
119
- buf = io.BytesIO()
120
- buf.write(pix.tobytes("png"))
121
- images.append(buf.getvalue())
122
- return images
123
-
124
- def clean_ocr_text(text: str) -> str:
125
- text = text.replace("\x0c", " ")
126
- text = text.replace("\u00a0", " ")
127
- text = re.sub(r'(\d)\s*\.\s*(\d)', r'\1.\2', text)
128
- text = re.sub(r'\s+', ' ', text)
129
- return text.strip()
130
-
131
-
132
- def ocr_text_from_image(image_bytes: bytes) -> str:
133
- base64_image = base64.b64encode(image_bytes).decode('utf-8')
134
 
135
- image_content = {
136
- 'mime_type': 'image/jpeg',
137
- 'data': base64_image
138
- }
139
-
140
- prompt = "Could you read this document and just take all the text that is in it and just paste it back to me in text format. Open and read this document:"
141
-
142
- response = model.generate_content(
143
- [prompt, image_content]
144
  )
145
 
146
- response_text = response.text
147
- print(response_text)
148
-
149
- return response_text
150
-
151
- def get_past_reports_from_sqllite(user_id: str):
152
- try:
153
- reports = db_fetch_reports(user_id=user_id, limit=10, offset=0)
154
-
155
- history_text = ""
156
- for report in reports:
157
- history_text += f"Report from {report.get('report_date', 'N/A')}:\n{report.get('ocr_text', 'No OCR text found')}\n\n"
158
- except Exception as e:
159
- history_text = "No past reports found for this user."
160
- return history_text
161
-
162
- @app.post("/analyze/")
163
- async def analyze(
164
- file: UploadFile = File(...),
165
- model: Optional[str] = Form("bert"),
166
- mode: Optional[str] = Form(None)
167
- ):
168
- global resolution, EXTRACTED_TEXT_CACHE
169
- if not file.filename:
170
- raise HTTPException(status_code=400, detail="No file uploaded.")
171
-
172
- filename = file.filename.lower()
173
- detected_diseases = set()
174
- ocr_full = ""
175
- if filename.endswith(".pdf"):
176
- pdf_bytes = await file.read()
177
- image_bytes_list = extract_images_from_pdf_bytes(pdf_bytes)
178
- else:
179
- content = await file.read()
180
- image_bytes_list = [content]
181
-
182
- for img_bytes in image_bytes_list:
183
- ocr_text = ocr_text_from_image(img_bytes)
184
- ocr_full += ocr_text + "\n\n"
185
- ocr_full = clean_ocr_text(ocr_full)
186
- print(f"CALLING OCR FULL: {ocr_full}")
187
-
188
- EXTRACTED_TEXT_CACHE = ocr_full
189
-
190
-
191
- if model.lower() == "gemini":
192
- return {"message": "Gemini model not available; please use BERT model."}
193
 
194
- found_diseases = extract_non_negated_keywords(ocr_full)
195
- print(f"CALLING FOUND DISEASES: {found_diseases}")
196
- past = detect_past_diseases(ocr_full)
197
- print(f"CALLING PAST DISEASES: {past}")
198
 
199
- for disease in found_diseases:
200
- if disease in past:
201
- severity = classify_disease_and_severity(disease)
202
- detected_diseases.add(((f"{disease}(detected as historical condition, but still under risk.)"), severity))
203
- print(f"DETECTED DISEASES(PAST): {detected_diseases}")
204
- else:
205
- severity = classify_disease_and_severity(disease)
206
- detected_diseases.add((disease, severity))
207
- print(f"DETECTED DISEASES: {detected_diseases}")
208
-
209
- print("OCR TEXT:", ocr_text)
210
- print("Detected diseases:", found_diseases)
211
- ranges = analyze_measurements(ocr_full, df)
212
 
 
 
213
 
214
- resolution = []
215
- detected_ranges = []
216
- for disease, severity in detected_diseases:
217
- link = disease_links.get(disease.lower(), "https://www.webmd.com/")
218
- next_steps = disease_next_steps.get(disease.lower(), ["Consult a doctor."])
219
- specialist = disease_doctor_specialty.get(disease.lower(), "General Practitioner")
220
- home_care = disease_home_care.get(disease.lower(), [])
221
 
222
- resolution.append({
223
- "findings": disease.upper(),
224
- "severity": severity,
225
- "recommendations": next_steps,
226
- "treatment_suggestions": f"Consult a specialist: {specialist}",
227
- "home_care_guidance": home_care,
228
- "info_link": link
229
-
230
- })
231
-
232
- for i in ranges:
233
- condition = i[0]
234
- measurement = i[1]
235
- unit = i[2]
236
- severity = i[3]
237
- value = i[4]
238
- range_value = i[5] # renamed to avoid overwriting Python's built-in "range"
239
-
240
- link_range = disease_links.get(condition.lower(), "https://www.webmd.com/")
241
- next_steps_range = disease_next_steps.get(condition.lower(), ['Consult a doctor'])
242
- specialist_range = disease_doctor_specialty.get(condition.lower(), "General Practitioner")
243
- home_care_range = disease_home_care.get(condition.lower(), [])
244
- print(f"HELLO!: {measurement}")
245
-
246
- condition_version = condition.upper()
247
- severity_version = severity.upper()
248
-
249
- resolution.append({
250
- "findings": f"{condition_version} -- {measurement}",
251
- "severity": f"{value} {unit} - {severity_version}",
252
- "recommendations": next_steps_range,
253
- "treatment_suggestions": f"Consult a specialist: {specialist_range}",
254
- "home_care_guidance": home_care_range,
255
- "info_link": link_range
256
- })
257
-
258
- print(ocr_full)
259
- ranges = analyze_measurements(ocr_full, df)
260
- print(analyze_measurements(ocr_full, df))
261
- # print ("Ranges is being printed", ranges)
262
- historical_med_data = detect_past_diseases(ocr_full)
263
- print("***End of Code***")
264
-
265
- return {
266
- "ocr_text": ocr_full.strip(),
267
- "Detected_Anomolies": resolution,
268
- }
269
 
270
- class TextRequest(BaseModel):
271
- text: str
 
 
 
272
 
273
- @app.post("/analyze-text")
274
- async def analyze_text_endpoint(request: TextRequest):
275
  try:
276
- return analyze_text(request.text)
277
  except Exception as e:
278
- print("ERROR in /analyze-text:", traceback.format_exc())
279
- raise HTTPException(status_code=500, detail=f"Error analyzing text: {str(e)}")
280
-
281
-
282
- def analyze_text(text):
283
- severity, disease = classify_disease_and_severity(text)
284
- return {
285
- "extracted_text": text,
286
- "summary": f"Detected Disease: {disease}, Severity: {severity}"
287
- }
288
-
289
- @app.post("/chat/", response_model=ChatResponse)
290
- async def chat_endpoint(request: ChatRequest):
 
 
 
 
291
  """
292
- Chatbot endpoint that answers questions based on the last analyzed document and user history.
 
293
  """
294
- global EXTRACTED_TEXT_CACHE
295
- if not EXTRACTED_TEXT_CACHE:
296
- raise HTTPException(status_code=400, detail="Please provide a document context by analyzing text first.")
297
-
298
  try:
299
- reports_ref = db.collection('users').document(request.user_id).collection('reports')
300
- docs = reports_ref.order_by('timestamp', direction=firestore.Query.DESCENDING).limit(10).stream()
301
-
302
- history_text = ""
303
- for doc in docs:
304
- report_data = doc.to_dict()
305
- history_text += f"Report from {report_data.get('timestamp', 'N/A')}:\n{report_data.get('ocr_text', 'No OCR text found')}\n\n"
306
  except Exception as e:
307
- history_text = "No past reports found for this user."
308
-
309
- full_document_text = EXTRACTED_TEXT_CACHE + "\n\n" + "PAST REPORTS:\n" + history_text
310
-
311
- try:
312
- full_prompt = system_prompt_chat.format(
313
- document_text=full_document_text,
314
- user_question=request.question
315
- )
316
- response = model.generate_content(full_prompt)
317
- return ChatResponse(answer=response.text)
318
- except Exception as e:
319
- print(f"Gemini API error: {traceback.format_exc()}")
320
- raise HTTPException(status_code=500, detail=f"An error occurred during chat response generation: {e}")
321
-
322
- @app.get("/health/")
323
- def health():
324
- return {"response": "ok"}
325
-
326
- @app.on_event("startup")
327
- def _log_routes():
328
- from fastapi.routing import APIRoute
329
- print("Mounted routes:")
330
- for r in app.routes:
331
- if isinstance(r, APIRoute):
332
- print(" ", r.path, r.methods)
333
-
334
- # from ast import List
335
- # from fastapi import FastAPI, UploadFile, File, Form, HTTPException,APIRouter, Request
336
- # from fastapi.middleware.cors import CORSMiddleware
337
- # from pydantic import BaseModel
338
- # from typing import Optional
339
- # import pytesseract
340
- # from PIL import Image
341
- # import io
342
- # import fitz
343
- # import base64
344
- # import traceback
345
- # import pandas as pd
346
- # import re
347
- # import os
348
- # import google.generativeai as genai
349
- # from dotenv import load_dotenv
350
- # from fastapi.responses import RedirectResponse
351
- # from fastapi.staticfiles import StaticFiles
352
- # import firebase_admin
353
- # from firebase_admin import credentials, firestore
354
- # from google.generativeai import generative_models
355
-
356
- # from api_key import GEMINI_API_KEY
357
- # from bert import analyze_with_clinicalBert, classify_disease_and_severity, extract_non_negated_keywords, analyze_measurements, detect_past_diseases
358
- # from disease_links import diseases as disease_links
359
- # from disease_steps import disease_next_steps
360
- # from disease_support import disease_doctor_specialty, disease_home_care
361
- # from past_reports import router as reports_router, db_fetch_reports
362
-
363
- # model = genai.GenerativeModel('gemini-1.5-flash')
364
- # df = pd.read_csv("measurement.csv")
365
- # df.columns = df.columns.str.lower()
366
- # df['measurement'] = df['measurement'].str.lower()
367
-
368
- # disease_links = {"cholesterol": "https://www.webmd.com/cholesterol"}
369
- # disease_next_steps = {"cholesterol": ["Consult a doctor for a lipid panel."]}
370
- # disease_doctor_specialty = {"cholesterol": "Cardiologist"}
371
- # disease_home_care = {"cholesterol": ["Maintain a healthy diet."]}
372
-
373
- # app = FastAPI()
374
-
375
- # api = APIRouter(prefix="/api")
376
- # app.include_router(api)
377
-
378
-
379
- # '''app.add_middleware(
380
- # CORSMiddleware,
381
- # allow_origins=[
382
- # "http://localhost:8002"
383
- # "http://localhost:9000"
384
- # "http://localhost:5501"
385
- # ],
386
- # allow_credentials=True,
387
- # allow_methods=["*"],
388
- # allow_headers=["*"],
389
- # )'''
390
-
391
-
392
- # app.mount("/app", StaticFiles(directory="web", html=True), name="web")
393
- # app.include_router(reports_router)
394
-
395
- # app.add_middleware(
396
- # CORSMiddleware,
397
- # allow_origins=["*"],
398
- # allow_credentials=True,
399
- # allow_methods=["*"],
400
- # allow_headers=["*"],
401
- # )
402
-
403
- # @app.get("/")
404
- # def root():
405
- # return RedirectResponse(url="/app/")
406
-
407
- # EXTRACTED_TEXT_CACHE: str = ""
408
-
409
- # try:
410
- # gemini_api_key = os.environ.get("GEMINI_API_KEY", GEMINI_API_KEY)
411
- # if not gemini_api_key:
412
- # raise ValueError("No Gemini API key found in environment or api_key.py")
413
- # genai.configure(api_key=gemini_api_key)
414
- # except Exception as e:
415
- # raise RuntimeError(f"Failed to configure Gemini API: {e}")
416
-
417
- # try:
418
- # cred_path = os.environ.get("FIREBASE_SERVICE_ACCOUNT_KEY_PATH", "firebase_key.json")
419
-
420
- # if not os.path.exists(cred_path):
421
- # raise ValueError(
422
- # f"Firebase service account key not found. Looked for: {cred_path}. "
423
- # "Set FIREBASE_SERVICE_ACCOUNT_KEY_PATH or place firebase_key.json in project root."
424
- # )
425
-
426
- # cred = credentials.Certificate(cred_path)
427
- # firebase_admin.initialize_app(cred)
428
- # db = firestore.client()
429
- # except Exception as e:
430
- # raise RuntimeError(f"Failed to configure Firebase: {e}")
431
-
432
- # class ChatRequest(BaseModel):
433
- # user_id: Optional[str] = "anonymous"
434
- # question: str
435
-
436
- # class ChatResponse(BaseModel):
437
- # answer: str
438
-
439
- # system_prompt_chat = """
440
- # *** Role: Medical Guidance Facilitator
441
- # *** Objective:
442
- # Analyze medical data, provide concise, evidence-based insights, and recommend actionable next steps for patient care. This includes suggesting local physicians or specialists within a user-specified mile radius, prioritizing in-network options when insurance information is available, and maintaining strict safety compliance with appropriate disclaimers.
443
- # *** Capabilities:
444
- # 1. Report Analysis – Review and interpret findings in uploaded medical reports.
445
- # 2. Historical Context – Compare current findings with any available previous reports.
446
- # 3. Medical Q&A – Answer specific questions about the report using trusted medical sources.
447
- # 4. Specialist Matching – Recommend relevant physician specialties for identified conditions.
448
- # 5. Safety Protocols – Include a brief disclaimer encouraging users to verify information, confirm insurance coverage, and consult providers directly.
449
- # *** Response Structure:
450
- # Start with a direct answer to the user’s primary question (maximum 4 concise sentences, each on a new line).
451
- # If a physician/specialist is needed, recommend at least two local providers within the requested radius (include name, specialty, address, distance, and contact info).
452
- # If insurance details are available, indicate which physicians are in-network.
453
- # End with a short safety disclaimer.
454
- # ***Input Fields:
455
- # Provided Document Text: {document_text}
456
- # User Question: {user_question}
457
- # Assistant Answer:
458
- # """
459
-
460
- # def extract_images_from_pdf_bytes(pdf_bytes: bytes) -> list:
461
- # print("***Start of Code***")
462
- # doc = fitz.open(stream=pdf_bytes, filetype="pdf")
463
- # images = []
464
- # for page in doc:
465
- # pix = page.get_pixmap()
466
- # buf = io.BytesIO()
467
- # buf.write(pix.tobytes("png"))
468
- # images.append(buf.getvalue())
469
- # return images
470
-
471
-
472
- # def clean_ocr_text(text: str) -> str:
473
- # text = text.replace("\x0c", " ")
474
- # text = text.replace("\u00a0", " ")
475
- # text = re.sub(r'(\d)\s*\.\s*(\d)', r'\1.\2', text)
476
- # text = re.sub(r'\s+', ' ', text)
477
- # return text.strip()
478
-
479
- # def ocr_text_from_image(image_bytes: bytes) -> str:
480
- # base64_image = base64.b64encode(image_bytes).decode('utf-8')
481
-
482
- # image_content = {
483
- # 'mime_type': 'image/jpeg',
484
- # 'data': base64_image
485
- # }
486
-
487
- # prompt = "Could you read this document and just take all the text that is in it and just paste it back to me in text format. Open and read this document:"
488
-
489
- # response = model.generate_content(
490
- # [prompt, image_content]
491
- # )
492
-
493
- # response_text = response.text
494
- # print(response_text)
495
-
496
- # return response_text
497
- # def get_past_reports_from_firestore(user_id: str):
498
- # try:
499
- # reports_ref = db.collection('users').document(user_id).collection('reports')
500
- # docs = reports_ref.order_by('timestamp', direction=firestore.Query.DESCENDING).limit(10).stream()
501
-
502
- # history_text = ""
503
- # for doc in docs:
504
- # report_data = doc.to_dict()
505
- # history_text += f"Report from {report_data.get('timestamp', 'N/A')}:\n{report_data.get('ocr_text', 'No OCR text found')}\n\n"
506
- # except Exception as e:
507
- # history_text = "No past reports found for this user."
508
- # return history_text
509
-
510
- # def get_past_reports_from_sqllite(user_id: str):
511
- # try:
512
- # reports = db_fetch_reports(user_id=user_id, limit=10, offset=0)
513
-
514
- # history_text = ""
515
- # for report in reports:
516
- # history_text += f"Report from {report.get('report_date', 'N/A')}:\n{report.get('ocr_text', 'No OCR text found')}\n\n"
517
- # except Exception as e:
518
- # history_text = "No past reports found for this user."
519
- # return history_text
520
-
521
- # @app.post("/chat/", response_model=ChatResponse)
522
- # async def chat_endpoint(request: ChatRequest):
523
- # """
524
- # Chatbot endpoint that answers questions based on the last analyzed document and user history.
525
- # """
526
- # print("Received chat request for user:", request.user_id)
527
- # #history_text = get_past_reports_from_firestore(request.user_id)
528
- # history_text = get_past_reports_from_sqllite(request.user_id)
529
-
530
- # full_document_text = EXTRACTED_TEXT_CACHE + "\n\n" + "PAST REPORTS:\n" + history_text
531
-
532
- # if not full_document_text:
533
- # raise HTTPException(status_code=400, detail="No past reports or current data exists for this user")
534
-
535
-
536
 
537
-
538
- # try:
539
- # full_prompt = system_prompt_chat.format(
540
- # document_text=full_document_text,
541
- # user_question=request.question
542
- # )
543
- # response = model.generate_content(full_prompt)
544
- # return ChatResponse(answer=response.text)
545
- # except Exception as e:
546
- # print(f"Gemini API error: {traceback.format_exc()}")
547
- # raise HTTPException(status_code=500, detail=f"An error occurred during chat response generation: {e}")
548
-
549
- # @app.post("/analyze/")
550
- # async def analyze(
551
- # file: UploadFile = File(...),
552
- # model: Optional[str] = Form("bert"),
553
- # mode: Optional[str] = Form(None)
554
- # ):
555
- # global resolution, EXTRACTED_TEXT_CACHE
556
- # if not file.filename:
557
- # raise HTTPException(status_code=400, detail="No file uploaded.")
558
-
559
- # filename = file.filename.lower()
560
- # detected_diseases = set()
561
- # ocr_full = ""
562
- # print("Received request for file:", filename)
563
- # if filename.endswith(".pdf"):
564
- # pdf_bytes = await file.read()
565
- # image_bytes_list = extract_images_from_pdf_bytes(pdf_bytes)
566
- # else:
567
- # content = await file.read()
568
- # image_bytes_list = [content]
569
-
570
- # for img_bytes in image_bytes_list:
571
- # ocr_text = ocr_text_from_image(img_bytes)
572
- # ocr_full += ocr_text + "\n\n"
573
- # ocr_full = clean_ocr_text(ocr_full)
574
- # print(f"CALLING OCR FULL: {ocr_full}")
575
-
576
- # EXTRACTED_TEXT_CACHE = ocr_full
577
-
578
-
579
- # if model.lower() == "gemini":
580
- # return {"message": "Gemini model not available; please use BERT model."}
581
-
582
- # found_diseases = extract_non_negated_keywords(ocr_full)
583
- # past = detect_past_diseases(ocr_full)
584
-
585
- # for disease in found_diseases:
586
- # if disease in past:
587
- # severity = classify_disease_and_severity(disease)
588
- # detected_diseases.add(((f"{disease}(detected as historical condition, but still under risk.)"), severity))
589
- # else:
590
- # severity = classify_disease_and_severity(disease)
591
- # detected_diseases.add((disease, severity))
592
-
593
-
594
-
595
- # print("Detected diseases:", detected_diseases)
596
- # ranges = analyze_measurements(ocr_full, df)
597
-
598
-
599
- # resolution = []
600
- # detected_ranges = []
601
- # for disease, severity in detected_diseases:
602
- # link = disease_links.get(disease.lower(), "https://www.webmd.com/")
603
- # next_steps = disease_next_steps.get(disease.lower(), ["Consult a doctor."])
604
- # specialist = disease_doctor_specialty.get(disease.lower(), "General Practitioner")
605
- # home_care = disease_home_care.get(disease.lower(), [])
606
-
607
- # resolution.append({
608
- # "findings": disease.upper(),
609
- # "severity": severity,
610
- # "recommendations": next_steps,
611
- # "treatment_suggestions": f"Consult a specialist: {specialist}",
612
- # "home_care_guidance": home_care,
613
- # "info_link": link
614
-
615
- # })
616
-
617
- # for i in ranges:
618
- # condition = i[0]
619
- # measurement = i[1]
620
- # unit = i[2]
621
- # severity = i[3]
622
- # value = i[4]
623
- # range_value = i[5] # renamed to avoid overwriting Python's built-in "range"
624
-
625
- # link_range = disease_links.get(condition.lower(), "https://www.webmd.com/")
626
- # next_steps_range = disease_next_steps.get(condition.lower(), ['Consult a doctor'])
627
- # specialist_range = disease_doctor_specialty.get(condition.lower(), "General Practitioner")
628
- # home_care_range = disease_home_care.get(condition.lower(), [])
629
-
630
- # condition_version = condition.upper()
631
- # severity_version = severity.upper()
632
-
633
- # resolution.append({
634
- # "findings": f"{condition_version} -- {measurement}",
635
- # "severity": f"{value} {unit} - {severity_version}",
636
- # "recommendations": next_steps_range,
637
- # "treatment_suggestions": f"Consult a specialist: {specialist_range}",
638
- # "home_care_guidance": home_care_range,
639
- # "info_link": link_range
640
- # })
641
-
642
-
643
- # ranges = analyze_measurements(ocr_full, df)
644
- # print(analyze_measurements(ocr_full, df))
645
- # # print ("Ranges is being printed", ranges)
646
- # historical_med_data = detect_past_diseases(ocr_full)
647
-
648
- # return {
649
- # "ocr_text": ocr_full.strip(),
650
- # "Detected_Anomolies": resolution,
651
- # }
652
-
653
- # class TextRequest(BaseModel):
654
- # text: str
655
-
656
- # @app.post("/analyze-text")
657
- # async def analyze_text_endpoint(request: TextRequest):
658
- # try:
659
- # return analyze_text(request.text)
660
- # except Exception as e:
661
- # print("ERROR in /analyze-text:", traceback.format_exc())
662
- # raise HTTPException(status_code=500, detail=f"Error analyzing text: {str(e)}")
663
-
664
-
665
- # def analyze_text(text):
666
- # severity, disease = classify_disease_and_severity(text)
667
- # return {
668
- # "extracted_text": text,
669
- # "summary": f"Detected Disease: {disease}, Severity: {severity}"
670
- # }
671
-
672
-
673
- # @app.get("/health/")
674
- # def health():
675
- # return {"response": "ok"}
676
-
677
- # @app.on_event("startup")
678
- # def _log_routes():
679
- # from fastapi.routing import APIRoute
680
- # print("Mounted routes:")
681
- # for r in app.routes:
682
- # if isinstance(r, APIRoute):
683
- # print(" ", r.path, r.methods)
 
1
+ import os
2
  import base64
3
+ import json
 
4
  import re
5
+ import asyncio
6
+ import functools
7
+ from typing import Any, Optional
8
 
9
+ import google.generativeai as genai
10
+ from fastapi import FastAPI, UploadFile, File, Form, HTTPException
11
+ from fastapi.responses import JSONResponse
12
+ from pydantic import BaseModel
13
 
14
+ class AnalyzeRequest(BaseModel):
15
+ image_base64: str
16
+ prompt: str | None = None
 
 
 
 
17
 
18
+ API_KEY = None
19
  try:
20
+ from api_key import GEMINI_API_KEY as API_KEY # <-- match the name in api_key.py
21
+ except ImportError:
22
+ API_KEY = os.getenv("GEMINI_API_KEY")
23
 
24
+ if not API_KEY:
25
+ raise RuntimeError(
26
+ "No Google API key found. Put it in api_key.py as `GEMINI_API_KEY = '...'` or set env var GEMINI_API_KEY."
27
  )
28
 
29
+ genai.configure(api_key=API_KEY)
30
+
31
+ generation_config = {
32
+ "temperature": 0.2,
33
+ "top_p": 0.95,
34
+ "top_k": 40,
35
+ "max_output_tokens": 2048,
36
+ "response_mime_type": "application/json",
37
+ }
38
+
39
+ safety_settings = [
40
+ {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
41
+ {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
42
+ {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
43
+ {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_MEDIUM_AND_ABOVE"},
44
+ ]
45
+
46
+ system_prompt = """ As a highly skilled medical practitioner specializing in image analysis, you are tasked with examining medical images for a renowned hospital. Your expertise is crucial in identifying any anomalies, diseases, or health issues that may be present in the images. Your responsibilities include:
47
+ 1. Detailed Analysis: Thoroughly analyze each image, focusing on identifying any abnormal findings that may indicate underlying medical conditions.
48
+ 2. Finding Report: Document all observed anomalies or signs of disease. Clearly articulate these findings in a structured report format, ensuring accuracy and clarity.
49
+ 3. Recommendations and Next Steps: Provide detailed recommendations based on your findings. Outline the necessary follow-up actions or additional tests required to confirm diagnoses or assess treatment options.
50
+ 4. Treatment Suggestions: Offer preliminary treatment suggestions or interventions based on the identified conditions, collaborating with the healthcare team to develop comprehensive patient care plans.
51
+ 5. Output Format: Your output should be a JSON array (list) of objects, each describing one disease or medical finding using the structure below:
52
+ [{"findings": "Description of the first disease or condition.", "severity": "MILD/SEVERE/CRITICAL", "recommendations": ["Follow-up test 1", "Follow-up test 2"], "treatment_suggestions": ["Treatment 1", "Treatment 2"], "home_care_guidance": ["Care tip 1", "Care tip 2"] }, { "findings": "Description of the second disease or condition.", "severity": "MILD/SEVERE/CRITICAL", "recommendations": ["Follow-up test A", "Follow-up test B"], "treatment_suggestions": ["Treatment A", "Treatment B"], "home_care_guidance": ["Care tip A", "Care tip B"] } ]
53
+ Important Notes: 1. Scope of Response: Only respond if the image pertains to a human health issue. 2. Clarity of Image: Ensure the image is clear and suitable for accurate analysis. 3. Disclaimer: Accompany your analysis with the disclaimer: “Consult with a doctor before making any decisions.” 4. Your Insights are Invaluable: Your insights play a crucial role in guiding clinical decisions. Please proceed with your analysis, adhering to the structured approach outlined above. """
54
+
55
+ # Initialize model
56
+ model = genai.GenerativeModel(model_name="gemini-2.5-flash-lite")
57
 
58
+ app = FastAPI()
 
 
 
59
60
 
61
+ async def _call_model_blocking(request_inputs, generation_cfg, safety_cfg):
62
+ """Run blocking model call in threadpool (so uvicorn's event loop isn't blocked)."""
63
+ fn = functools.partial(
64
+ model.generate_content,
65
+ request_inputs,
66
+ generation_config=generation_cfg,
67
+ safety_settings=safety_cfg,
68
+ )
69
+ loop = asyncio.get_event_loop()
70
+ return await loop.run_in_executor(None, fn)
71
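For reference, `run_in_executor(None, ...)` hands the call to asyncio's default thread pool; on Python 3.9+ the same effect can be written more directly with `asyncio.to_thread`. A minimal sketch, assuming the same module context as above (`asyncio` imported, `model` already constructed):

    async def _call_model_blocking(request_inputs, generation_cfg, safety_cfg):
        # Equivalent sketch using asyncio.to_thread (Python 3.9+); positional and
        # keyword arguments are forwarded to model.generate_content unchanged.
        return await asyncio.to_thread(
            model.generate_content,
            request_inputs,
            generation_config=generation_cfg,
            safety_settings=safety_cfg,
        )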
72
 
73
+ async def analyze_image(image_bytes: bytes, mime_type: str, prompt: Optional[str] = None) -> Any:
74
+ base64_img = base64.b64encode(image_bytes).decode("utf-8")
75
+ text_prompt = (prompt or system_prompt).strip()
76
 
77
+ # Prepare the request: the inline image part plus the text prompt
78
+ request_inputs = [
79
+ {"inline_data": {"mime_type": mime_type, "data": base64_img}},
80
+ {"text": text_prompt},
81
+ ]
82
 
 
 
83
  try:
84
+ response = await _call_model_blocking(request_inputs, generation_config, safety_settings)
85
  except Exception as e:
86
+ raise RuntimeError(f"Model call failed: {e}")
87
+
88
+ # Try to extract textual content robustly
89
+ text = getattr(response, "text", None)
90
+ if not text and isinstance(response, dict):
91
+ # older or alternative shapes
92
+ candidates = response.get("candidates") or []
93
+ if candidates:
94
+ text = candidates[0].get("content") or candidates[0].get("text")
95
+ if not text:
96
+ text = str(response)
97
+
98
+ # remove triple-backtick fences and stray code hints
99
+ clean = re.sub(r"```(?:json)?", "", text).strip()
100
+
101
+ # Try to parse JSON. If strict parse fails, try to extract first JSON-like block.
102
+ try:
103
+ parsed = json.loads(clean)
104
+ return parsed
105
+ except json.JSONDecodeError:
106
+ match = re.search(r"(\[.*\]|\{.*\})", clean, re.DOTALL)
107
+ if match:
108
+ try:
109
+ return json.loads(match.group(1))
110
+ except json.JSONDecodeError:
111
+ return {"raw_found_json": match.group(1)}
112
+ return {"raw_output": clean}
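To make the fence-stripping and fallback parsing concrete, here is a tiny illustration with a made-up fenced reply (the finding text is purely illustrative, not real output):

    raw = '```json\n[{"findings": "No acute abnormality.", "severity": "MILD"}]\n```'
    clean = re.sub(r"```(?:json)?", "", raw).strip()  # drops both fence markers
    parsed = json.loads(clean)                        # -> list with one finding dict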
113
+
114
+
115
+ @app.post("/analyze")
116
+ async def analyze_endpoint(file: UploadFile = File(...), prompt: str = Form(None)):
117
  """
118
+ Upload an image file (field name `file`) and optional text `prompt`.
119
+ Returns parsed JSON (or raw model output if JSON couldn't be parsed).
120
  """
121
+ contents = await file.read() # <-- this gets the uploaded file bytes
122
+ mime = file.content_type or "image/png"
124
  try:
125
+ result = await analyze_image(contents, mime, prompt)
126
  except Exception as e:
127
+ raise HTTPException(status_code=500, detail=str(e))
128
+ return JSONResponse(content={"Detected_Anomolies": result})
129
+
130
+ @app.post("/analyze_json")
131
+ async def analyze_json(req: AnalyzeRequest):
133
+ image_bytes = base64.b64decode(req.image_base64)
134
+ result = await analyze_image(image_bytes, "image/png", req.prompt)
135
+ return {"result": result}
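For completeness, a minimal client sketch for the two new endpoints, assuming the `requests` package and a local run (host, port, and file name are illustrative, not taken from this change):

    import base64
    import requests

    BASE_URL = "http://localhost:8000"  # e.g. `uvicorn backend:app` defaults

    # Multipart upload to /analyze; the optional `prompt` form field, if sent,
    # replaces the built-in system prompt.
    with open("scan.png", "rb") as f:
        resp = requests.post(
            f"{BASE_URL}/analyze",
            files={"file": ("scan.png", f, "image/png")},
        )
    print(resp.json()["Detected_Anomolies"])  # key name as spelled in the endpoint

    # JSON body with a base64-encoded image to /analyze_json.
    with open("scan.png", "rb") as f:
        payload = {"image_base64": base64.b64encode(f.read()).decode("utf-8")}
    resp = requests.post(f"{BASE_URL}/analyze_json", json=payload)
    print(resp.json()["result"])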