import streamlit as st
import cv2
import numpy as np
import tempfile
import os
from langchain_community.document_loaders import UnstructuredImageLoader
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
# Set the Hugging Face API token (langchain-huggingface reads HUGGINGFACEHUB_API_TOKEN)
os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("hf", "")
st.set_page_config(page_title="MediAssist - Prescription Analyzer", layout="wide")
# Sidebar
st.sidebar.title("😷 Medical Chatbot")
st.sidebar.markdown("Analyze prescriptions with ease using AI")
st.sidebar.markdown("---")
# App Header
st.markdown("""
🧠 Medical Chatbot
Prescription Analyzer using AI
Upload a doctor's prescription image, and MediAssist will extract, translate, and explain it for you.
""", unsafe_allow_html=True)
# File uploader
uploaded_file = st.file_uploader("📤 Upload Prescription Image (JPG/PNG)", type=["jpg", "jpeg", "png"])
if uploaded_file:
    with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp_file:
        temp_file.write(uploaded_file.read())
        orig_path = temp_file.name
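    # delete=False keeps the temp file on disk after the "with" block closes it,
    # so OpenCV can read it below; both temp files are removed again at the end.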
    # Step 1: Read and preprocess image
    image = cv2.imread(orig_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binary_inv = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
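    # A fixed threshold of 128 assumes fairly even lighting. For phone photos
    # with shadows, adaptive thresholding is a common alternative (a sketch,
    # not part of the original pipeline):
    # binary_inv = cv2.adaptiveThreshold(
    #     gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 31, 10
    # )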
    kernel = np.ones((3, 3), np.uint8)
    dilated = cv2.dilate(binary_inv, kernel, iterations=1)

    # Save processed image for OCR
    dilated_path = orig_path.replace(".png", "_dilated.png")
    cv2.imwrite(dilated_path, dilated)
    # Load with LangChain
    loader = UnstructuredImageLoader(dilated_path)
    documents = loader.load()
    extracted_text = "\n".join([doc.page_content for doc in documents])
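    # Assumption (not in the original flow): OCR can return an empty string for
    # very low-quality scans; stop early instead of sending blank text to the LLM.
    if not extracted_text.strip():
        st.warning("⚠️ No text could be extracted from the image. Please try a clearer scan.")
        st.stop()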
    # Prompt template
    template = """
You are a helpful medical assistant.
Here is a prescription text extracted from an image:
{prescription_text}
Please do the following:
1. Extract only the medicine names mentioned in the prescription (ignore any other text).
2. For each medicine, provide:
- When to take it (timing and dosage)
- Possible side effects
- Any special instructions
Format your answer as bullet points, listing only medicines and their details.
"""
    prompt = PromptTemplate(input_variables=["prescription_text"], template=template)
    # Set up Hugging Face LLM
    llm_model = HuggingFaceEndpoint(
        repo_id="aaditya/Llama3-OpenBioLLM-70B",
        provider="nebius",
        temperature=0.6,
        max_new_tokens=300,
        task="conversational",
    )
    # ChatHuggingFace only needs the wrapped endpoint; the repo_id, provider,
    # and generation settings are taken from llm_model, so repeating them here
    # is redundant.
    model = ChatHuggingFace(llm=llm_model)
    chain = LLMChain(llm=model, prompt=prompt)
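    # Note: LLMChain is deprecated in recent LangChain releases. An equivalent
    # LCEL form (an assumption, not what this app uses) would be:
    #     chain = prompt | model
    #     response = chain.invoke({"prescription_text": extracted_text}).content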
    # Display image and extracted text
    col1, col2 = st.columns([1, 2])
    with col1:
        st.image(dilated, caption="Preprocessed Prescription", channels="GRAY", use_container_width=True)
    with col2:
        st.success("✅ Prescription Uploaded & Preprocessed Successfully")
        st.markdown("### 📜 Extracted Text")
        st.code(extracted_text)

    if st.button("🔍 Analyze Text"):
        with st.spinner("Analyzing..."):
            response = chain.run(prescription_text=extracted_text)
            st.success(response)

    # Cleanup temp files
    os.remove(orig_path)
    os.remove(dilated_path)
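    # Note: Streamlit reruns this script on every interaction, so the temp files
    # are recreated (and cleaned up again) when "Analyze Text" is clicked.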
else:
st.markdown("Upload a prescription image to begin analysis.", unsafe_allow_html=True)