Update app.py
app.py CHANGED
@@ -1,13 +1,13 @@
 import gradio as gr
-from transformers import pipeline
-from PIL import Image
 import cv2
 import numpy as np
 import requests
 import g4f
 import time
 import os
-import
+from transformers import pipeline
+from PIL import Image
+import google.generativeai as genai
 
 theme = gr.themes.Base(
     primary_hue="cyan",
@@ -16,35 +16,26 @@ theme = gr.themes.Base(
 )
 
 API_KEY = os.getenv("API_KEY")
+genai.configure(api_key = os.environ['GOOGLE_API_KEY'])
+txt_model = genai.GenerativeModel('gemini-pro')
 
-
+BRAIN_TUMOR_API_URL = "https://api-inference.huggingface.co/models/Devarshi/Brain_Tumor_Classification"
 BREAST_CANCER_API_URL = "https://api-inference.huggingface.co/models/MUmairAB/Breast_Cancer_Detector"
 # ALZHEIMER_API_URL = "https://api-inference.huggingface.co/models/dewifaj/alzheimer_mri_classification"
 headers = {"Authorization": "Bearer "+API_KEY+"", 'Content-Type': 'application/json'}
 alzheimer_classifier = pipeline("image-classification", model="dewifaj/alzheimer_mri_classification")
 # breast_cancer_classifier = pipeline("image-classification", model="MUmairAB/Breast_Cancer_Detector")
-brain_tumor_classifier = pipeline("image-classification", model="Devarshi/Brain_Tumor_Classification")
+# brain_tumor_classifier = pipeline("image-classification", model="Devarshi/Brain_Tumor_Classification")
+
 
 # Create a function to Detect/Classify Alzheimer
 def classify_alzheimer(image):
-    # image_data = np.array(image, dtype=np.uint8)
-    # _, buffer = cv2.imencode('.jpg', image_data)
-    # binary_data = buffer.tobytes()
-
-    # response = requests.post(ALZHEIMER_API_URL, headers=headers, data=binary_data)
-    # result = {}
-    # print(response.json())
-    # for ele in response.json():
-    #     label, score = ele.values()
-    #     result[label] = score
-
-    # return result
-    image_pil = Image.open(image)
     result = alzheimer_classifier(image)
-    prediction =
-
-
-
+    prediction = {}
+    for ele in result:
+        score, label = ele.values()
+        prediction[label] = score
+    return prediction
 
 
 # Create a function to Detect/Classify Breast_Cancer
@@ -54,26 +45,27 @@ def classify_breast_cancer(image):
     binary_data = buffer.tobytes()
 
     response = requests.post(BREAST_CANCER_API_URL, headers=headers, data=binary_data)
-
-    print(response.json())
+    prediction = {}
     for ele in response.json():
         label, score = ele.values()
-
-
-    return
+        prediction[label] = score
+
+    return prediction
 
 
 # Create a function to Detect/Classify Brain_Tumor
 def classify_brain_tumor(image):
-
-
-
+    image_data = np.array(image, dtype=np.uint8)
+    _, buffer = cv2.imencode('.jpg', image_data)
+    binary_data = buffer.tobytes()
+
+    response = requests.post(BRAIN_TUMOR_API_URL, headers=headers, data=binary_data)
+    prediction = {}
+    for ele in response.json():
+        label, score = ele.values()
+        prediction[label] = score
 
-
-    prediction = result[0]
-    score = prediction['score']
-    label = prediction['label']
-    return {"score": score, "label": label}
+    return prediction
 
 
 # Create the Gradio interface
@@ -94,14 +86,14 @@ with gr.Blocks(theme=theme) as Alzheimer:
     button.click(classify_alzheimer, [image], [output])
 
     def respond(message, history):
-        bot_message = g4f.ChatCompletion.create(
-
-
-
-
-        )
-
-        return str(bot_message)
+        # bot_message = g4f.ChatCompletion.create(
+        #     model="gemini",
+        #     provider=g4f.Provider.GeminiProChat,
+        #     messages=[{"role": "user",
+        #                "content": "Your role is Alzheimer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Alzheimer or not. If it is not related to Alzheimer then simply avoid the query by saying this is not my expertise, whereas if related to Alzheimer reply it as usual. Here's the user Query:" + message}],
+        # )
+        bot_message = txt_model.generate_content("Your role is Alzheimer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Alzheimer or not. If it is not related to Alzheimer then simply avoid the query by saying this is not my expertise, whereas if related to Alzheimer reply it as usual. Here's the user Query:" + message)
+        return str(bot_message.text)
 
 
     with gr.Column():
@@ -115,7 +107,7 @@ with gr.Blocks(theme=theme) as BreastCancer:
     with gr.Column():
         gr.Markdown("# Breast Cancer Detection and Classification")
         gr.Markdown("> Classify the breast cancer.")
-        image = gr.Image(
+        image = gr.Image()
        output = gr.Label(label='Breast Cancer Classification', container=True, scale=2)
     with gr.Row():
         button = gr.Button(value="Submit", variant="primary")
@@ -129,14 +121,14 @@ with gr.Blocks(theme=theme) as BreastCancer:
     button.click(classify_breast_cancer, [image], [output])
 
     def respond(message, history):
-        bot_message = g4f.ChatCompletion.create(
-
-
-
-
-        )
-
-        yield str(bot_message)
+        # bot_message = g4f.ChatCompletion.create(
+        #     model="gpt-4-32k-0613",
+        #     provider=g4f.Provider.GeekGpt,
+        #     messages=[{"role": "user",
+        #                "content": "Your role is Breast_Cancer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Breast_Cancer or not. If it is not related to Breast_Cancer then simply avoid the query by saying this is not my expertise, whereas if related to Breast_Cancer reply it as usual. Here's the user Query:" + message}],
+        # )
+        bot_message = txt_model.generate_content("Your role is Breast_Cancer Disease Expert. Now I will provide you with the user query. First check if the user query is related to Breast_Cancer or not. If it is not related to Breast_Cancer then simply avoid the query by saying this is not my expertise, whereas if related to Breast_Cancer reply it as usual. Here's the user Query:" + message)
+        yield str(bot_message.text)
 
     with gr.Column():
         gr.Markdown("# Health Bot for Breast Cancer")
@@ -149,7 +141,7 @@ with gr.Blocks(theme=theme) as BrainTumor:
     with gr.Column():
         gr.Markdown("# Brain Tumor Detection and Classification")
         gr.Markdown("> Classify the Brain Tumor.")
-        image = gr.Image(
+        image = gr.Image()
        output = gr.Label(label='Brain_Tumor Classification', container=True, scale=2)
     with gr.Row():
         button = gr.Button(value="Submit", variant="primary")
@@ -163,14 +155,14 @@ with gr.Blocks(theme=theme) as BrainTumor:
     button.click(classify_brain_tumor, [image], [output])
 
    def respond(message, history):
-        bot_message = g4f.ChatCompletion.create(
-
-
-
-
-        )
-
-        return str(bot_message)
+        # bot_message = g4f.ChatCompletion.create(
+        #     model="gpt-4-32k-0613",
+        #     provider=g4f.Provider.GeekGpt,
+        #     messages=[{"role": "user",
+        #                "content": "Your role is Brain Tumor Disease Expert. Now I will provide you with the user query. First check if the user query is related to Brain Tumor or not. If it is not related to Brain Tumor then simply avoid the query by saying this is not my expertise, whereas if related to Brain Tumor reply it as usual. Here's the user Query:" + message}],
+        # )
+        bot_message = txt_model.generate_content("Your role is Brain Tumor Disease Expert. Now I will provide you with the user query. First check if the user query is related to Brain Tumor or not. If it is not related to Brain Tumor then simply avoid the query by saying this is not my expertise, whereas if related to Brain Tumor reply it as usual. Here's the user Query:" + message)
+        return str(bot_message.text)
 
     with gr.Column():
         gr.Markdown("# Health Bot for Brain Tumor")
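
For context, a minimal standalone sketch of the request pattern the updated classify_brain_tumor() relies on: the JPEG-encoded image bytes are POSTed to the Hugging Face Inference API and the returned list of label/score pairs is folded into the dict that gr.Label renders. The helper name, the dict comprehension, and the local file path below are illustrative, not part of this commit.

# Illustrative sketch only (not part of app.py): same call pattern as the new classify_brain_tumor().
import os
import requests

BRAIN_TUMOR_API_URL = "https://api-inference.huggingface.co/models/Devarshi/Brain_Tumor_Classification"
headers = {"Authorization": "Bearer " + os.getenv("API_KEY", "")}

def classify_jpeg_bytes(binary_data):
    # The Inference API returns a list such as [{"label": <class name>, "score": <probability>}, ...];
    # folding it into {label: score} is exactly the shape gr.Label expects.
    response = requests.post(BRAIN_TUMOR_API_URL, headers=headers, data=binary_data)
    return {ele["label"]: ele["score"] for ele in response.json()}

# Hypothetical usage with a local test image:
# with open("mri_scan.jpg", "rb") as f:
#     print(classify_jpeg_bytes(f.read()))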
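Likewise, a minimal sketch of the Gemini-backed respond() pattern that replaces the g4f call, assuming GOOGLE_API_KEY is set in the environment; the gr.ChatInterface wiring at the end is an assumption for illustration, since the surrounding Blocks layout is outside this diff's context lines.

# Illustrative sketch only: the generate_content() pattern used by the new respond() handlers.
import os
import google.generativeai as genai
import gradio as gr

genai.configure(api_key=os.environ['GOOGLE_API_KEY'])
txt_model = genai.GenerativeModel('gemini-pro')

def respond(message, history):
    # Prepend the domain-expert instruction from app.py, then return the plain text of the reply.
    prompt = "Your role is Alzheimer Disease Expert. ... Here's the user Query:" + message
    bot_message = txt_model.generate_content(prompt)
    return str(bot_message.text)

# Assumed wiring (not shown in the diff): gr.ChatInterface(respond).launch()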