Commit c0b1550 by thlinhares
Parent: 30849ac
Commit message: teste

Files changed:
- app.py (+23, -39)
- app_old.py → app_streamlit.py (renamed, +36, -19)
app.py CHANGED

@@ -1,12 +1,12 @@
 import io
+
 import requests
 import torch
 from PIL import Image
-
+from rich import print
 from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
 
 
-# Função para baixar e processar a imagem
 def download_image(url):
     headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
@@ -16,7 +16,6 @@ def download_image(url):
     return Image.open(io.BytesIO(resp.content)).convert("RGB")
 
 
-# Função para gerar a descrição com base na imagem e no prompt
 def generate(images, prompt, processor, model, device, dtype, generation_config):
     inputs = processor(
         images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
@@ -26,49 +25,34 @@ def generate(images, prompt, processor, model, device, dtype, generation_config)
     return response
 
 
-# Função principal da interface Streamlit
 def main():
-    #
+    # step 1: Setup constant
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     dtype = torch.float16
 
-    #
+    # step 2: Load Processor and Model
     processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
     generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
     model = AutoModelForCausalLM.from_pretrained(
         "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
     ).to(device)
 
-    #
-    st.title("Análise de Imagem Médica com CheXagent")
-
-    # Entrada para o URL da imagem
-    image_url = st.text_input("Insira o URL da imagem:", "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg")
-
-    # Entrada para o prompt
-    prompt = st.text_input("Insira o prompt para a análise:", "Descreva o \"Airway\"")
-
-    # Botão para gerar a resposta
-    if st.button("Gerar Análise"):
-        try:
-            # Passo 1: Baixar a imagem
-            st.write("Baixando a imagem...")
-            images = [download_image(image_url)]
-
-            # Exibir a imagem na interface
-            st.image(images[0], caption="Imagem fornecida", use_column_width=True)
-
-            # Passo 2: Gerar a resposta
-            st.write(f"Gerando a análise para: {prompt}...")
-            response = generate(images, prompt, processor, model, device, dtype, generation_config)
-
-            # Exibir a resposta gerada
-            st.subheader("Análise Gerada:")
-            st.write(response)
-
-        except Exception as e:
-            st.error(f"Ocorreu um erro: {e}")
-
-
-if __name__ == "__main__":
-    main()
+    # step 3: Fetch the images
+    image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
+    images = [download_image(image_path)]
+
+    # step 4: Generate the Findings section
+    for anatomy in anatomies:
+        prompt = f'Describe "{anatomy}"'
+        response = generate(images, prompt, processor, model, device, dtype, generation_config)
+        print(f"Generating the Findings for [{anatomy}]:")
+        print(response)
+
+
+if __name__ == '__main__':
+    print(f"Start the Findings")
+    anatomies = [
+        "Airway", "Breathing", "Cardiac", "Diaphragm",
+        "Everything else (e.g., mediastinal contours, bones, soft tissues, tubes, valves, and pacemakers)"
+    ]
+    main()
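Note: neither side of the diff shows the middle of generate() (file lines 22-24 fall between the hunks), so the function body never appears in full. Below is a minimal sketch of the elided part, assuming the standard StanfordAIMI/CheXagent-8b usage pattern; the .to(...) move, the model.generate(...) call, and the tokenizer decode are assumptions, not taken from this page:

def generate(images, prompt, processor, model, device, dtype, generation_config):
    # Shown in the diff: build multimodal inputs from at most two images
    # and the chat-style prompt.
    inputs = processor(
        images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
    ).to(device=device, dtype=dtype)  # assumed: move inputs to the model's device/dtype
    # Assumed middle: run generation with the repo's GenerationConfig and
    # decode the first returned sequence to text.
    output = model.generate(**inputs, generation_config=generation_config)[0]
    response = processor.tokenizer.decode(output, skip_special_tokens=True)
    # Shown in the diff: return the decoded report text.
    return response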
app_old.py → app_streamlit.py RENAMED

@@ -1,12 +1,12 @@
 import io
-
 import requests
 import torch
 from PIL import Image
-
+import streamlit as st
 from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
 
 
+# Função para baixar e processar a imagem
 def download_image(url):
     headers = {
         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
@@ -16,6 +16,7 @@ def download_image(url):
     return Image.open(io.BytesIO(resp.content)).convert("RGB")
 
 
+# Função para gerar a descrição com base na imagem e no prompt
 def generate(images, prompt, processor, model, device, dtype, generation_config):
     inputs = processor(
         images=images[:2], text=f" USER: <s>{prompt} ASSISTANT: <s>", return_tensors="pt"
@@ -25,33 +26,49 @@ def generate(images, prompt, processor, model, device, dtype, generation_config)
     return response
 
 
+# Função principal da interface Streamlit
 def main():
-    #
+    # Definir o dispositivo de execução (GPU ou CPU)
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     dtype = torch.float16
 
-    #
+    # Carregar o modelo e o processador
    processor = AutoProcessor.from_pretrained("StanfordAIMI/CheXagent-8b", trust_remote_code=True)
     generation_config = GenerationConfig.from_pretrained("StanfordAIMI/CheXagent-8b")
     model = AutoModelForCausalLM.from_pretrained(
         "StanfordAIMI/CheXagent-8b", torch_dtype=dtype, trust_remote_code=True
     ).to(device)
 
-    #
-    image_path = "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg"
-    images = [download_image(image_path)]
+    # Título da aplicação Streamlit
+    st.title("Análise de Imagem Médica com CheXagent")
+
+    # Entrada para o URL da imagem
+    image_url = st.text_input("Insira o URL da imagem:", "https://upload.wikimedia.org/wikipedia/commons/3/3b/Pleural_effusion-Metastatic_breast_carcinoma_Case_166_%285477628658%29.jpg")
+
+    # Entrada para o prompt
+    prompt = st.text_input("Insira o prompt para a análise:", "Descreva o \"Airway\"")
+
+    # Botão para gerar a resposta
+    if st.button("Gerar Análise"):
+        try:
+            # Passo 1: Baixar a imagem
+            st.write("Baixando a imagem...")
+            images = [download_image(image_url)]
+
+            # Exibir a imagem na interface
+            st.image(images[0], caption="Imagem fornecida", use_column_width=True)
 
-    #
-    for anatomy in anatomies:
-        prompt = f'Describe "{anatomy}"'
-        response = generate(images, prompt, processor, model, device, dtype, generation_config)
-        print(f"Generating the Findings for [{anatomy}]:")
-        print(response)
+            # Passo 2: Gerar a resposta
+            st.write(f"Gerando a análise para: {prompt}...")
+            response = generate(images, prompt, processor, model, device, dtype, generation_config)
+
+            # Exibir a resposta gerada
+            st.subheader("Análise Gerada:")
+            st.write(response)
+
+        except Exception as e:
+            st.error(f"Ocorreu um erro: {e}")
 
 
-if __name__ == '__main__':
-    anatomies = [
-        "Airway", "Breathing", "Cardiac", "Diaphragm",
-        "Everything else (e.g., mediastinal contours, bones, soft tissues, tubes, valves, and pacemakers)"
-    ]
-    main()
+if __name__ == "__main__":
+    main()
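Both versions likewise elide the middle of download_image() (file lines 13-15 fall between the hunks). A minimal sketch of what those lines plausibly contain, given that resp is consumed on line 16; the requests.get call and the raise_for_status() check are assumptions:

import io
import requests
from PIL import Image

def download_image(url):
    # Shown in the diff: send a browser-like User-Agent so image hosts serve the file.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }
    # Assumed (lines 13-15): fetch the image bytes and fail fast on HTTP errors.
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    # Shown in the diff: decode the bytes and normalize to RGB.
    return Image.open(io.BytesIO(resp.content)).convert("RGB")

After this commit the Space's entry point, app.py, prints the findings for each anatomy to the console via rich, while the Streamlit interface (with its Portuguese UI strings, e.g. "Gerar Análise" = "Generate Analysis") lives on as app_streamlit.py and could be launched separately with: streamlit run app_streamlit.py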