DHEIVER Xian1057 committed
Commit 97564d2 · 0 Parent(s)

Duplicate from Xian1057/histopathological_img_cls

Co-authored-by: Kee ChiongHean <Xian1057@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Histopath Cls
+ emoji: 🐨
+ colorFrom: indigo
+ colorTo: pink
+ sdk: streamlit
+ sdk_version: 1.21.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: Xian1057/histopathological_img_cls
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,116 @@
+ from fastai.vision.all import *
+ from io import BytesIO
+ from PIL import Image
+ from utils.cam import grad_cam
+ from utils.tfms import AlbTransform, get_augs  # needed so load_learner can unpickle the custom transform
+ import requests
+ import streamlit as st
+
+
+ """
+ # Histopathologic Cancer Detection
+ ## Is this a cancerous cell?
+ This is a web app that predicts whether a cell image is cancerous or not.
+ """
+
+ # List of example image paths
+ images = {
+     "Image 1": "example/c2a8ef295e49b8012e5bc83917a305057eaa1932.tif",
+     "Image 2": "example/87aed2dd32c7fe17c3af56878abd62eb5f37f925.tif",
+     "Image 3": "example/4ba0d1c62230781533c2df1d95e5b4c8f5b650a3.tif",
+     "Image 4": "example/a24ce148f6ffa7ef8eefb4efb12ebffe8dd700da.tif",
+     "Image 5": "example/d2dd0de8e583a5475a07c2f92fa3f06c7fabcd42.tif",
+     # add more images as needed
+ }
+
+
+ def predict(learn, img):
+     img = PILImage.create(img)
+     pred, key, probs = learn.predict(img)
+     grad_img = grad_cam(learn, img, pred)
+
+     col1, col2 = st.columns(2)
+     with col1:
+         st.image(img, caption='Original Image', use_column_width=True)
+
+     with col2:
+         st.image(grad_img, caption='Grad-CAM Image', use_column_width=True)
+
+     # st.write(learn_inf.predict(img))
+
+     f"""
+ ## This **{'is' if pred == '1' else 'is not'}** a cancerous cell.
+ ### Prediction result: {pred}
+ ### Probability of {pred}: {probs[key].item()*100: .2f}%
+     """
+
+
+ path = "./"
+ learn_inf = load_learner(path + "resnet50.pkl")
+
+ option = st.radio("", ["Upload Image", "Image URL", "Download Example Image", "Example"])
+
+ if option == "Upload Image":
+     uploaded_file = st.file_uploader("Please upload an image.")
+
+     if uploaded_file is not None:
+         predict(learn_inf, uploaded_file)
+
+ elif option == "Image URL":
+     url = st.text_input("Please input a URL.")
+
+     if url != "":
+         try:
+             response = requests.get(url)
+             pil_img = PILImage.create(BytesIO(response.content))
+             predict(learn_inf, pil_img)
+
+         except Exception:
+             st.text(f"Problem reading image from {url}")
+ elif option == "Download Example Image":
+     # Create columns for the images
+     cols = st.columns(len(images))
+
+
+     # Initialize selected_image in session state if it doesn't exist
+     if 'selected_image' not in st.session_state:
+         st.session_state['selected_image'] = None
+
+     for i, (img_name, img_path) in enumerate(images.items()):
+         # Open the image file
+         img = Image.open(img_path)
+
+         # Display the image in a column
+         cols[i].image(img, caption=img_name, use_column_width=True)
+
+         # Create a button for selecting the image
+         if cols[i].button(f"Select {img_name}", key=img_name):
+             st.session_state['selected_image'] = img_path
+
+     # If an image has been selected, show the download button
+     if st.session_state['selected_image'] is not None:
+         # Open the selected image file
+         img = Image.open(st.session_state['selected_image'])
+
+         # Convert the image to a byte array
+         img_byte_arr = BytesIO()
+         img.save(img_byte_arr, format='PNG')
+         img_byte_arr = img_byte_arr.getvalue()
+
+         # Create the download button for the selected image
+         st.download_button(
+             label="Download selected image",
+             data=img_byte_arr,
+             file_name="selected_image.png",
+             mime="image/png",
+         )
+ else:
+     option_1 = st.selectbox(
+         'Example Prediction',
+         ('Positive Image', 'Negative Image'))
+
+     if option_1 == 'Positive Image':
+         predict(learn_inf, img='example/c2a8ef295e49b8012e5bc83917a305057eaa1932.tif')
+     elif option_1 == 'Negative Image':
+         predict(learn_inf, img='example/87aed2dd32c7fe17c3af56878abd62eb5f37f925.tif')
+
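For reference, here is a minimal sketch (not part of this commit) of the same inference path that app.py drives through Streamlit, run as a plain script from the repo root. The file names come from this commit; everything else is an assumption for illustration.

```python
# Minimal sketch, not part of this commit: exercise the exported learner and the
# Grad-CAM helper from a plain Python session, assuming the repo root as cwd.
from fastai.vision.all import load_learner, PILImage

from utils.cam import grad_cam
from utils.tfms import AlbTransform, get_augs  # must be importable to unpickle the learner

learn = load_learner("resnet50.pkl")
img = PILImage.create("example/c2a8ef295e49b8012e5bc83917a305057eaa1932.tif")

# learn.predict returns (decoded label, label index, per-class probabilities)
pred, idx, probs = learn.predict(img)
print(f"predicted class: {pred}, probability: {probs[idx].item():.2%}")

# grad_cam returns a PIL image with the heat map overlaid on the input
grad_cam(learn, img, pred).save("cam_overlay.png")
```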
example/4ba0d1c62230781533c2df1d95e5b4c8f5b650a3.tif ADDED
example/87aed2dd32c7fe17c3af56878abd62eb5f37f925.tif ADDED
example/a24ce148f6ffa7ef8eefb4efb12ebffe8dd700da.tif ADDED
example/c2a8ef295e49b8012e5bc83917a305057eaa1932.tif ADDED
example/d2dd0de8e583a5475a07c2f92fa3f06c7fabcd42.tif ADDED
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ fastbook
+ timm
+ albumentations
+ grad-cam
+ altair<5
resnet50.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ba8099d08b3224eab0cf7e48c98a02fdee3cb3cf6bc9449568275322cc04986
+ size 98319417
utils/cam.py ADDED
@@ -0,0 +1,26 @@
+ import numpy as np
+ import cv2
+
+ from pytorch_grad_cam import GradCAM
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+ from pytorch_grad_cam.utils.image import show_cam_on_image, preprocess_image
+ from PIL import Image
+
+ def grad_cam(model, img, cls):
+     model.eval()
+     img = np.array(img)
+     img = cv2.resize(img, (224, 224))
+     img = np.float32(img) / 255
+     input_tensor = preprocess_image(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+     # The CAM target is the positive (cancerous) class, index 1.
+     # As usual for classification, the target is the logit output
+     # before softmax for that class; the cls argument is not used here.
+     targets = [ClassifierOutputTarget(1)]
+     target_layers = [model.layer4[-1]]
+     with GradCAM(model=model, target_layers=target_layers) as cam:
+         grayscale_cams = cam(input_tensor=input_tensor, targets=targets)
+         cam_image = show_cam_on_image(img, grayscale_cams[0, :], use_rgb=True)
+     cam = np.uint8(255*grayscale_cams[0, :])
+     cam = cv2.merge([cam, cam, cam])  # grayscale heat map (computed but not returned)
+     return Image.fromarray(cam_image)
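The helper above follows the standard pytorch-grad-cam pattern. Below is a standalone sketch of those same library calls on a plain torchvision resnet50; the random input, 224x224 size, and untrained weights are assumptions for illustration only, not this Space's model.

```python
# Illustrative sketch only: the pytorch-grad-cam calls used by utils/cam.py,
# applied to a plain torchvision resnet50 with a random stand-in image.
import numpy as np
import torch
from torchvision.models import resnet50
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image

model = resnet50().eval()
rgb = np.random.rand(224, 224, 3).astype(np.float32)            # HxWx3 image in [0, 1]
input_tensor = torch.from_numpy(rgb).permute(2, 0, 1)[None]     # 1x3xHxW tensor

# Attach the CAM to the last bottleneck block and target class index 1
with GradCAM(model=model, target_layers=[model.layer4[-1]]) as cam:
    heatmap = cam(input_tensor=input_tensor, targets=[ClassifierOutputTarget(1)])[0]

overlay = show_cam_on_image(rgb, heatmap, use_rgb=True)          # uint8 HxWx3 overlay
```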
utils/tfms.py ADDED
@@ -0,0 +1,18 @@
+ from fastai.vision.all import *
+ import albumentations as Alb
+
+ class AlbTransform(Transform):
+     def __init__(self, aug): self.aug = aug
+     def encodes(self, img: PILImage):
+         aug_img = self.aug(image=np.array(img))['image']
+         return PILImage.create(aug_img)
+
+ def get_augs(): return Alb.Compose([
+     Alb.Transpose(),
+     Alb.Flip(),
+     Alb.RandomRotate90(),
+     Alb.HueSaturationValue(
+         hue_shift_limit=5,
+         sat_shift_limit=5,
+         val_shift_limit=5),
+ ])
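The training code is not included in this Space, so the following is only a hypothetical sketch of how AlbTransform and get_augs could be wired into a fastai DataBlock as item transforms; the folder layout, parent_label labelling, resize to 224px, and data path are all assumptions.

```python
# Hypothetical sketch: one way to plug the albumentations wrapper into fastai.
# The labelling function, image size, and data path below are assumptions.
from fastai.vision.all import (DataBlock, ImageBlock, CategoryBlock,
                               get_image_files, parent_label, Resize)
from utils.tfms import AlbTransform, get_augs

dblock = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    get_y=parent_label,                               # assumes class encoded as folder name
    item_tfms=[AlbTransform(get_augs()), Resize(224)],
)

# dls = dblock.dataloaders("path/to/training_images", bs=64)
# learn = vision_learner(dls, resnet50, metrics=accuracy)
```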