Krithika Rangarajan*, Pranjal Aggarwal*, Dhruv Kumar Gupta, Rohan Dhanakshirur, Akhil Baby, Chandan Pal, Arun Kumar Gupta, Smriti Hari, Subhashis Banerjee, Chetan Arora,
" \ + + "Publication | Website | Github Repo
" \ + + "\ + Deep learning suffers from some problems similar to human radiologists, such as poor sensitivity to detection of isodense, obscure masses or cancers in dense breasts. Traditional radiology teaching can be incorporated into the deep learning approach to tackle these problems in the network. Our method suggests collaborative network design, and incorporates core radiology principles resulting in SOTA results. You can use this demo to run inference by providing bilateral mammogram images. To get started, you can try one of the preset examples. \ +
" \ + + "[Note: Inference on CPU may take up to 2 minutes. On a GPU, inference time is approximately 1s.]
" + # gr.HTML(description) + gr.Markdown(description) + + # head_html = gr.HTML(''' + #+ # Give bilateral mammograms(both left and right sides), and let our model find the cancers! + #
+ + #+ # This is an official demo for our paper: + # `Deep Learning for Detection of iso-dense, obscure masses in mammographically dense breasts`. + # Check out the paper and code for details! + #
def _downscale(img):
    """Resize *img* so its larger spatial dimension becomes 800 px, keeping aspect ratio.

    BUGFIX: the original compared ``img.shape[1]`` (width) with ``img.shape[2]``
    (the channel count, always 3 for a BGR image), so the width branch was
    effectively always taken. The spatial dimensions of a cv2/NumPy image are
    ``shape[0]`` (height) and ``shape[1]`` (width).
    """
    ratio = max(img.shape[0], img.shape[1]) / 800
    return cv2.resize(img, (0, 0), fx=1 / ratio, fy=1 / ratio)


def generate_preds(img1, img2):
    """Run bilateral inference for both sides and return the annotated image paths.

    Args:
        img1: filepath of the left mammogram image.
        img2: filepath of the right mammogram image.

    Returns:
        Tuple ``('img_out1.jpg', 'img_out2.jpg')`` — JPEGs with detections drawn,
        written to the working directory (Gradio serves them back by filepath).
    """
    # Left side: predict(left, right); right side: swap inputs and tell the
    # model the base view is no longer the left breast.
    img_out1 = _downscale(predict(img1, img2))
    img_out2 = _downscale(predict(img2, img1, baseIsLeft=False))

    cv2.imwrite('img_out1.jpg', img_out1)
    cv2.imwrite('img_out2.jpg', img_out2)
    return 'img_out1.jpg', 'img_out2.jpg'


with gr.Column():
    with gr.Row(variant='panel'):
        with gr.Column(variant='panel'):
            img1 = gr.Image(type="filepath", label="Left Image")
            img2 = gr.Image(type="filepath", label="Right Image")

        with gr.Column(variant='panel'):
            img_out1 = gr.Image(type="filepath", label="Output for Left Image", shape=None)
            img_out1.style(height=250 * 2)

        with gr.Column(variant='panel'):
            img_out2 = gr.Image(type="filepath", label="Output for Right Image", shape=None)
            img_out2.style(height=250 * 2)

    with gr.Row():
        sub_btn = gr.Button("Predict!", variant="primary")

    # Preset example pairs shipped with the demo (img1..img5, left/right views).
    gr.Examples(
        [[f'sample_images/img{idx}_l.jpg', f'sample_images/img{idx}_r.jpg'] for idx in range(1, 6)],
        inputs=[img1, img2],
    )

    # Direct function reference — the original lambda wrapper added nothing.
    sub_btn.click(fn=generate_preds, inputs=[img1, img2], outputs=[img_out1, img_out2])
lambda x: gr.update(visible = True), inputs = [sub_btn], outputs = [img_out1, img_out2]) + + # gr.Examples( + + # ) + + + # interface.render() + # Object Detection Interface + +# def generate_predictions(img1, img2): +# return img1 + +# interface = gr.Interface( +# fn=generate_predictions, +# inputs=[gr.inputs.Image(type="pil", label="Left Image"), gr.inputs.Image(type="pil", label="Right Image")], +# outputs=[gr.outputs.Image(type="pil", label="Output Image")], +# title="Object Detection", +# description="This model is trained on DenseMammogram dataset. It can detect objects in images. Try it out!", +# allow_flagging = False +# ).launch(share = True, show_api=False) + + +if __name__ == '__main__': + demo.launch(share = True, show_api=False) diff --git a/img_out1.jpg b/img_out1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e4dae7f8d9cd887e4d7625fa87047cdd8b99153 --- /dev/null +++ b/img_out1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:278c18719edfb89968f1c3b8018d89565a985959a4ed23753db4ae8347381826 +size 375987 diff --git a/img_out2.jpg b/img_out2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b36befd838e45c6e9646026f696c9a0462d9725 --- /dev/null +++ b/img_out2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4f2da3a653a186a4056c956372a4f3caa5936bf2c85c05ecb8b29e84eb637e4f +size 299560 diff --git a/model.py b/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e946213ba68811f6ae4c3a07c17ed8f074b2d97e --- /dev/null +++ b/model.py @@ -0,0 +1,57 @@ +import sys +sys.path.append('DenseMammogram') + +import torch + +from models import get_FRCNN_model, Bilateral_model + +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") +frcnn_model = get_FRCNN_model().to(device) +bilat_model = Bilateral_model(frcnn_model).to(device) + +FRCNN_PATH = 'pretrained_models/frcnn/frcnn_models/frcnn_model.pth' +BILAR_PATH = 
'pretrained_models/BILATERAL/bilateral_models/bilateral_model.pth'

# Restore pretrained weights onto the device chosen above.
frcnn_model.load_state_dict(torch.load(FRCNN_PATH, map_location=device))
bilat_model.load_state_dict(torch.load(BILAR_PATH, map_location=device))

import os
import torchvision.transforms as T
import cv2
from tqdm import tqdm
import detection.transforms as transforms
from dataloaders import get_direction


def predict(left_file, right_file, threshold=0.80, baseIsLeft=True):
    """Run the bilateral model on a mammogram pair and draw detections on the base view.

    Args:
        left_file: path of the image to annotate (the "base" view).
        right_file: path of the contralateral image used as context.
        threshold: minimum detection score for a box to be drawn.
        baseIsLeft: True when ``left_file`` is the left breast; selects which
            input is mirrored so both views share one orientation.

    Returns:
        BGR image (NumPy array, as read by cv2) of ``left_file`` with green
        boxes and a 'Cancer: NN%' confidence label above each detection.
    """
    model = bilat_model
    with torch.no_grad():
        to_tensor = T.Compose([T.ToPILImage(), T.ToTensor()])
        model.eval()

        # Load both views and convert to tensors (base view first).
        base = to_tensor(cv2.imread(left_file))
        other = to_tensor(cv2.imread(right_file))

        # Mirror one side so the pair is presented in a consistent orientation.
        if baseIsLeft:
            base, _ = transforms.RandomHorizontalFlip(1.0)(base)
        else:
            other, _ = transforms.RandomHorizontalFlip(1.0)(other)

        images = [base.to(device), other.to(device)]
        output = model([images])[0]

        # Map predictions back into the un-mirrored frame of the base view.
        if baseIsLeft:
            base, output = transforms.RandomHorizontalFlip(1.0)(base, output)

        canvas = cv2.imread(left_file)
        for box, score, label in zip(output['boxes'], output['scores'], output['labels']):
            # Label 1 is the positive (cancer) class — presumably; confirm against training code.
            if label == 1 and score > threshold:
                box = box.detach().cpu().numpy().astype(int)
                cv2.rectangle(canvas, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
                # Score is rounded to 2 decimals before conversion to a percentage.
                pct = round(round(score.item(), 2) * 100, 1)
                cv2.putText(canvas, 'Cancer: ' + str(pct) + '%', (box[0], box[1] - 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 3.6, (36, 255, 12), 6)
        return canvas
sha256:e4253bd5cda58b57e1ed38cbaadd7fa7698cbc47bcd4c795f27cf0a63a7da669 +size 165725683 diff --git a/pretrained_models/AIIMS_C2/frcnn_models/frcnn_model.pth b/pretrained_models/AIIMS_C2/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..82d7dec576c35f73e2d9cf389d0dee160970cdf3 --- /dev/null +++ b/pretrained_models/AIIMS_C2/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07ca463a86317a4db3f3ed24358ddf292701ea2a0daf67b966ac325e7d0bebae +size 165725683 diff --git a/pretrained_models/AIIMS_C3/frcnn_models/frcnn_model.pth b/pretrained_models/AIIMS_C3/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..d71054e79c26d8657bf254f4249a904ff632e9d3 --- /dev/null +++ b/pretrained_models/AIIMS_C3/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51ec560b1b56b9199480dee4eaaa10f45b4b96feab9397dd90f4eb05f21fd6d5 +size 165725683 diff --git a/pretrained_models/AIIMS_C4/frcnn_models/frcnn_model.pth b/pretrained_models/AIIMS_C4/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..543cba8020366cb9a47beeaca1aab4918a2b985c --- /dev/null +++ b/pretrained_models/AIIMS_C4/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d18b23c2a1e06a11a27ebd77e87dbb6b27d54e88d92fc55d58c64957b8cdfcfb +size 165725683 diff --git a/pretrained_models/AIIMS_T1/frcnn_models/frcnn_model.pth b/pretrained_models/AIIMS_T1/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..3e97b07a885595fad7f051241119ec245332f194 --- /dev/null +++ b/pretrained_models/AIIMS_T1/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d8a1d133d3629e9c717070a66e1f2f2f846daca6765097622c2fe9f95c5a513 +size 165725683 diff --git 
a/pretrained_models/AIIMS_T2/frcnn_models/frcnn_model.pth b/pretrained_models/AIIMS_T2/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..f78339f71492d19abe2987181935ea192dd53437 --- /dev/null +++ b/pretrained_models/AIIMS_T2/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5db00c682eec86bb2b4e764b64feffa26774643dae780bf3cf81313f5ca6f8de +size 165725683 diff --git a/pretrained_models/BILATERAL/bilateral_models/bilateral_model.pth b/pretrained_models/BILATERAL/bilateral_models/bilateral_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..d6a2ad23a640e6ed77ba8381a5cbdeb9739dc96b --- /dev/null +++ b/pretrained_models/BILATERAL/bilateral_models/bilateral_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dce00a005fd102839f17c490b4a58191e92e99965b1ac7e323b71b0e75043d37 +size 490558451 diff --git a/pretrained_models/frcnn/frcnn_models/frcnn_model.pth b/pretrained_models/frcnn/frcnn_models/frcnn_model.pth new file mode 100644 index 0000000000000000000000000000000000000000..385f92b1dda33f93725f56668b0737e1fcfa276d --- /dev/null +++ b/pretrained_models/frcnn/frcnn_models/frcnn_model.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e92090fd249484577db1c9e2560c82abddffd4c62203195bf8c35a32beeed4ad +size 165725683 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..c843ddd6586849b680e42226aa340bd96c5a6e35 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +gradio +torch==1.10.2 +tqdm==4.62.3 +torchvision==0.11.3 +scipy==1.7.3 +scikit-learn==1.0.2 +PyYAML==6.0 +Pillow==8.4.0 +pandas==1.4.0 +matplotlib==3.5.1 +numpy +easydict==1.9 \ No newline at end of file diff --git a/sample_images/img1_l.jpg b/sample_images/img1_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef33003d9a6e0c09d53e32c27b2e20d1d765a2e1 
--- /dev/null +++ b/sample_images/img1_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fde9376288294b7af41b0c46e13b6adf5ae9519f3faba3dc028ee34cf373cff9 +size 2156212 diff --git a/sample_images/img1_r.jpg b/sample_images/img1_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c5ed6ba6f26933fbc39ae23865bc7108ebc0cc3 --- /dev/null +++ b/sample_images/img1_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94dac6cae01262ed04f24be0077e6e3bc95e13d2f7dca6041749f19f7c4ae1ad +size 1345186 diff --git a/sample_images/img2_l.jpg b/sample_images/img2_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6420190728853b021d204abc8db9a69b988be2b0 --- /dev/null +++ b/sample_images/img2_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c060c824c66957e98014c58d97e9a4be7f57d583e52c6cc2daaa49cddd3bc6f +size 2996699 diff --git a/sample_images/img2_r.jpg b/sample_images/img2_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f9a784dafc91cc276a1d39a7e2d2e64cdf79489 --- /dev/null +++ b/sample_images/img2_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d338befb3447da0b11cfeb3ff365a79ebe1609614086763d38897a705ab82c3 +size 2927985 diff --git a/sample_images/img3_l.jpg b/sample_images/img3_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..feb3b3326ba2c1e6735ed5215ff76eaab1191477 --- /dev/null +++ b/sample_images/img3_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a936d5256acddec0b400e383abfc84ce6ba24cd71596e433bfb7f15bcae108e +size 3221739 diff --git a/sample_images/img3_r.jpg b/sample_images/img3_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d06931cdec0511d777a3487d5e2b95a5883f7553 --- /dev/null +++ b/sample_images/img3_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a0cfb4dbd72cef0b749993b658d4f0694fbed9099b3203f2395c95b0c82ae244 +size 2665456 diff --git a/sample_images/img4_l.jpg b/sample_images/img4_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25543cdf81428b3fef6aec800c168d09f6eef650 --- /dev/null +++ b/sample_images/img4_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a72ea100c2230c8eb41b4a230362325e2c3be9c30767989fb014ee80096364e +size 3519038 diff --git a/sample_images/img4_r.jpg b/sample_images/img4_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..805083a2fae1a0186eae714668d04921bc379e3b --- /dev/null +++ b/sample_images/img4_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1762ae9f7717b54b4bd11968c5ab152a046106b4c9ef1972ec420b2b80878670 +size 3122826 diff --git a/sample_images/img5_l.jpg b/sample_images/img5_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47ad4cefde131fe2156363b1c33df372093e46e5 --- /dev/null +++ b/sample_images/img5_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80403c8145c54694d945cfc5e036dbfec93a76cd6ea345cf91b5e5bb680e040d +size 2964737 diff --git a/sample_images/img5_r.jpg b/sample_images/img5_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a05840ef96614f59eb86da7af64f1dfe5e66aa6 --- /dev/null +++ b/sample_images/img5_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d22534cd9959fff69c2444c28b8b2601a5a7ada07fdb231602677e070d498ea6 +size 3189089 diff --git a/sample_images/img6_l.jpg b/sample_images/img6_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..796a6c1333d4d69440034703f0cba9ae9a58ffae --- /dev/null +++ b/sample_images/img6_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:030632520f0c6d2aff4aba0ee56c546848ed18c543ce98da623a60e2637e3508 +size 2481050 diff --git a/sample_images/img6_r.jpg 
b/sample_images/img6_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..37f6c53240ff47137e13a94897064e9be94ffc0c --- /dev/null +++ b/sample_images/img6_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a30541a1b5ccb08d748ce24eb4a4223dbae9126bebf3f3fdacce49a73f89a46 +size 1841285 diff --git a/sample_images/img7_l.jpg b/sample_images/img7_l.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cc7d6ff091c93fc1c8329149e0d3b2c6fb1feda --- /dev/null +++ b/sample_images/img7_l.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178e7a9192a91fe44f18691a985b7f59b2873653574117f597f9615b360dd023 +size 3575404 diff --git a/sample_images/img7_r.jpg b/sample_images/img7_r.jpg new file mode 100644 index 0000000000000000000000000000000000000000..862577f4caea284fd6444282556027f938cc7d0f --- /dev/null +++ b/sample_images/img7_r.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ccdc80a26b8cf006caae398cc32b6459710b6675b32cf5c5f2f2a8a5302f452 +size 4462473