MonumentDetection committed on
Commit
0f99785
·
verified ·
1 Parent(s): f932cca

Upload 236 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .vscode/settings.json +3 -0
  2. Base Model/MNAD_model.pth +3 -0
  3. Base Model/joint_model.pth +3 -0
  4. Base Model/meta_model.pth +3 -0
  5. Base Model/subset1/MNAD_model.pth +3 -0
  6. Base Model/subset1/joint_model.pth +3 -0
  7. Base Model/subset1/meta_model.pth +3 -0
  8. Base Model/subset2/subset.txt +0 -0
  9. Base Model/subset3/subset.txt +0 -0
  10. Base Model/subset4/subset.txt +0 -0
  11. __pycache__/main.cpython-39.pyc +0 -0
  12. main.py +152 -0
  13. monuments/__init__.py +0 -0
  14. monuments/__pycache__/__init__.cpython-312.pyc +0 -0
  15. monuments/__pycache__/__init__.cpython-39.pyc +0 -0
  16. monuments/__pycache__/admin.cpython-312.pyc +0 -0
  17. monuments/__pycache__/admin.cpython-39.pyc +0 -0
  18. monuments/__pycache__/apps.cpython-312.pyc +0 -0
  19. monuments/__pycache__/apps.cpython-39.pyc +0 -0
  20. monuments/__pycache__/forms.cpython-312.pyc +0 -0
  21. monuments/__pycache__/forms.cpython-39.pyc +0 -0
  22. monuments/__pycache__/models.cpython-312.pyc +0 -0
  23. monuments/__pycache__/models.cpython-39.pyc +0 -0
  24. monuments/__pycache__/urls.cpython-312.pyc +0 -0
  25. monuments/__pycache__/urls.cpython-39.pyc +0 -0
  26. monuments/__pycache__/views.cpython-312.pyc +0 -0
  27. monuments/__pycache__/views.cpython-39.pyc +0 -0
  28. monuments/faster_models/__init__.py +0 -0
  29. monuments/faster_models/__pycache__/__init__.cpython-39.pyc +0 -0
  30. monuments/faster_models/__pycache__/fasterrcnn.cpython-39.pyc +0 -0
  31. monuments/faster_models/fasterrcnn.py +281 -0
  32. monuments/migrations/0001_initial.py +21 -0
  33. monuments/migrations/__init__.py +0 -0
  34. monuments/migrations/__pycache__/0001_initial.cpython-312.pyc +0 -0
  35. monuments/migrations/__pycache__/0001_initial.cpython-39.pyc +0 -0
  36. monuments/migrations/__pycache__/__init__.cpython-312.pyc +0 -0
  37. monuments/migrations/__pycache__/__init__.cpython-39.pyc +0 -0
  38. output/file_20240304_205546.png +0 -0
  39. output/file_20240304_205646.png +0 -0
  40. output/file_20240304_205930.png +0 -0
  41. requirements.txt +17 -0
  42. static/assets/.DS_Store +0 -0
  43. static/assets/.sass-cache/262a5ebf37f39bafc9b50a6091656d2b17cbcfde/styles.scssc +0 -0
  44. static/assets/.sass-cache/806a25bbae7282c82f19338727861b808ded4f6a/styles.scssc +0 -0
  45. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_blogs.scssc +0 -0
  46. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_clients.scssc +0 -0
  47. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_common.scssc +0 -0
  48. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_component-list.scssc +0 -0
  49. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_counters.scssc +0 -0
  50. static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_features.scssc +0 -0
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "git.ignoreLimitWarning": true
3
+ }
Base Model/MNAD_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7dfe94a7e1abb025336524910e14332a21b5b50dfb2d46538ca33767f0f5b0b
3
+ size 166110137
Base Model/joint_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2043d59b07609f0a74134194e1963b7eafc37c6e5e6cba823e2f1b60fd1e61cf
3
+ size 166116260
Base Model/meta_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:529f17900dccfa3da8611b2963e754c81b6750a6a6d5025c37e8a5da25a0ee40
3
+ size 166110137
Base Model/subset1/MNAD_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7dfe94a7e1abb025336524910e14332a21b5b50dfb2d46538ca33767f0f5b0b
3
+ size 166110137
Base Model/subset1/joint_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2043d59b07609f0a74134194e1963b7eafc37c6e5e6cba823e2f1b60fd1e61cf
3
+ size 166116260
Base Model/subset1/meta_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:529f17900dccfa3da8611b2963e754c81b6750a6a6d5025c37e8a5da25a0ee40
3
+ size 166110137
Base Model/subset2/subset.txt ADDED
File without changes
Base Model/subset3/subset.txt ADDED
File without changes
Base Model/subset4/subset.txt ADDED
File without changes
__pycache__/main.cpython-39.pyc ADDED
Binary file (4.39 kB). View file
 
main.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request, Form, UploadFile
2
+ from fastapi.templating import Jinja2Templates
3
+ from fastapi.responses import HTMLResponse
4
+ from fastapi.staticfiles import StaticFiles
5
+ from pydantic import BaseModel
6
+ import io
7
+ import base64
8
+ from matplotlib.backends.backend_agg import FigureCanvasAgg
9
+ from pathlib import Path
10
+ from monuments.faster_models.fasterrcnn import fasterrcnn_resnet50_fpn, filter_pred, classes, CLASSES
11
+ import os
12
+ import shutil
13
+ from datetime import datetime
14
+ import torch
15
+ import torchvision.transforms as transforms
16
+ from PIL import Image
17
+ import numpy as np
18
+ import matplotlib.pyplot as plt
19
+ import matplotlib.patches as patches
20
+
21
+
22
+ app = FastAPI()
23
+ app.mount("/static", StaticFiles(directory="static"), name="static")
24
+ app.mount("/media", StaticFiles(directory="media"), name="media")
25
+ app.mount("/output", StaticFiles(directory="output"), name="output")
26
+
27
+ templates = Jinja2Templates(directory="templates")
28
+ imageBytes = None
29
+ model = None
30
+ class ImageForm(BaseModel):
31
+ image: UploadFile
32
+
33
+
34
@app.get("/")
async def monuments(request: Request):
    """Render the landing (welcome) page."""
    context = {"request": request}
    return templates.TemplateResponse("welcome.html", context)
37
+
38
+
39
@app.get("/index/", response_class=HTMLResponse)
async def index(request: Request):
    """Render the main upload/index page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
42
+
43
+
44
@app.post("/upload/")
async def upload(request: Request, file: UploadFile):
    """Receive an uploaded image and echo it back to the browser.

    Caches the raw upload bytes in the module-level ``imageBytes`` so a
    subsequent call to /predict/ can run a detector on the same image.
    The image is normalized to RGB, re-encoded as JPEG, and embedded in
    the response as a base64 string.
    """
    global imageBytes
    imageBytes = await file.read()

    image = Image.open(io.BytesIO(imageBytes))
    if image.mode != 'RGB':
        # JPEG cannot store alpha/palette modes; normalize first.
        image = image.convert('RGB')

    image_bytes = io.BytesIO()
    image.save(image_bytes, format="JPEG")

    # base64.b64encode never emits newlines, so no post-splitting is needed.
    contents = base64.b64encode(image_bytes.getvalue()).decode("utf-8")

    return templates.TemplateResponse("upload.html", {"request": request, "image_content": contents})
70
+
71
@app.post("/predict/")
async def predict(request: Request, model: str = Form(...), subset: str = Form(...)):
    """Run the selected Faster R-CNN checkpoint on the last uploaded image.

    Form fields:
        model:  one of "joint_model", "meta_model", "MNAD_model".
        subset: one of "subset1" .. "subset4" — the weights directory
                under "Base Model/".

    Returns the rendered predict.html with the annotated image (base64
    JPEG) and the top detection's confidence score, or a JSON detail
    message when no image was uploaded or the selection is invalid.
    """
    global imageBytes
    if imageBytes is None:
        # /predict/ was called before /upload/ in this process.
        return {"detail": "No image uploaded."}
    try:
        image = Image.open(io.BytesIO(imageBytes))
        if image.mode != 'RGB':
            image = image.convert('RGB')
    except Exception as e:
        # BUG FIX: previously this printed and fell through, crashing later
        # with an UnboundLocalError on `image`.
        print(f"Error: {e}")
        return {"detail": "Uploaded data is not a readable image."}

    if model:
        img_tensor = transforms.ToTensor()(image).unsqueeze(0)

        base_dir = os.path.dirname(__file__)
        # BUG FIX: the original checked for "subsset4" (typo), so subset4
        # could never be selected.
        if (subset in ["subset1", "subset2", "subset3", "subset4"]
                and model in ["joint_model", "meta_model", "MNAD_model"]):
            model_path = os.path.join(base_dir, 'Base Model', subset, f'{model}.pth')

            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            # Use a separate name; the original rebound the form string `model`.
            net = fasterrcnn_resnet50_fpn(num_classes=21)
            net.load_state_dict(torch.load(model_path, map_location=device))
            net.to(device)
            net.eval()

            with torch.no_grad():
                # BUG FIX: the input must be on the same device as the model,
                # otherwise inference crashes on a CUDA machine.
                predictions = net(img_tensor.to(device))

            outputs = filter_pred(predictions)
            boxes = outputs[0]['boxes'].cpu().numpy()
            labels = outputs[0]['labels'].cpu().numpy()
            scores = outputs[0]['scores'].cpu().numpy()

            # Draw the kept detections over the original image.
            fig, axs = plt.subplots(figsize=(10, 5))
            axs.imshow(np.array(image))
            axs.axis('off')
            axs.set_title('Original')
            for j, box in enumerate(boxes):
                rect = patches.Rectangle(
                    (box[0], box[1]), box[2] - box[0], box[3] - box[1],
                    linewidth=2, edgecolor='r', facecolor='none'
                )
                axs.add_patch(rect)
                axs.text(
                    box[0], box[1] - 5, f'{CLASSES[int(labels[j])]}', color='r', fontsize=10,
                    bbox=dict(facecolor='white', alpha=0.8, edgecolor='none', boxstyle='round,pad=0.2')
                )

            buffer = io.BytesIO()
            FigureCanvasAgg(fig).print_png(buffer)
            # BUG FIX: close the figure; pyplot otherwise keeps every figure
            # alive, leaking memory on each request.
            plt.close(fig)

            annotated = Image.open(io.BytesIO(buffer.getvalue())).convert('RGB')
            output = io.BytesIO()
            annotated.save(output, format="JPEG")
            contents = base64.b64encode(output.getvalue()).decode("utf-8")

            return templates.TemplateResponse(
                "predict.html",
                {"request": request, "image_content": contents,
                 "confidence_score": round(float(scores[0]), 3)}
            )
    return {"detail": "Invalid model specified."}
monuments/__init__.py ADDED
File without changes
monuments/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (150 Bytes). View file
 
monuments/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (145 Bytes). View file
 
monuments/__pycache__/admin.cpython-312.pyc ADDED
Binary file (194 Bytes). View file
 
monuments/__pycache__/admin.cpython-39.pyc ADDED
Binary file (186 Bytes). View file
 
monuments/__pycache__/apps.cpython-312.pyc ADDED
Binary file (462 Bytes). View file
 
monuments/__pycache__/apps.cpython-39.pyc ADDED
Binary file (428 Bytes). View file
 
monuments/__pycache__/forms.cpython-312.pyc ADDED
Binary file (661 Bytes). View file
 
monuments/__pycache__/forms.cpython-39.pyc ADDED
Binary file (586 Bytes). View file
 
monuments/__pycache__/models.cpython-312.pyc ADDED
Binary file (501 Bytes). View file
 
monuments/__pycache__/models.cpython-39.pyc ADDED
Binary file (410 Bytes). View file
 
monuments/__pycache__/urls.cpython-312.pyc ADDED
Binary file (574 Bytes). View file
 
monuments/__pycache__/urls.cpython-39.pyc ADDED
Binary file (397 Bytes). View file
 
monuments/__pycache__/views.cpython-312.pyc ADDED
Binary file (1.61 kB). View file
 
monuments/__pycache__/views.cpython-39.pyc ADDED
Binary file (3.47 kB). View file
 
monuments/faster_models/__init__.py ADDED
File without changes
monuments/faster_models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (152 Bytes). View file
 
monuments/faster_models/__pycache__/fasterrcnn.cpython-39.pyc ADDED
Binary file (8.59 kB). View file
 
monuments/faster_models/fasterrcnn.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torchvision.ops import MultiScaleRoIAlign
2
+ from torchvision.models.detection import FasterRCNN
3
+ from torchvision.models.detection import FasterRCNN_ResNet50_FPN_Weights
4
+ from torchvision.models.resnet import resnet50, ResNet50_Weights
5
+ from torchvision.models._utils import _ovewrite_value_param
6
+ from torchvision.models.detection._utils import overwrite_eps
7
+ from torchvision.ops import misc as misc_nn_ops
8
+ from torch import nn
9
+ from torchvision.models.detection.backbone_utils import _validate_trainable_layers, _resnet_fpn_extractor
10
+ from typing import Any, Optional, TypeVar
11
+ import torch
12
+
13
+ V = TypeVar("V")
14
+ # _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
15
+ def _ovewrite_value_param(param: str, actual: Optional[V], expected: V) -> V:
16
+ if actual is not None:
17
+ if actual != expected:
18
+ raise ValueError(f"The parameter '{param}' expected value {expected} but got {actual} instead.")
19
+ return expected
20
+
21
def fasterrcnn_resnet50_fpn(
    *,
    weights: Optional[FasterRCNN_ResNet50_FPN_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    trainable_backbone_layers: Optional[int] = None,
    extend =0,
    **kwargs: Any,
) -> FasterRCNN:
    """Build a Faster R-CNN detector with a ResNet-50-FPN backbone.

    A local copy of torchvision's builder so ``num_classes`` can be set
    freely when loading the project's fine-tuned checkpoints.

    Args:
        weights: optional pretrained detection weights; when given,
            ``num_classes`` must match (or be None) and the backbone
            weights argument is ignored.
        progress: show a download progress bar.
        num_classes: number of output classes including background
            (defaults to 91, the COCO category count).
        weights_backbone: pretrained weights for the ResNet-50 backbone.
        trainable_backbone_layers: number of unfrozen backbone stages
            (0-5; defaults to 3 when any pretrained weights are used).
        extend: unused; kept for signature compatibility with callers.
        **kwargs: forwarded to ``torchvision...FasterRCNN``.

    Returns:
        A ``FasterRCNN`` module, with weights loaded when requested.
    """
    weights = FasterRCNN_ResNet50_FPN_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        # Full-detector weights fix the class count and supersede the backbone.
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
    elif num_classes is None:
        num_classes = 91  # COCO default

    is_trained = weights is not None or weights_backbone is not None
    trainable_backbone_layers = _validate_trainable_layers(is_trained, trainable_backbone_layers, 5, 3)
    # Pretrained weights were trained with frozen batch-norm statistics.
    norm_layer = misc_nn_ops.FrozenBatchNorm2d if is_trained else nn.BatchNorm2d

    backbone = resnet50(weights=weights_backbone, progress=progress, norm_layer=norm_layer)
    backbone = _resnet_fpn_extractor(backbone, trainable_backbone_layers)
    model = FasterRCNN(backbone, num_classes=num_classes, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if weights == FasterRCNN_ResNet50_FPN_Weights.COCO_V1:
            overwrite_eps(model, 0.0)

    return model
134
+
135
def filter_pred(predicted):
    """Filter detector output down to detections scoring above 0.5.

    Args:
        predicted: list of per-image dicts with 'boxes', 'labels' and
            'scores' tensors (the Faster R-CNN inference format).

    Returns:
        A list of the same shape where each dict keeps only detections
        with score > 0.5; when an image has none, a single all-zero
        placeholder detection is emitted instead so downstream code can
        always index result[0].
    """
    results = []
    for pred in predicted:
        keep = pred['scores'] > 0.5
        if keep.any():
            entry = {key: pred[key][keep] for key in ('boxes', 'labels', 'scores')}
        else:
            # Placeholder: background label, zero box, zero confidence.
            entry = {
                'boxes': torch.tensor([[0, 0, 0, 0]]),
                'labels': torch.tensor([0]),
                'scores': torch.tensor([0]),
            }
        results.append(entry)
    return results
157
+
158
# Class-index -> monument-name lookup for the 21-class detectors
# (index 0 is the background class). Order must match training labels.
CLASSES = [
    'bg',
    'Akash Bhairav', 'Bhadrakali Temple', 'Jalbinayak',
    'Lumadhi Bhadrakali Temple Sankata', 'Maitidevi Temple', 'Patan Dhoka',
    'Sano Pashupati', 'Swoyambhunath', 'Tridevi Temple', 'ashok stupa',
    'birupakshya', 'chamunda mai', 'charumati', 'mahadev temple',
    'taleju bell_KDS', 'pratappur temple', 'chakku bakku', 'Ghantaghar',
    'kumaristhan', 'uma maheshwor',
]
181
+
182
+
183
# Full catalogue of monument class names (the complete label set the
# project was trained against). Order must match training labels.
classes = [
    'Akash Bhairav', 'ashok stupa', 'Badrinath', 'Bagbairav',
    'Balkumari, Bhaktapur', 'BalNilkantha', 'basantapur tower', 'Bhadrakali Temple',
    'bhairavnath temple', 'bhaktapur tower', 'bhimeleshvara', 'Bhimsen Temple',
    'Bhupatindra Malla Column', 'bhuvana lakshmeshvara', 'birupakshya', 'Buddha Statue',
    'chakku bakku', 'chamunda mai', 'Chandeshwori Temple', 'Char Narayan Temple',
    'charumati', 'chasin dega', 'Chayasilin Mandap', 'Dakshin Barahi',
    'degu tale', 'Dharahara', 'Fasidega Temple', 'Garud Statue',
    'garud', 'Ghantaghar', 'golden gate', 'golden temple',
    'Gopinath krishna Temple', 'guyeshwori', 'hanuman idol', 'Harishankar Temple',
    'indrapura', 'Isckon Temple', 'jagannatha temple', 'Jalbinayak',
    'Jamachen Monastry', 'jame masjid', 'jaya bageshwori', 'kala-bhairava',
    'kasthamandap', 'kavindrapura sattal', 'Kedamatha Tirtha', 'Khumbeshwor mahadev',
    'kiranteshwor mahadev', 'kirtipur tower', 'Kotilingeshvara', 'Krishna mandir PDS',
    'Krishna_temple _kobahal', 'Kumari Ghar', 'kumaristhan', 'kumbheshwor mahadev',
    'lalitpur tower', 'lokeshwor temple bhaktapur', 'Lumadhi Bhadrakali Temple Sankata', 'Mahabauddha Asan',
    'mahadev temple', 'Maipi Temple', 'Maitidevi Temple', 'manamaiju temple',
    'nagarmandap shree kriti bihar', 'narayan temple', 'National Gallery', 'Naxal Bhagwati',
    'Nyatapola temple', 'Palace of 55 Windows', 'Panchamukhi Hanuman', 'Patan Dhoka',
    'Pilot Baba', 'PimBahal Gumba', 'pratap malla column', 'pratappur temple',
    'Ram Mandir', 'Ranipokhari', 'red gumba', 'sahid gate',
    'Sankha Statue', 'Sano Pashupati', 'Santaneshwor Mahadev', 'shantidham',
    'Shiva Temple', 'shveta bhairava', 'Siddhi Lakshmi temple', 'simha sattal',
    'Swoyambhunath', 'taleju bell pds', 'taleju bell_BDS', 'taleju bell_KDS',
    'taleju temple', 'taleju_temple_south', 'trailokya mohan', 'Tridevi Temple',
    'uma maheshwor', 'ume_maheshwara', 'Vastala Temple', 'vishnu temple',
    'Wakupati Narayan Temple', 'wishing well budhha statue', 'Yetkha Bahal', 'yog_narendra_malla_statue',
]
monuments/migrations/0001_initial.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Generated by Django 5.0.1 on 2024-01-04 15:47

from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial schema: creates the ImageUpload table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='ImageUpload',
            fields=[
                # Auto-incrementing primary key (Django's default BigAutoField).
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Uploaded image; files land under MEDIA_ROOT/images/.
                ('image', models.ImageField(upload_to='images/')),
            ],
        ),
    ]
monuments/migrations/__init__.py ADDED
File without changes
monuments/migrations/__pycache__/0001_initial.cpython-312.pyc ADDED
Binary file (871 Bytes). View file
 
monuments/migrations/__pycache__/0001_initial.cpython-39.pyc ADDED
Binary file (661 Bytes). View file
 
monuments/migrations/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (161 Bytes). View file
 
monuments/migrations/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (156 Bytes). View file
 
output/file_20240304_205546.png ADDED
output/file_20240304_205646.png ADDED
output/file_20240304_205930.png ADDED
requirements.txt ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ distlib==0.3.8
2
+ dnspython==2.6.1
3
+ filelock==3.13.1
4
+ platformdirs==4.1.0
5
+ pymongo==4.6.2
6
+ setuptools==69.0.3
7
+ virtualenv==20.25.0
8
+ virtualenvwrapper-win==1.2.7
9
+ wheel==0.42.0
10
+ fastapi
11
+ uvicorn
12
+ matplotlib==3.8.2
13
+ numpy==1.26.4
14
+ Pillow==10.2.0
15
+ torch==2.1.2
16
+ torchvision==0.16.2
17
+ python-multipart
static/assets/.DS_Store ADDED
Binary file (14.3 kB). View file
 
static/assets/.sass-cache/262a5ebf37f39bafc9b50a6091656d2b17cbcfde/styles.scssc ADDED
Binary file (84 kB). View file
 
static/assets/.sass-cache/806a25bbae7282c82f19338727861b808ded4f6a/styles.scssc ADDED
Binary file (185 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_blogs.scssc ADDED
Binary file (2.52 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_clients.scssc ADDED
Binary file (4.18 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_common.scssc ADDED
Binary file (93.4 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_component-list.scssc ADDED
Binary file (5.71 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_counters.scssc ADDED
Binary file (1.99 kB). View file
 
static/assets/.sass-cache/b1369f33f1018eb02dacf7d6bb3c2e3f14caddac/_features.scssc ADDED
Binary file (10.1 kB). View file