Upload 11 files

- 70epoch.pt +3 -0
- README.md +1 -12
- checkpoint_epoch_350.pth +3 -0
- detection_infer.py +25 -0
- output.jpg +0 -0
- requirements.txt +0 -0
- temp_image.jpg +0 -0
- ugan-major.ipynb +0 -0
- ugan.py +410 -0
- web.py +85 -0
- yolo-training.ipynb +1 -0
70epoch.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8a7d4c3ee25c2236425f2bb5f4257ef5d16d71834676d273984870a11bcdcbd
+size 22519288
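Both weight files in this upload are Git LFS pointer files: only the spec version, the SHA-256 object id, and the byte size are committed, and the binaries themselves are fetched with `git lfs pull`. As a minimal sketch (not part of the upload), a fetched file can be checked against the oid recorded in its pointer; the same check applies to checkpoint_epoch_350.pth below with its own oid:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoints don't have to fit in memory.
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(chunk_size), b''):
            digest.update(block)
    return digest.hexdigest()

# oid copied from the pointer file above
assert sha256_of('70epoch.pt') == 'f8a7d4c3ee25c2236425f2bb5f4257ef5d16d71834676d273984870a11bcdcbd'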
README.md
CHANGED
@@ -1,12 +1 @@
----
-title: Ugan Yolo
-emoji: π
-colorFrom: red
-colorTo: gray
-sdk: streamlit
-sdk_version: 1.32.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+underwater image enhancement and trash detection model
checkpoint_epoch_350.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40f6a8934bda8230cd4c97bbc251a0561a57ff35ebdce9e507e13ab9e4999273
+size 655177894
detection_infer.py
ADDED
@@ -0,0 +1,25 @@
+from ultralytics import YOLO
+import matplotlib.pyplot as plt
+import streamlit as st
+import seaborn as sns
+st.set_option('deprecation.showPyplotGlobalUse', False)
+import torch
+import cv2
+
+labels = ['Mask', 'can', 'cellphone', 'electronics', 'gbottle', 'glove', 'metal', 'misc', 'net', 'pbag', 'pbottle',
+          'plastic', 'rod', 'sunglasses', 'tire']
+
+garbage = []
+def detect(image):
+    model = YOLO("70epoch.pt")
+    results = model(image)
+    class_list = []
+    for result in results:
+        boxes = result.boxes  # Boxes object for bbox outputs
+        class_list = boxes.cls.tolist()
+    int_list = [int(num) for num in class_list]
+    class_names = [labels[i] for i in int_list]
+    garbage.extend(class_names)
+    res_plotted = results[0].plot()
+
+    return res_plotted, class_names
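A hypothetical standalone call to detect() outside Streamlit (assumes 70epoch.pt and a test image sit in the working directory; the file names here are illustrative). The image argument can be a path, a numpy array, or a PIL image, all of which ultralytics accepts:

import cv2
from detection_infer import detect

annotated, classes = detect('temp_image.jpg')
cv2.imwrite('annotated.jpg', annotated)  # results[0].plot() returns a BGR numpy array
print('detected:', classes if classes else 'nothing')

Note that detect() reloads the YOLO weights on every call; caching the model in a module-level variable (or behind st.cache_resource) would avoid re-reading the 22 MB checkpoint for each uploaded image.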
output.jpg
ADDED
(binary image file; no preview)
requirements.txt
ADDED
Binary file (6.06 kB).
temp_image.jpg
ADDED
(binary image file; no preview)
ugan-major.ipynb
ADDED
The diff for this file is too large to render.
ugan.py
ADDED
@@ -0,0 +1,410 @@
+import torch
+import torch.nn as nn
+from torch.nn import Conv2d, LeakyReLU, BatchNorm2d, ConvTranspose2d, ReLU
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+import numpy as np
+import cv2
+import os
+from accelerate import Accelerator
+import datetime
+import torch
+import torch.optim as optim
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+from tensorboardX import SummaryWriter
+from accelerate import notebook_launcher
+
+
+accelerator = Accelerator()
+device = accelerator.device
+
+def get_transforms():
+    transform = transforms.Compose([
+        transforms.ToTensor(),  # H,W,C -> C,H,W && [0,255] -> [0,1]
+        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))  # [0,1] -> [-1,1]
+    ])
+    return transform
+
+
+class MYDataSet(Dataset):
+    def __init__(self, src_data_path, dst_data_path):
+        self.train_A_imglist = self.get_imglist(src_data_path)
+        self.train_B_imglist = self.get_imglist(dst_data_path)
+        self.transform = get_transforms()
+
+    def get_imglist(self, img_dir):
+        img_name_list = sorted(os.listdir(img_dir))
+        img_list = []
+        half_len = len(img_name_list) // 3  # despite the name, only the first third of the images is used
+        for img_name in img_name_list[:half_len]:
+            img_path = os.path.join(img_dir, img_name)
+            img_list.append(img_path)
+        return img_list
+
+    def __len__(self):
+        return len(self.train_A_imglist)
+    def __getitem__(self, index):
+        train_A_img_path = self.train_A_imglist[index]
+        train_B_img_path = self.train_B_imglist[index]
+
+        train_A_img = cv2.imread(train_A_img_path)
+        train_B_img = cv2.imread(train_B_img_path)
+
+        train_A_tensor = self.transform(train_A_img)
+        train_B_tensor = self.transform(train_B_img)
+
+
+        return [train_A_tensor, train_B_tensor]
+
+
+def encoder_layer(in_channels, out_channels, kernel_size=4, stride=2, padding=1):  # NOTE: padding here differs from the 'valid' padding in the TensorFlow version of the original repo
+    layer = nn.Sequential(
+        Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
+        BatchNorm2d(out_channels),
+        LeakyReLU(0.2)
+    )
+    return layer
+
+
+def decoder_layer(in_channels, out_channels, last_layer=False, kernel_size=4, stride=2, padding=1):
+    if not last_layer:
+        layer = nn.Sequential(
+            ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
+            torch.nn.ReLU()
+        )
+    else:
+        layer = nn.Sequential(
+            ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1),
+            torch.nn.Tanh()
+        )
+    return layer
+
+def discrimiter_layer(in_channels, out_channels, kernel_size=4, stride=2, padding=1, wgan=False):
+    if wgan:
+        layer = nn.Sequential(
+            Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
+            BatchNorm2d(out_channels),
+            LeakyReLU(0.2)
+        )
+    else:
+        layer = nn.Sequential(
+            Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
+            LeakyReLU(0.2)
+        )
+    return layer
+
+
+class GeneratorNet(torch.nn.Module):
+    def __init__(self):
+        super(GeneratorNet, self).__init__()
+
+        # Encoder
+        self.enc_conv1 = encoder_layer(3, 64)
+        self.enc_conv2 = encoder_layer(64, 128)
+        self.enc_conv3 = encoder_layer(128, 256)
+        self.enc_conv4 = encoder_layer(256, 512)
+        self.enc_conv5 = encoder_layer(512, 512)
+        self.enc_conv6 = encoder_layer(512, 512)
+        self.enc_conv7 = encoder_layer(512, 512)
+        self.enc_conv8 = encoder_layer(512, 512, padding=1)
+        # Decoder
+        self.dec_conv1 = decoder_layer(512, 512)
+        self.dec_conv2 = decoder_layer(1024, 512)
+        self.dec_conv3 = decoder_layer(1024, 512)
+        self.dec_conv4 = decoder_layer(1024, 512)
+        self.dec_conv5 = decoder_layer(1024, 256)
+        self.dec_conv6 = decoder_layer(512, 128)
+        self.dec_conv7 = decoder_layer(256, 64)
+        self.dec_conv8 = decoder_layer(128, 3, last_layer=True)
+
+    def forward(self, input_x):
+        # Encoder
+        output_enc_conv1 = self.enc_conv1(input_x)
+        output_enc_conv2 = self.enc_conv2(output_enc_conv1)
+        output_enc_conv3 = self.enc_conv3(output_enc_conv2)
+        output_enc_conv4 = self.enc_conv4(output_enc_conv3)
+        output_enc_conv5 = self.enc_conv5(output_enc_conv4)
+        output_enc_conv6 = self.enc_conv6(output_enc_conv5)
+        output_enc_conv7 = self.enc_conv7(output_enc_conv6)
+        output_enc_conv8 = self.enc_conv8(output_enc_conv7)
+
+
+        # Decoder (U-Net style skip connections via channel concatenation)
+        output_dec_conv1 = self.dec_conv1(output_enc_conv8)
+        output_dec_conv1 = torch.cat([output_dec_conv1, output_enc_conv7], dim=1)
+
+        output_dec_conv2 = self.dec_conv2(output_dec_conv1)
+        output_dec_conv2 = torch.cat([output_dec_conv2, output_enc_conv6], dim=1)
+
+        output_dec_conv3 = self.dec_conv3(output_dec_conv2)
+        output_dec_conv3 = torch.cat([output_dec_conv3, output_enc_conv5], dim=1)
+
+        output_dec_conv4 = self.dec_conv4(output_dec_conv3)
+        output_dec_conv4 = torch.cat([output_dec_conv4, output_enc_conv4], dim=1)
+
+        output_dec_conv5 = self.dec_conv5(output_dec_conv4)
+        output_dec_conv5 = torch.cat([output_dec_conv5, output_enc_conv3], dim=1)
+
+        output_dec_conv6 = self.dec_conv6(output_dec_conv5)
+        output_dec_conv6 = torch.cat([output_dec_conv6, output_enc_conv2], dim=1)
+
+        output_dec_conv7 = self.dec_conv7(output_dec_conv6)
+        output_dec_conv7 = torch.cat([output_dec_conv7, output_enc_conv1], dim=1)
+
+        output_dec_conv8 = self.dec_conv8(output_dec_conv7)
+
+        return output_dec_conv8
+
+
+
+class DiscrimiterNet(torch.nn.Module):
+    def __init__(self, wgan_loss):
+        super(DiscrimiterNet, self).__init__()
+        self.wgan_loss = wgan_loss
+
+        self.conv1 = discrimiter_layer(3, 64, self.wgan_loss)  # NOTE: wgan_loss lands in the kernel_size position here; pass wgan=... to select the BatchNorm variant
+        self.conv2 = discrimiter_layer(64, 128, self.wgan_loss)
+        self.conv3 = discrimiter_layer(128, 256, self.wgan_loss)
+        self.conv4 = discrimiter_layer(256, 512, self.wgan_loss)
+        self.conv5 = discrimiter_layer(512, 1, kernel_size=1, stride=1)
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3(x)
+        x = self.conv4(x)
+        x = self.conv5(x)
+
+        return x
+
+
+def tensor2img(one_tensor):  # [b,c,h,w] [-1,1]
+    tensor = one_tensor.squeeze(0)  # [c,h,w]
+    tensor = (tensor * 0.5 + 0.5) * 255  # [c,h,w] [0,255]
+    tensor_cpu = tensor.cpu()
+    img = np.array(tensor_cpu, dtype=np.uint8)
+    img = np.transpose(img, (1, 2, 0))
+    return img
+def img2tensor(np_img):  # [h,w,c]
+    tensor = get_transforms()(np_img).cuda()  # [c,h,w] [-1,1]
+    tensor = tensor.unsqueeze(0)  # [b,c,h,w] [-1,1]
+    return tensor
+
+
+def weights_init(module):
+    classname = module.__class__.__name__
+    if classname.find('Conv') != -1:
+        nn.init.normal_(module.weight.data, 0.0, 0.02)
+
+def loss_gradient_difference(real_image, generated):  # b x c x h x w
+    true_x_shifted_right = real_image[:, :, 1:, :]  # 32 x 3 x 255 x 256
+    true_x_shifted_left = real_image[:, :, :-1, :]
+    true_x_gradient = torch.abs(true_x_shifted_left - true_x_shifted_right)
+
+    generated_x_shift_right = generated[:, :, 1:, :]  # 32 x 3 x 255 x 256
+    generated_x_shift_left = generated[:, :, :-1, :]
+    generated_x_griednt = torch.abs(generated_x_shift_left - generated_x_shift_right)
+
+    difference_x = true_x_gradient - generated_x_griednt
+
+    loss_x_gradient = (torch.sum(difference_x ** 2)) / 2  # tf.nn.l2_loss(true_x_gradient - generated_x_gradient)
+
+    true_y_shifted_right = real_image[:, :, :, 1:]
+    true_y_shifted_left = real_image[:, :, :, :-1]
+    true_y_gradient = torch.abs(true_y_shifted_left - true_y_shifted_right)
+
+    generated_y_shift_right = generated[:, :, :, 1:]
+    generated_y_shift_left = generated[:, :, :, :-1]
+    generated_y_griednt = torch.abs(generated_y_shift_left - generated_y_shift_right)
+
+    difference_y = true_y_gradient - generated_y_griednt
+    loss_y_gradient = (torch.sum(difference_y ** 2)) / 2  # tf.nn.l2_loss(true_y_gradient - generated_y_gradient)
+
+    igdl = loss_x_gradient + loss_y_gradient
+    return igdl
+
+
+def calculate_x_gradient(images):
+    x_gradient_filter = torch.Tensor(
+        [
+            [[0, 0, 0], [-1, 0, 1], [0, 0, 0]],
+            [[0, 0, 0], [-1, 0, 1], [0, 0, 0]],
+            [[0, 0, 0], [-1, 0, 1], [0, 0, 0]],
+        ]
+    ).cuda()
+    x_gradient_filter = x_gradient_filter.view(3, 1, 3, 3)
+    result = torch.functional.F.conv2d(
+        images, x_gradient_filter, groups=3, padding=(1, 1)
+    )
+    return result
+
+
+
+
+def calculate_y_gradient(images):
+    y_gradient_filter = torch.Tensor(
+        [
+            [[0, 1, 0], [0, 0, 0], [0, -1, 0]],
+            [[0, 1, 0], [0, 0, 0], [0, -1, 0]],
+            [[0, 1, 0], [0, 0, 0], [0, -1, 0]],
+        ]
+    ).cuda()
+    y_gradient_filter = y_gradient_filter.view(3, 1, 3, 3)
+    result = torch.functional.F.conv2d(
+        images, y_gradient_filter, groups=3, padding=(1, 1)
+    )
+    return result
+
+def loss_igdl(correct_images, generated_images):  # taken from https://github.com/Arquestro/ugan-pytorch/blob/master/ops/loss_modules.py
+    correct_images_gradient_x = calculate_x_gradient(correct_images)
+    generated_images_gradient_x = calculate_x_gradient(generated_images)
+    correct_images_gradient_y = calculate_y_gradient(correct_images)
+    generated_images_gradient_y = calculate_y_gradient(generated_images)
+    pairwise_p_distance = torch.nn.PairwiseDistance(p=1)
+    distances_x_gradient = pairwise_p_distance(
+        correct_images_gradient_x, generated_images_gradient_x
+    )
+    distances_y_gradient = pairwise_p_distance(
+        correct_images_gradient_y, generated_images_gradient_y
+    )
+    loss_x_gradient = torch.mean(distances_x_gradient)
+    loss_y_gradient = torch.mean(distances_y_gradient)
+    loss = 0.5 * (loss_x_gradient + loss_y_gradient)
+    return loss
+
+
+def ToTensor(image):
+    """Convert ndarrays in sample to Tensors."""
+    # numpy image: H x W x C
+    # torch image: C X H X W
+    image = image.transpose((2, 0, 1))
+    # Normalize image from [0, 255] to [0, 1]
+    image = 1 / 255.0 * image
+    return torch.from_numpy(image).type(dtype=torch.float)
+
+def train():
+    accelerator = Accelerator()
+    device = accelerator.device
+    print(device)
+
+    # Define the argparse arguments as variables
+    trainA_path = '/kaggle/input/underwater-img/underwater_imagenet/trainA'
+    trainB_path = '/kaggle/input/underwater-img/underwater_imagenet/trainB'
+    use_wgan = True
+    lr = 1e-4
+    max_epoch = 350
+    bz = 32
+    lbda1 = 100
+    lbda2 = 1
+    num_workers = 4
+    checkpoints_root = 'checkpoints'
+    log_root = './log'
+
+    if __name__ == "__main__":  # NOTE: this guard sits inside train(), so the body below only runs when ugan.py is the entry module
+        wgan = use_wgan
+        learning_rate = lr
+        max_epochs = max_epoch
+        batch_size = bz
+        lambda_1 = lbda1
+        lambda_2 = lbda2
+
+        netG = torch.nn.DataParallel(GeneratorNet()).to(device)
+        netD = torch.nn.DataParallel(DiscrimiterNet(wgan_loss=wgan)).to(device)
+
+        optimizer_g = optim.Adam(netG.parameters(), lr=learning_rate)
+        optimizer_d = optim.Adam(netD.parameters(), lr=learning_rate)
+
+        dataset = MYDataSet(src_data_path=trainA_path, dst_data_path=trainB_path)
+        datasetloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
+
+        log_root = log_root
+        date = datetime.datetime.now().strftime('%F_%T').replace(':', '_')
+        log_folder = date
+        log_dir = os.path.join(log_root, log_folder)
+        if not os.path.exists(log_dir):
+            os.makedirs(log_dir)
+        writer = SummaryWriter(log_dir=log_dir)
+
+        checkpoint_root = checkpoints_root
+        checkpoint_folder = date
+        checkpoint_dir = os.path.join(checkpoint_root, checkpoint_folder)
+        if not os.path.exists(checkpoint_dir):
+            os.makedirs(checkpoint_dir)
+
+        netG, netD, optimizer_g, optimizer_d, datasetloader = accelerator.prepare(netG, netD, optimizer_g, optimizer_d, datasetloader)
+
+        for epoch in range(0, max_epochs):
+            print("epoch :", epoch + 1)
+            d_loss_log_list = []
+            g_loss_log_list = []
+            for iteration, data in enumerate(tqdm(datasetloader)):
+                batchtensor_A = data[0].to(device)
+                batchtensor_B = data[1].to(device)
+                generated_batchtensor = netG(batchtensor_A)
+
+                num_critic = 1 if not wgan else 5  # WGAN: several critic steps per generator step
+                for i in range(num_critic):
+                    optimizer_d.zero_grad()
+                    d_fake = netD(generated_batchtensor.detach())
+                    d_real = netD(batchtensor_B)
+
+                    d_loss = torch.mean(d_fake) - torch.mean(d_real)
+                    if wgan:
+                        lambda_gp = 10
+                        epsilon = torch.rand(batchtensor_B.size()[0], 1, 1, 1).to(device)
+                        x_hat = batchtensor_B * epsilon + (1 - epsilon) * generated_batchtensor
+                        d_hat = netD(x_hat)
+                        gradients = torch.autograd.grad(outputs=d_hat, inputs=x_hat, grad_outputs=torch.ones_like(d_hat), create_graph=True, retain_graph=True)[0]
+                        gradients = gradients.view(gradients.size(0), -1)
+                        gradient_penalty = lambda_gp * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
+                        d_loss += gradient_penalty
+
+                    accelerator.backward(d_loss, retain_graph=True)
+                    optimizer_d.step()
+                    d_loss_log = d_loss.item()
+                    d_loss_log_list.append(d_loss_log)
+
+                optimizer_g.zero_grad()
+                d_fake = netD(generated_batchtensor)
+
+                g_loss = -torch.mean(d_fake)
+                l1_loss = torch.mean(torch.abs(generated_batchtensor - batchtensor_B))
+                igdl_loss = loss_igdl(batchtensor_B, generated_batchtensor)
+                g_loss += lambda_1 * l1_loss + lambda_2 * igdl_loss
+
+                accelerator.backward(g_loss)
+                optimizer_g.step()
+
+                g_loss_log = g_loss.item()
+                g_loss_log_list.append(g_loss_log)
+
+                writer.add_scalar('G_loss', g_loss_log, (epoch * len(datasetloader) + iteration))
+                writer.add_scalar('D_loss', d_loss_log, (epoch * len(datasetloader) + iteration))
+
+            d_loss_average_log = np.array(d_loss_log_list).mean()
+            g_loss_average_log = np.array(g_loss_log_list).mean()
+
+            writer.add_scalar('D_loss_epoch', d_loss_average_log, epoch)
+            writer.add_scalar('G_loss_epoch', g_loss_average_log, epoch)
+            print('D_loss_epoch', d_loss_average_log)
+            print('g_loss_epoch', g_loss_average_log)
+            if (epoch + 1) % 50 == 0:
+                torch.save({
+                    'epoch': epoch,
+                    'netG_state_dict': netG.state_dict(),
+                    'netD_state_dict': netD.state_dict(),
+                    'optimizer_g_state_dict': optimizer_g.state_dict(),
+                    'optimizer_d_state_dict': optimizer_d.state_dict(),
+                    'g_loss': g_loss_log_list[-1],
+                    'd_loss': d_loss_log_list[-1],
+                }, f"checkpoint_epoch_{epoch + 1}.pth")
+
+        writer.close()
+
+
+if __name__ == "__main__":
+    notebook_launcher(train, num_processes=1)
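A minimal smoke test for the generator's shapes (an illustrative sketch, not part of the upload): the eight stride-2 encoder stages shrink a 256x256 input to a 1x1 bottleneck, and the eight decoder stages with skip connections restore it, so spatial dimensions must be multiples of 256 (web.py uses 512x512).

import torch
from ugan import GeneratorNet

netG = GeneratorNet().eval()
with torch.no_grad():
    x = torch.randn(1, 3, 256, 256)  # normalized input in [-1, 1]
    y = netG(x)
print(y.shape)  # torch.Size([1, 3, 256, 256]); the final Tanh keeps values in [-1, 1]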
web.py
ADDED
@@ -0,0 +1,85 @@
+import streamlit as st
+import cv2
+import numpy as np
+from PIL import Image
+from ugan import GeneratorNet
+import torch
+from ugan import img2tensor
+from ugan import tensor2img
+from PIL import Image, ImageOps
+from torchvision import transforms
+import io
+from detection_infer import detect
+
+model_path = 'checkpoint_epoch_350.pth'
+checkpoint = torch.load(model_path)
+
+
+# Build a state dict whose keys match a plain (non-DataParallel) GeneratorNet
+netG = GeneratorNet().cuda()
+model_dict = netG.state_dict()
+new_state_dict = {}
+for k, v in checkpoint['netG_state_dict'].items():
+    if k in model_dict:
+        new_state_dict[k] = v
+
+
+for k, v in checkpoint['netG_state_dict'].items():
+    name = k[7:]  # remove the 'module.' prefix added by DataParallel
+    if name in model_dict:
+        new_state_dict[name] = v
+# Load the newly created state dict
+
+new_model = GeneratorNet().cuda()
+for name, param in new_model.named_parameters():
+    if name in model_dict:
+        model_dict[name].copy_(param)  # (effectively a no-op: model_dict is not used after this)
+
+
+
+new_model.load_state_dict(new_state_dict)
+
+
+
+# Main Streamlit app
+def main():
+    st.title('Underwater Image Enhancement and Trash Detection App')
+    st.write('Upload an image and we will process it!')
+
+    # Upload image
+    uploaded_image = st.file_uploader('Upload an image', type=['jpg', 'jpeg', 'png'])
+
+    if uploaded_image is not None:
+        with torch.no_grad():
+            # Display the uploaded image
+            st.image(uploaded_image, caption='Uploaded Image', use_column_width=True)
+            img = Image.open(uploaded_image)
+            img = ImageOps.exif_transpose(img)  # corrects orientation from EXIF metadata
+            img = img.resize((512, 512), resample=Image.BILINEAR)
+            img_tensor = img2tensor(img)
+            output_tensor = new_model.forward(img_tensor)
+            output_img = tensor2img(output_tensor)
+            cv2.imwrite('output.jpg', output_img)
+            processed_image = Image.open('output.jpg')
+            st.image(processed_image, caption='Processed Image', use_column_width=True)
+            st.text("Running the model for trash detection")
+            output_image, class_names = detect(processed_image)
+            # Display the output
+            st.text("Output Image:")
+            st.image(output_image)
+            if len(class_names) == 0:
+                st.success("The water is clear!!!")
+            else:
+                st.error(f"Waste Detected!!!\nThe image has {class_names}")
+
+
+if __name__ == '__main__':
+    main()
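The three key-filtering loops above can be collapsed into one expression; a more compact equivalent (a sketch assuming, as in ugan.py, that the checkpoint was saved from the DataParallel-wrapped netG, so every key carries the 'module.' prefix):

import torch
from ugan import GeneratorNet

ckpt = torch.load('checkpoint_epoch_350.pth', map_location='cpu')
# Strip the 'module.' prefix that DataParallel adds to every parameter name.
state = {k.removeprefix('module.'): v for k, v in ckpt['netG_state_dict'].items()}

netG = GeneratorNet()
netG.load_state_dict(state)
netG.eval()  # inference mode; web.py additionally wraps calls in torch.no_grad()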
yolo-training.ipynb
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.10.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":7799590,"sourceType":"datasetVersion","datasetId":4566742}],"dockerImageVersionId":30665,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"!git clone https://github.com/SkalskiP/yolov9.git","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","execution":{"iopub.status.busy":"2024-03-09T11:13:00.318326Z","iopub.execute_input":"2024-03-09T11:13:00.319033Z","iopub.status.idle":"2024-03-09T11:13:02.115708Z","shell.execute_reply.started":"2024-03-09T11:13:00.318993Z","shell.execute_reply":"2024-03-09T11:13:02.114647Z"},"trusted":true},"execution_count":1,"outputs":[{"name":"stdout","text":"Cloning into 'yolov9'...\nremote: Enumerating objects: 325, done.\u001b[K\nremote: Counting objects: 100% (172/172), done.\u001b[K\nremote: Compressing objects: 100% (70/70), done.\u001b[K\nremote: Total 325 (delta 134), reused 102 (delta 102), pack-reused 153\u001b[K\nReceiving objects: 100% (325/325), 2.26 MiB | 49.32 MiB/s, done.\nResolving deltas: 100% (159/159), done.\n","output_type":"stream"}]},{"cell_type":"code","source":"dataDir = '/kaggle/input/underwater-trash-detection-yolov9/'\nworkingDir = '/kaggle/working/' ","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:13:02.117642Z","iopub.execute_input":"2024-03-09T11:13:02.117955Z","iopub.status.idle":"2024-03-09T11:13:02.122142Z","shell.execute_reply.started":"2024-03-09T11:13:02.117926Z","shell.execute_reply":"2024-03-09T11:13:02.121308Z"},"trusted":true},"execution_count":2,"outputs":[]},{"cell_type":"code","source":"!wget https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-e.pt","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:13:02.123274Z","iopub.execute_input":"2024-03-09T11:13:02.123516Z","iopub.status.idle":"2024-03-09T11:13:04.227215Z","shell.execute_reply.started":"2024-03-09T11:13:02.123488Z","shell.execute_reply":"2024-03-09T11:13:04.226195Z"},"trusted":true},"execution_count":3,"outputs":[{"name":"stdout","text":"--2024-03-09 11:13:02-- https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-e.pt\nResolving github.com (github.com)... 20.248.137.48\nConnecting to github.com (github.com)|20.248.137.48|:443... connected.\nHTTP request sent, awaiting response... 
302 Found\nLocation: https://objects.githubusercontent.com/github-production-release-asset-2e65be/759338070/1380cea0-94b4-4d8b-adab-773e081eacee?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240309%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240309T111303Z&X-Amz-Expires=300&X-Amz-Signature=816cefb1f0ded7c873912fd7471d7399b103f5ec730f7208d09c41fbf0decaf5&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=759338070&response-content-disposition=attachment%3B%20filename%3Dyolov9-e.pt&response-content-type=application%2Foctet-stream [following]\n--2024-03-09 11:13:03-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/759338070/1380cea0-94b4-4d8b-adab-773e081eacee?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVCODYLSA53PQK4ZA%2F20240309%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20240309T111303Z&X-Amz-Expires=300&X-Amz-Signature=816cefb1f0ded7c873912fd7471d7399b103f5ec730f7208d09c41fbf0decaf5&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=759338070&response-content-disposition=attachment%3B%20filename%3Dyolov9-e.pt&response-content-type=application%2Foctet-stream\nResolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\nConnecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 140217688 (134M) [application/octet-stream]\nSaving to: 'yolov9-e.pt'\n\nyolov9-e.pt 100%[===================>] 133.72M 314MB/s in 0.4s \n\n2024-03-09 11:13:04 (314 MB/s) - 'yolov9-e.pt' saved [140217688/140217688]\n\n","output_type":"stream"}]},{"cell_type":"code","source":"cd yolov9","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:13:04.228676Z","iopub.execute_input":"2024-03-09T11:13:04.229002Z","iopub.status.idle":"2024-03-09T11:13:04.235746Z","shell.execute_reply.started":"2024-03-09T11:13:04.228972Z","shell.execute_reply":"2024-03-09T11:13:04.234875Z"},"trusted":true},"execution_count":4,"outputs":[{"name":"stdout","text":"/kaggle/working/yolov9\n","output_type":"stream"}]},{"cell_type":"code","source":"!pip install -r requirements.txt -q","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:13:04.238114Z","iopub.execute_input":"2024-03-09T11:13:04.238383Z","iopub.status.idle":"2024-03-09T11:13:18.508026Z","shell.execute_reply.started":"2024-03-09T11:13:04.238359Z","shell.execute_reply":"2024-03-09T11:13:18.506755Z"},"trusted":true},"execution_count":5,"outputs":[]},{"cell_type":"code","source":"import shutil\n\n# Source directory\nsrc_dir = \"/kaggle/input/underwater-trash-detection-yolov9/valid\"\n\n# Destination directory\ndest_dir = \"/kaggle/working/yolov9/valid\"\n\n# Copy all files from source to destination\nshutil.copytree(src_dir, dest_dir)\n","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:17:48.981515Z","iopub.execute_input":"2024-03-09T11:17:48.982181Z","iopub.status.idle":"2024-03-09T11:17:59.795664Z","shell.execute_reply.started":"2024-03-09T11:17:48.982150Z","shell.execute_reply":"2024-03-09T11:17:59.794729Z"},"trusted":true},"execution_count":8,"outputs":[{"execution_count":8,"output_type":"execute_result","data":{"text/plain":"'/kaggle/working/yolov9/valid'"},"metadata":{}}]},{"cell_type":"code","source":"import shutil\n\n# Source directory\nsrc_dir = \"/kaggle/input/underwater-trash-detection-yolov9/train\"\n\n# Destination directory\ndest_dir = \"/kaggle/working/yolov9/train\"\n\n# Copy all files from source to 
destination\nshutil.copytree(src_dir, dest_dir)","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:20:35.506459Z","iopub.execute_input":"2024-03-09T11:20:35.506920Z","iopub.status.idle":"2024-03-09T11:21:20.729740Z","shell.execute_reply.started":"2024-03-09T11:20:35.506882Z","shell.execute_reply":"2024-03-09T11:21:20.728878Z"},"trusted":true},"execution_count":10,"outputs":[{"execution_count":10,"output_type":"execute_result","data":{"text/plain":"'/kaggle/working/yolov9/train'"},"metadata":{}}]},{"cell_type":"code","source":"import shutil\n\n# Source directory\nsrc_dir = \"/kaggle/input/underwater-trash-detection-yolov9/test\"\n\n# Destination directory\ndest_dir = \"/kaggle/working/yolov9/test\"\n\n# Copy all files from source to destination\nshutil.copytree(src_dir, dest_dir)","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:21:37.676948Z","iopub.execute_input":"2024-03-09T11:21:37.677658Z","iopub.status.idle":"2024-03-09T11:21:42.021755Z","shell.execute_reply.started":"2024-03-09T11:21:37.677625Z","shell.execute_reply":"2024-03-09T11:21:42.020848Z"},"trusted":true},"execution_count":11,"outputs":[{"execution_count":11,"output_type":"execute_result","data":{"text/plain":"'/kaggle/working/yolov9/test'"},"metadata":{}}]},{"cell_type":"code","source":"!python train_dual.py --workers 8 --batch 4 --img 256 --epochs 100 --data /kaggle/input/underwater-trash-detection-yolov9/data.yaml --weights /kaggle/working/yolov9-e.pt --device 0 --cfg /kaggle/working/yolov9/models/detect/yolov9.yaml --hyp /kaggle/working/yolov9/data/hyps/hyp.scratch-high.yaml\n","metadata":{"execution":{"iopub.status.busy":"2024-03-09T11:21:51.866687Z","iopub.execute_input":"2024-03-09T11:21:51.867377Z","iopub.status.idle":"2024-03-09T17:59:41.560031Z","shell.execute_reply.started":"2024-03-09T11:21:51.867341Z","shell.execute_reply":"2024-03-09T17:59:41.558985Z"},"trusted":true},"execution_count":12,"outputs":[{"name":"stdout","text":"2024-03-09 11:21:58.805623: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n2024-03-09 11:21:58.805689: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n2024-03-09 11:21:58.807310: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n\u001b[34m\u001b[1mwandb\u001b[0m: (1) Create a W&B account\n\u001b[34m\u001b[1mwandb\u001b[0m: (2) Use an existing W&B account\n\u001b[34m\u001b[1mwandb\u001b[0m: (3) Don't visualize my results\n\u001b[34m\u001b[1mwandb\u001b[0m: Enter your choice: (30 second timeout) \n\u001b[34m\u001b[1mwandb\u001b[0m: W&B disabled due to login timeout.\n\u001b[34m\u001b[1mtrain_dual: \u001b[0mweights=/kaggle/working/yolov9-e.pt, cfg=/kaggle/working/yolov9/models/detect/yolov9.yaml, data=/kaggle/input/underwater-trash-detection-yolov9/data.yaml, hyp=/kaggle/working/yolov9/data/hyps/hyp.scratch-high.yaml, epochs=100, batch_size=4, imgsz=256, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=None, image_weights=False, device=0, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, cos_lr=False, flat_cos_lr=False, 
fixed_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, min_items=0, close_mosaic=0, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\nYOLOv5 π 1e33dbb Python-3.10.13 torch-2.1.2 CUDA:0 (Tesla P100-PCIE-16GB, 16276MiB)\n\n\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, cls_pw=1.0, dfl=1.5, obj_pw=1.0, iou_t=0.2, anchor_t=5.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.9, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.15, copy_paste=0.3\n\u001b[34m\u001b[1mClearML: \u001b[0mrun 'pip install clearml' to automatically track, visualize and remotely train YOLO π in ClearML\n\u001b[34m\u001b[1mComet: \u001b[0mrun 'pip install comet_ml' to automatically track and visualize YOLO π runs in Comet\n\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\nOverriding model.yaml nc=80 with nc=15\n\n from n params module arguments \n 0 -1 1 0 models.common.Silence [] \n 1 -1 1 1856 models.common.Conv [3, 64, 3, 2] \n 2 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n 3 -1 1 212864 models.common.RepNCSPELAN4 [128, 256, 128, 64, 1] \n 4 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n 5 -1 1 847616 models.common.RepNCSPELAN4 [256, 512, 256, 128, 1] \n 6 -1 1 2360320 models.common.Conv [512, 512, 3, 2] \n 7 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n 8 -1 1 2360320 models.common.Conv [512, 512, 3, 2] \n 9 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n 10 -1 1 656896 models.common.SPPELAN [512, 512, 256] \n 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n 12 [-1, 7] 1 0 models.common.Concat [1] \n 13 -1 1 3119616 models.common.RepNCSPELAN4 [1024, 512, 512, 256, 1] \n 14 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n 15 [-1, 5] 1 0 models.common.Concat [1] \n 16 -1 1 912640 models.common.RepNCSPELAN4 [1024, 256, 256, 128, 1] \n 17 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n 18 [-1, 13] 1 0 models.common.Concat [1] \n 19 -1 1 2988544 models.common.RepNCSPELAN4 [768, 512, 512, 256, 1] \n 20 -1 1 2360320 models.common.Conv [512, 512, 3, 2] \n 21 [-1, 10] 1 0 models.common.Concat [1] \n 22 -1 1 3119616 models.common.RepNCSPELAN4 [1024, 512, 512, 256, 1] \n 23 5 1 131328 models.common.CBLinear [512, [256]] \n 24 7 1 393984 models.common.CBLinear [512, [256, 512]] \n 25 9 1 656640 models.common.CBLinear [512, [256, 512, 512]] \n 26 0 1 1856 models.common.Conv [3, 64, 3, 2] \n 27 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n 28 -1 1 212864 models.common.RepNCSPELAN4 [128, 256, 128, 64, 1] \n 29 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n 30 [23, 24, 25, -1] 1 0 models.common.CBFuse [[0, 0, 0]] \n 31 -1 1 847616 models.common.RepNCSPELAN4 [256, 512, 256, 128, 1] \n 32 -1 1 2360320 models.common.Conv [512, 512, 3, 2] \n 33 [24, 25, -1] 1 0 models.common.CBFuse [[1, 1]] \n 34 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n 35 -1 1 2360320 models.common.Conv [512, 512, 3, 2] \n 36 [25, -1] 1 0 models.common.CBFuse [[2]] \n 37 -1 1 2857472 models.common.RepNCSPELAN4 [512, 512, 512, 256, 1] \n 38[31, 34, 37, 16, 19, 22] 1 21575162 models.yolo.DualDDetect [15, [512, 512, 512, 256, 512, 512]]\nyolov9 summary: 930 layers, 60829562 parameters, 60829530 gradients, 266.3 
GFLOPs\n\nTransferred 438/1412 items from /kaggle/working/yolov9-e.pt\n\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed β
\n\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 230 weight(decay=0.0), 247 weight(decay=0.0005), 245 bias\n\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n\u001b[34m\u001b[1mtrain: \u001b[0mScanning /kaggle/working/yolov9/train/labels... 3626 images, 0 background\u001b[0m\n\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /kaggle/working/yolov9/train/labels.cache\n\u001b[34m\u001b[1mval: \u001b[0mScanning /kaggle/working/yolov9/valid/labels... 1000 images, 0 backgrounds,\u001b[0m\n\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /kaggle/working/yolov9/valid/labels.cache\nPlotting labels to runs/train/exp3/labels.jpg... \n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. 
Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\n/opt/conda/lib/python3.10/site-packages/seaborn/_oldcore.py:1119: FutureWarning: use_inf_as_na option is deprecated and will be removed in a future version. 
Convert inf values to NaN before operating instead.\n with pd.option_context('mode.use_inf_as_na', True):\nImage sizes 256 train, 256 val\nUsing 4 dataloader workers\nLogging results to \u001b[1mruns/train/exp3\u001b[0m\nStarting training for 100 epochs...\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 0/99 1.73G 5.401 6.228 5.238 18 256: WARNING β οΈ TensorBoard graph visualization failure Only tensors, lists, tuples of tensors, or dictionary of tensors can be output from traced functions\n 0/99 2G 5.267 6.123 5.138 11 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.0701 0.00336 0.00133 0.000465\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 1/99 2G 4.972 5.486 4.88 30 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.00105 0.179 0.00162 0.000347\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 2/99 2G 3.845 4.507 3.993 2 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.582 0.0591 0.0301 0.0113\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 3/99 2G 3.108 3.999 3.233 3 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.603 0.0789 0.0523 0.0235\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 4/99 2G 2.847 3.793 2.895 9 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.679 0.0875 0.0713 0.0317\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 5/99 2G 2.679 3.598 2.682 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.495 0.12 0.0913 0.0443\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 6/99 2G 2.604 3.455 2.58 11 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.576 0.155 0.107 0.0563\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 7/99 2G 2.531 3.357 2.506 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.525 0.128 0.115 0.0636\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 8/99 2G 2.462 3.296 2.469 19 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.526 0.142 0.119 0.0644\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 9/99 2G 2.401 3.216 2.405 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.558 0.166 0.151 0.0749\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 10/99 2G 2.389 3.163 2.374 2 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.543 0.162 0.144 0.08\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 11/99 2G 2.332 3.04 2.325 8 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.696 0.154 0.161 0.0884\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 12/99 2G 2.306 2.974 2.288 7 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.602 0.178 0.185 0.102\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 13/99 2G 2.291 2.967 2.308 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.574 0.195 0.184 0.108\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 14/99 2G 2.258 2.9 2.256 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.598 0.194 0.199 0.118\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 15/99 2G 2.251 2.861 2.26 15 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.537 0.214 0.204 0.122\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 16/99 2G 2.228 2.818 2.239 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.623 0.235 0.217 0.125\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 17/99 2G 2.215 2.775 2.233 14 256: 1\n Class Images Instances P 
R mAP50 \n all 1000 1891 0.558 0.231 0.216 0.126\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 18/99 2G 2.191 2.727 2.204 10 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.642 0.22 0.237 0.145\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 19/99 2G 2.155 2.695 2.173 4 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.7 0.233 0.256 0.145\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 20/99 2G 2.128 2.642 2.159 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.68 0.245 0.27 0.159\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 21/99 2G 2.128 2.626 2.157 6 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.707 0.246 0.27 0.159\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 22/99 2G 2.098 2.583 2.158 23 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.685 0.275 0.274 0.159\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 23/99 2G 2.118 2.545 2.13 24 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.709 0.266 0.285 0.171\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 24/99 2G 2.08 2.534 2.121 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.676 0.238 0.252 0.156\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 25/99 2G 2.067 2.495 2.11 11 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.61 0.27 0.278 0.171\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 26/99 2G 2.077 2.487 2.113 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.72 0.253 0.295 0.18\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 27/99 2G 2.039 2.435 2.08 4 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.643 0.283 0.306 0.181\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 28/99 2G 2.071 2.43 2.103 13 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.634 0.285 0.294 0.178\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 29/99 2G 2.043 2.408 2.087 6 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.537 0.292 0.31 0.187\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 30/99 2G 2.017 2.367 2.061 13 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.545 0.267 0.291 0.181\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 31/99 2G 2.029 2.351 2.066 18 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.738 0.29 0.338 0.202\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 32/99 2G 2.005 2.346 2.058 28 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.672 0.292 0.333 0.202\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 33/99 2G 2.003 2.349 2.05 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.386 0.353 0.324 0.192\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 34/99 2G 2.001 2.344 2.088 21 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.526 0.334 0.337 0.21\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 35/99 2G 1.994 2.269 2.034 2 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.723 0.282 0.312 0.199\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 36/99 2G 1.981 2.284 2.044 19 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.406 0.357 0.337 0.205\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 37/99 2G 1.951 2.254 2.023 9 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.59 0.325 0.335 0.21\n\n Epoch GPU_mem box_loss cls_loss 
dfl_loss Instances Size\n 38/99 2G 1.96 2.218 2.039 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.638 0.278 0.335 0.21\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 39/99 2G 1.952 2.186 2.022 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.576 0.335 0.369 0.234\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 40/99 2G 1.953 2.241 2.027 10 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.458 0.33 0.341 0.21\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 41/99 2G 1.951 2.181 2.015 25 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.464 0.342 0.351 0.222\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 42/99 2G 1.921 2.17 1.998 12 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.413 0.349 0.347 0.224\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 43/99 2G 1.915 2.14 1.997 9 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.494 0.311 0.343 0.214\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 44/99 2G 1.938 2.165 2.018 10 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.346 0.334 0.339 0.218\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 45/99 2G 1.888 2.102 1.987 10 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.578 0.326 0.383 0.235\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 46/99 2G 1.914 2.092 1.999 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.367 0.385 0.384 0.232\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 47/99 2G 1.935 2.13 2.003 5 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.416 0.392 0.382 0.241\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 48/99 2G 1.887 2.082 1.974 20 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.428 0.399 0.405 0.253\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 49/99 2G 1.881 2.072 1.979 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.417 0.34 0.353 0.227\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 50/99 2G 1.88 2.044 1.946 3 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.427 0.395 0.395 0.255\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 51/99 2G 1.883 2.021 1.971 9 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.406 0.427 0.398 0.253\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 52/99 2G 1.864 2.027 1.969 7 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.442 0.343 0.368 0.239\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 53/99 2G 1.857 2 1.956 7 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.392 0.411 0.396 0.253\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 54/99 2G 1.845 1.985 1.963 17 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.45 0.388 0.415 0.269\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 55/99 2G 1.837 1.971 1.948 14 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.459 0.408 0.418 0.264\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 56/99 2G 1.866 2.004 1.978 13 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.394 0.402 0.41 0.264\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 57/99 2G 1.848 1.962 1.954 14 256: 1\n Class Images Instances P R mAP50 \n all 1000 1891 0.442 0.415 0.419 0.263\n\n Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n 58/99 2G 1.824 1.953 1.933 12 256: 1\n Class Images 
Training log, epochs 58-99 (YOLOv9, image size 256, ~2 GB GPU memory; validation after every epoch on 1000 images / 1891 instances). The raw log repeats its header each epoch and truncates the name of the last metric column, which is mAP50-95; it is condensed into one table here. "Instances" is the target count shown for the final training batch of the epoch, and the epoch-58 training losses fall just before this excerpt.

Epoch   box_loss  cls_loss  dfl_loss  Instances      P      R   mAP50  mAP50-95
58/99       -         -         -         -      0.502  0.386   0.425    0.27
59/99     1.835     1.972     1.948       25     0.561  0.362   0.415    0.267
60/99     1.81      1.935     1.946       10     0.507  0.407   0.425    0.273
61/99     1.819     1.902     1.921        9     0.484  0.404   0.427    0.272
62/99     1.819     1.926     1.935        6     0.549  0.388   0.431    0.27
63/99     1.793     1.865     1.898        4     0.509  0.427   0.436    0.273
64/99     1.781     1.875     1.911       21     0.482  0.435   0.433    0.275
65/99     1.809     1.89      1.92        14     0.508  0.406   0.445    0.286
66/99     1.791     1.837     1.9         13     0.464  0.423   0.443    0.284
67/99     1.767     1.837     1.896        4     0.488  0.459   0.463    0.294
68/99     1.78      1.822     1.91         7     0.498  0.444   0.436    0.281
69/99     1.778     1.806     1.893       14     0.551  0.391   0.44     0.286
70/99     1.766     1.775     1.889       10     0.531  0.439   0.46     0.294
71/99     1.743     1.775     1.879        7     0.554  0.449   0.478    0.31
72/99     1.736     1.771     1.875       23     0.551  0.422   0.469    0.298
73/99     1.737     1.771     1.87         2     0.511  0.43    0.455    0.293
74/99     1.746     1.765     1.876        5     0.507  0.422   0.456    0.296
75/99     1.731     1.742     1.862       15     0.559  0.419   0.469    0.298
76/99     1.731     1.741     1.873       19     0.533  0.43    0.477    0.304
77/99     1.737     1.749     1.876       15     0.505  0.446   0.477    0.303
78/99     1.716     1.719     1.865        4     0.505  0.407   0.462    0.302
79/99     1.715     1.71      1.857       16     0.516  0.432   0.475    0.307
80/99     1.732     1.727     1.866        9     0.507  0.443   0.472    0.308
81/99     1.735     1.72      1.88        35     0.491  0.502   0.5      0.323
82/99     1.71      1.683     1.857       31     0.528  0.49    0.505    0.329
83/99     1.713     1.682     1.86        13     0.597  0.435   0.491    0.317
84/99     1.7       1.673     1.847        3     0.55   0.485   0.505    0.323
85/99     1.666     1.649     1.838        6     0.588  0.45    0.518    0.327
86/99     1.696     1.674     1.858       14     0.586  0.449   0.514    0.331
87/99     1.69      1.647     1.838       17     0.563  0.485   0.522    0.331
88/99     1.682     1.646     1.84         3     0.632  0.469   0.518    0.331
89/99     1.672     1.626     1.83        12     0.646  0.468   0.517    0.333
90/99     1.683     1.648     1.847       15     0.558  0.492   0.524    0.338
91/99     1.668     1.617     1.827       12     0.632  0.492   0.524    0.342
92/99     1.655     1.594     1.819       15     0.597  0.486   0.532    0.34
93/99     1.669     1.605     1.819       11     0.544  0.519   0.536    0.344
94/99     1.678     1.617     1.825        3     0.571  0.526   0.54     0.344
95/99     1.656     1.609     1.823        5     0.582  0.5     0.54     0.346
96/99     1.646     1.573     1.814        8     0.63   0.466   0.541    0.345
97/99     1.665     1.587     1.819        6     0.567  0.495   0.542    0.346
98/99     1.647     1.572     1.813       13     0.557  0.501   0.545    0.348
99/99     1.658     1.582     1.806        1     0.561  0.523   0.544    0.348
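The upward trend in validation accuracy is easier to see plotted than read from the table. The sketch below is not a cell from the notebook: it simply re-plots the mAP50 column transcribed above, assuming matplotlib is installed.

import matplotlib.pyplot as plt

# Validation mAP50 per epoch, transcribed from the table above (epochs 59-99).
epochs = list(range(59, 100))
map50 = [
    0.415, 0.425, 0.427, 0.431, 0.436, 0.433, 0.445, 0.443, 0.463, 0.436,
    0.44, 0.46, 0.478, 0.469, 0.455, 0.456, 0.469, 0.477, 0.477, 0.462,
    0.475, 0.472, 0.5, 0.505, 0.491, 0.505, 0.518, 0.514, 0.522, 0.518,
    0.517, 0.524, 0.524, 0.532, 0.536, 0.54, 0.54, 0.541, 0.542, 0.545,
    0.544,
]

plt.plot(epochs, map50, marker='.')
plt.xlabel('epoch')
plt.ylabel('val mAP50')
plt.title('Validation mAP50, epochs 59-99')
plt.show()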
100 epochs completed in 6.608 hours.
Optimizer stripped from runs/train/exp3/weights/last.pt, 122.4MB
Optimizer stripped from runs/train/exp3/weights/best.pt, 122.4MB

Validating runs/train/exp3/weights/best.pt...
Fusing layers...
yolov9 summary: 700 layers, 60788602 parameters, 0 gradients, 265.1 GFLOPs

Final per-class validation of best.pt on 1000 images (the last column, truncated from the log header, is mAP50-95):

Class        Images  Instances      P       R   mAP50  mAP50-95
all            1000       1891  0.552   0.503   0.545     0.348
Mask           1000         90  0.896   0.567   0.75      0.561
can            1000         20  0.375   0.25    0.335     0.108
cellphone      1000         71  0.867   0.972   0.987     0.839
electronics    1000         40  0.361   0.475   0.392     0.186
gbottle        1000         82  0.432   0.639   0.606     0.364
glove          1000         55  0.86    0.764   0.863     0.673
metal          1000         22  0.384   0.136   0.157     0.0728
misc           1000         51  0.327   0.275   0.301     0.192
net            1000        148  0.682   0.703   0.689     0.421
pbag           1000        330  0.739   0.927   0.923     0.769
pbottle        1000        284  0.522   0.599   0.596     0.366
plastic        1000         59  0.37    0.0847  0.141     0.0676
rod            1000          9  0.201   0.222   0.252     0.0756
sunglasses     1000          3  0.493   0.329   0.486     0.222
tire           1000        627  0.765   0.601   0.69      0.306

Results saved to runs/train/exp3

The notebook then archives the training run for download. The first cell defines a helper that zips the current directory and returns a notebook download link:

In [15]:
import zipfile
import os
from IPython.display import FileLink

def zip_dir(directory=os.curdir, file_name='directory.zip'):
    """Zip the contents of `directory` and return a notebook download link."""
    os.chdir(directory)
    # A context manager ensures the archive is flushed and closed
    # before the download link is returned.
    with zipfile.ZipFile(file_name, mode='w') as zip_ref:
        for folder, _, files in os.walk(directory):
            for file in files:
                # Skip the archive itself so it is not zipped into itself.
                if file_name not in file:
                    zip_ref.write(os.path.join(folder, file))
    return FileLink(file_name)

In [16]:
zip_dir()
Out[16]: /kaggle/working/yolov9/runs/directory.zip

The second cell zips the entire yolov9 working directory with shutil:

In [17]:
import shutil

# Directory to zip
dir_to_zip = "/kaggle/working/yolov9"

# Base name WITHOUT the .zip extension: make_archive appends the
# extension itself, so passing a name that already ends in ".zip"
# would produce "yolov9.zip.zip".
zip_base = "/kaggle/working/yolov9"

# make_archive returns the actual path of the archive it created.
zip_path = shutil.make_archive(zip_base, 'zip', dir_to_zip)

print("Download the zip file from:", zip_path)

Download the zip file from: /kaggle/working/yolov9.zip
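Because make_archive derives the final filename itself, a short sanity check before downloading confirms the archive exists and looks right. A minimal sketch, not a cell from the original notebook; it assumes the cell above has already run:

import os
import zipfile

zip_path = "/kaggle/working/yolov9.zip"  # path returned by make_archive above

# Report the archive size and peek at a few members to confirm the
# training artifacts (weights, logs) actually made it in.
print("Archive size (MB): %.1f" % (os.path.getsize(zip_path) / 1e6))
with zipfile.ZipFile(zip_path) as zf:
    print(zf.namelist()[:5])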