Commit 4e64649
Sophie98 committed
1 Parent(s): a145212

Restructure code and fix error
- .gitattributes +0 -2
- .gitignore +0 -8
- README.md +3 -3
- model_checkpoint.h5 → Segmentation/model_checkpoint.h5 +0 -0
- segmentation.py → Segmentation/segmentation.py +25 -26
- StyTR.py → StyleTransfer/StyTR.py +4 -7
- ViT_helper.py → StyleTransfer/ViT_helper.py +0 -0
- function.py → StyleTransfer/function.py +0 -0
- misc.py → StyleTransfer/misc.py +0 -0
- decoder_iter_160000.pth → StyleTransfer/models/decoder_iter_160000.pth +0 -0
- embedding_iter_160000.pth → StyleTransfer/models/embedding_iter_160000.pth +0 -0
- transformer_iter_160000.pth → StyleTransfer/models/transformer_iter_160000.pth +0 -0
- vgg_normalised.pth → StyleTransfer/models/vgg_normalised.pth +0 -0
- styleTransfer.py → StyleTransfer/styleTransfer.py +6 -8
- transformer.py → StyleTransfer/transformer.py +1 -1
- app.py +16 -15
- box_ops.py +0 -88
- sofa_example1.jpg → figures/sofa_example1.jpg +0 -0
- style_example1.jpg → figures/style_example1.jpg +0 -0
- style_example2.jpg → figures/style_example2.jpg +0 -0
- style_example3.jpg → figures/style_example3.jpg +0 -0
- style_example4.jpg → figures/style_example4.jpg +0 -0
- style_example5.jpg → figures/style_example5.jpg +0 -0
- model_final.h5 +0 -3
- sofa.jpg +0 -0
- sofa_stylized_style.jpg +0 -0
- style.jpg +0 -0
.gitattributes
CHANGED
@@ -25,5 +25,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-
-*.pth filter=lfs diff=lfs merge=lfs -text
.gitignore
DELETED
@@ -1,8 +0,0 @@
-__pycache__/box_ops.cpython-37.pyc
-__pycache__/function.cpython-37.pyc
-__pycache__/misc.cpython-37.pyc
-__pycache__/segmentation.cpython-37.pyc
-__pycache__/styleTransfer.cpython-37.pyc
-__pycache__/StyTR.cpython-37.pyc
-__pycache__/transformer.cpython-37.pyc
-__pycache__/ViT_helper.cpython-37.pyc
README.md
CHANGED
@@ -1,8 +1,8 @@
 ---
-title:
+title: SofaStylerV2
 emoji: π
-colorFrom:
-colorTo:
+colorFrom: pink
+colorTo: yellow
 sdk: gradio
 sdk_version: 2.9.4
 app_file: app.py
model_checkpoint.h5 → Segmentation/model_checkpoint.h5
RENAMED
File without changes
segmentation.py → Segmentation/segmentation.py
RENAMED
@@ -8,6 +8,30 @@ from PIL import Image
 import segmentation_models as sm
 sm.set_framework('tf.keras')
 
+model_path = "Segmentation/model_checkpoint.h5"
+CLASSES = ['sofa']
+BACKBONE = 'resnet50'
+
+# define network parameters
+n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation
+activation = 'sigmoid' if n_classes == 1 else 'softmax'
+preprocess_input = sm.get_preprocessing(BACKBONE)
+LR=0.0001
+
+#create model architecture
+model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
+# define optomizer
+optim = keras.optimizers.Adam(LR)
+# Segmentation models losses can be combined together by '+' and scaled by integer or float factor
+dice_loss = sm.losses.DiceLoss()
+focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
+total_loss = dice_loss + (1 * focal_loss)
+# actulally total_loss can be imported directly from library, above example just show you how to manipulate with losses
+# total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss
+metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
+# compile keras model with defined optimozer, loss and metrics
+model.compile(optim, total_loss, metrics)
+model.load_weights(model_path)
 
 def get_mask(image:Image) -> Image:
     """
@@ -20,35 +44,10 @@ def get_mask(image:Image) -> Image:
     mask = corresponding maks of the image
     """
 
-    model_path = "model_checkpoint.h5"
-    CLASSES = ['sofa']
-    BACKBONE = 'resnet50'
-
-    # define network parameters
-    n_classes = 1 if len(CLASSES) == 1 else (len(CLASSES) + 1) # case for binary and multiclass segmentation
-    activation = 'sigmoid' if n_classes == 1 else 'softmax'
-    preprocess_input = sm.get_preprocessing(BACKBONE)
-    LR=0.0001
-
-    #create model architecture
-    model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
-    # define optomizer
-    optim = keras.optimizers.Adam(LR)
-    # Segmentation models losses can be combined together by '+' and scaled by integer or float factor
-    dice_loss = sm.losses.DiceLoss()
-    focal_loss = sm.losses.BinaryFocalLoss() if n_classes == 1 else sm.losses.CategoricalFocalLoss()
-    total_loss = dice_loss + (1 * focal_loss)
-    # actulally total_loss can be imported directly from library, above example just show you how to manipulate with losses
-    # total_loss = sm.losses.binary_focal_dice_loss # or sm.losses.categorical_focal_dice_loss
-    metrics = [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)]
-    # compile keras model with defined optimozer, loss and metrics
-    model.compile(optim, total_loss, metrics)
-
     # #load model
-
+
     #model = keras.models.load_model('model_final.h5', compile=False)
     print('loaded model')
-    return image
     test_img = np.array(image)
     test_img = cv2.resize(test_img, (640, 640))
     test_img = cv2.cvtColor(test_img, cv2.COLOR_RGB2BGR)
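Note: the net effect of this hunk is that the U-Net is now built, compiled and loaded with its checkpoint weights once at import time instead of on every get_mask() call (the old code also returned early with "return image", so no mask was ever produced). A minimal sketch of the resulting module-level pattern, using only values visible in the diff:

import keras
import segmentation_models as sm

sm.set_framework('tf.keras')

# build and compile the model once, when the module is imported
model = sm.Unet('resnet50', classes=1, activation='sigmoid')
model.compile(keras.optimizers.Adam(0.0001),
              sm.losses.DiceLoss() + sm.losses.BinaryFocalLoss(),
              [sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5)])
# the weights now live next to the module, under Segmentation/
model.load_weights("Segmentation/model_checkpoint.h5")

def get_mask(image):
    # every call reuses the already-loaded model
    ...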
StyTR.py → StyleTransfer/StyTR.py
RENAMED
@@ -1,15 +1,12 @@
 import torch
 import torch.nn.functional as F
 from torch import nn
-import
-import box_ops
-from misc import (NestedTensor, nested_tensor_from_tensor_list,
+from StyleTransfer.misc import (NestedTensor, nested_tensor_from_tensor_list,
                   accuracy, get_world_size, interpolate,
                   is_dist_avail_and_initialized)
-from function import normal,normal_style
-from function import calc_mean_std
-
-from ViT_helper import DropPath, to_2tuple, trunc_normal_
+from StyleTransfer.function import normal,normal_style
+from StyleTransfer.function import calc_mean_std
+from StyleTransfer.ViT_helper import DropPath, to_2tuple, trunc_normal_
 
 class PatchEmbed(nn.Module):
     """ Image to Patch Embedding
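Note: together with the matching edits in transformer.py, styleTransfer.py and app.py below, this hunk switches every flat module import to a package path rooted at the repository top level, which is what lets the build find the files after they move into Segmentation/ and StyleTransfer/. A minimal sketch of how the restructured modules are meant to be imported when the Space runs app.py from the repo root (adding empty __init__.py files to the two folders would make the packages explicit, but that is an assumption, not part of this commit):

# run from the repository root so these package paths resolve
from Segmentation.segmentation import get_mask, replace_sofa
from StyleTransfer.styleTransfer import create_styledSofa
import StyleTransfer.StyTR as StyTR
import StyleTransfer.transformer as transformer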
ViT_helper.py → StyleTransfer/ViT_helper.py
RENAMED
File without changes

function.py → StyleTransfer/function.py
RENAMED
File without changes

misc.py → StyleTransfer/misc.py
RENAMED
File without changes

decoder_iter_160000.pth → StyleTransfer/models/decoder_iter_160000.pth
RENAMED
File without changes

embedding_iter_160000.pth → StyleTransfer/models/embedding_iter_160000.pth
RENAMED
File without changes

transformer_iter_160000.pth → StyleTransfer/models/transformer_iter_160000.pth
RENAMED
File without changes

vgg_normalised.pth → StyleTransfer/models/vgg_normalised.pth
RENAMED
File without changes
styleTransfer.py → StyleTransfer/styleTransfer.py
RENAMED
@@ -3,13 +3,11 @@ import numpy as np
 import torch
 import torch.nn as nn
 from torchvision import transforms
-import transformer as transformer
-import StyTR as StyTR
+import StyleTransfer.transformer as transformer
+import StyleTransfer.StyTR as StyTR
 from collections import OrderedDict
 import tensorflow_hub as tfhub
 import tensorflow as tf
-import os
-import cv2
 import paddlehub as phub
 
 
@@ -30,10 +28,10 @@ def content_transform():
     return transform
 
 def StyleTransformer(content_img: Image, style_img: Image):
-    vgg_path = 'vgg_normalised.pth'
-    decoder_path = 'decoder_iter_160000.pth'
-    Trans_path = 'transformer_iter_160000.pth'
-    embedding_path = 'embedding_iter_160000.pth'
+    vgg_path = 'StyleTransfer/models/vgg_normalised.pth'
+    decoder_path = 'StyleTransfer/models/decoder_iter_160000.pth'
+    Trans_path = 'StyleTransfer/models/transformer_iter_160000.pth'
+    embedding_path = 'StyleTransfer/models/embedding_iter_160000.pth'
     # Advanced options
     content_size=640
     style_size=640
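Note: the new checkpoint paths ('StyleTransfer/models/...') are relative to the working directory, which works because the Space launches app.py from the repository root. A hedged alternative, not part of this commit, that resolves the same files relative to styleTransfer.py itself so the paths keep working no matter where the script is started from:

from pathlib import Path

# StyleTransfer/, i.e. the folder that contains styleTransfer.py
_MODULE_DIR = Path(__file__).resolve().parent

vgg_path = _MODULE_DIR / "models" / "vgg_normalised.pth"
decoder_path = _MODULE_DIR / "models" / "decoder_iter_160000.pth"
Trans_path = _MODULE_DIR / "models" / "transformer_iter_160000.pth"
embedding_path = _MODULE_DIR / "models" / "embedding_iter_160000.pth"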
transformer.py → StyleTransfer/transformer.py
RENAMED
@@ -4,7 +4,7 @@ from typing import Optional, List
 import torch
 import torch.nn.functional as F
 from torch import nn, Tensor
-from function import normal,normal_style
+from StyleTransfer.function import normal,normal_style
 import numpy as np
 import os
 device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
app.py
CHANGED
@@ -1,11 +1,12 @@
 from cv2 import transpose
 import numpy as np
 import gradio as gr
-from segmentation import get_mask,replace_sofa
-
+from Segmentation.segmentation import get_mask,replace_sofa
+from StyleTransfer.styleTransfer import create_styledSofa
 from PIL import Image
 from random import randint
 
+
 #https://colab.research.google.com/drive/11CtQpSeRBGAuw4TtE_rL470tRo-1X-p2#scrollTo=edGukUHXyymr
 #https://colab.research.google.com/drive/1xq33YKf0LVKCkbbUZIoNPzgpR_4Kd0qL#scrollTo=sPuM8Xypjs-c
 #https://github.com/dhawan98/Post-Processing-of-Image-Segmentation-using-CRF
@@ -102,25 +103,25 @@ def style_sofa(input_img: np.ndarray, style_img: np.ndarray):
     mask = get_mask(resized_img)
     #mask.save('mask.jpg')
     # Created a styled sofa
-
-
-    #
-    #
-
-
-
+    print('Styling sofa...')
+    styled_sofa = create_styledSofa(resized_img,resized_style)
+    #styled_sofa.save('styled_sofa.jpg')
+    # postprocess the final image
+    print('Replacing sofa...')
+    new_sofa = replace_sofa(resized_img,mask,styled_sofa)
+    new_sofa = new_sofa.crop(box)
     print('Finishing job', id)
-    return
+    return new_sofa
 
 demo = gr.Interface(
     style_sofa,
     inputs = [gr.inputs.Image(),gr.inputs.Image()],
     outputs = 'image',
-    examples= [['sofa_example1.jpg','style_example1.jpg'],
-               ['sofa_example1.jpg','style_example2.jpg'],
-               ['sofa_example1.jpg','style_example3.jpg'],
-               ['sofa_example1.jpg','style_example4.jpg'],
-               ['sofa_example1.jpg','style_example5.jpg']],
+    examples= [['figures/sofa_example1.jpg','figures/style_example1.jpg'],
+               ['figures/sofa_example1.jpg','figures/style_example2.jpg'],
+               ['figures/sofa_example1.jpg','figures/style_example3.jpg'],
+               ['figures/sofa_example1.jpg','figures/style_example4.jpg'],
+               ['figures/sofa_example1.jpg','figures/style_example5.jpg']],
     title="π Style your sofa π ",
     description="Customize your sofa to your wildest dreams π!\
     \nProvide a picture of your sofa and a desired pattern\
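Note: with the previously empty/commented-out body filled in, style_sofa now runs the whole pipeline: segment the sofa, stylize the image with the chosen pattern, paste the stylized pixels back only where the mask marks sofa, and crop back to the original region. A condensed sketch of that flow (the wrapper name is illustrative only; resized_img, resized_style and box come from resizing/cropping code outside this hunk):

def style_sofa_pipeline(resized_img, resized_style, box):
    # 1. segment the sofa in the resized input photo
    mask = get_mask(resized_img)
    # 2. restyle the whole image with the chosen pattern
    styled_sofa = create_styledSofa(resized_img, resized_style)
    # 3. keep the styled pixels only inside the sofa mask
    new_sofa = replace_sofa(resized_img, mask, styled_sofa)
    # 4. crop back to the region computed earlier in style_sofa
    return new_sofa.crop(box)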
box_ops.py
DELETED
@@ -1,88 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-Utilities for bounding box manipulation and GIoU.
-"""
-import torch
-from torchvision.ops.boxes import box_area
-
-
-def box_cxcywh_to_xyxy(x):
-    x_c, y_c, w, h = x.unbind(-1)
-    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
-         (x_c + 0.5 * w), (y_c + 0.5 * h)]
-    return torch.stack(b, dim=-1)
-
-
-def box_xyxy_to_cxcywh(x):
-    x0, y0, x1, y1 = x.unbind(-1)
-    b = [(x0 + x1) / 2, (y0 + y1) / 2,
-         (x1 - x0), (y1 - y0)]
-    return torch.stack(b, dim=-1)
-
-
-# modified from torchvision to also return the union
-def box_iou(boxes1, boxes2):
-    area1 = box_area(boxes1)
-    area2 = box_area(boxes2)
-
-    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
-    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]
-
-    wh = (rb - lt).clamp(min=0)  # [N,M,2]
-    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]
-
-    union = area1[:, None] + area2 - inter
-
-    iou = inter / union
-    return iou, union
-
-
-def generalized_box_iou(boxes1, boxes2):
-    """
-    Generalized IoU from https://giou.stanford.edu/
-
-    The boxes should be in [x0, y0, x1, y1] format
-
-    Returns a [N, M] pairwise matrix, where N = len(boxes1)
-    and M = len(boxes2)
-    """
-    # degenerate boxes gives inf / nan results
-    # so do an early check
-    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
-    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
-    iou, union = box_iou(boxes1, boxes2)
-
-    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
-    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
-
-    wh = (rb - lt).clamp(min=0)  # [N,M,2]
-    area = wh[:, :, 0] * wh[:, :, 1]
-
-    return iou - (area - union) / area
-
-
-def masks_to_boxes(masks):
-    """Compute the bounding boxes around the provided masks
-
-    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
-
-    Returns a [N, 4] tensors, with the boxes in xyxy format
-    """
-    if masks.numel() == 0:
-        return torch.zeros((0, 4), device=masks.device)
-
-    h, w = masks.shape[-2:]
-
-    y = torch.arange(0, h, dtype=torch.float)
-    x = torch.arange(0, w, dtype=torch.float)
-    y, x = torch.meshgrid(y, x)
-
-    x_mask = (masks * x.unsqueeze(0))
-    x_max = x_mask.flatten(1).max(-1)[0]
-    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
-    y_mask = (masks * y.unsqueeze(0))
-    y_max = y_mask.flatten(1).max(-1)[0]
-    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
-
-    return torch.stack([x_min, y_min, x_max, y_max], 1)
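Note: box_ops.py is the bounding-box utility module copied from Facebook's DETR codebase (format conversion and generalized IoU); deleting it lines up with the StyTR.py change above, which drops the only 'import box_ops' visible in this diff. For reference, a small worked example of the quantities those helpers computed, using torchvision's box_area exactly as the deleted code did:

import torch
from torchvision.ops.boxes import box_area

# two axis-aligned boxes in [x0, y0, x1, y1] format
boxes1 = torch.tensor([[0., 0., 2., 2.]])  # area 4
boxes2 = torch.tensor([[1., 1., 3., 3.]])  # area 4, overlapping boxes1 in a 1x1 square

inter = 1.0
union = box_area(boxes1) + box_area(boxes2) - inter  # 4 + 4 - 1 = 7
iou = inter / union                                  # 1/7, about 0.143

# generalized IoU also penalises the empty part of the smallest enclosing box (3x3 = 9)
giou = iou - (9 - union) / 9                         # about -0.079
print(iou, giou)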
sofa_example1.jpg → figures/sofa_example1.jpg
RENAMED
File without changes

style_example1.jpg → figures/style_example1.jpg
RENAMED
File without changes

style_example2.jpg → figures/style_example2.jpg
RENAMED
File without changes

style_example3.jpg → figures/style_example3.jpg
RENAMED
File without changes

style_example4.jpg → figures/style_example4.jpg
RENAMED
File without changes

style_example5.jpg → figures/style_example5.jpg
RENAMED
File without changes

model_final.h5
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9a456f38c83897d9d8b5c8dd989ff7ee2fe13bb123a70a00b6e987d4efac1c6e
-size 130858696

sofa.jpg
DELETED
Binary file (58.6 kB)

sofa_stylized_style.jpg
DELETED
Binary file (145 kB)

style.jpg
DELETED
Binary file (177 kB)