Abubakar740 committed 2232b2c (parent: 2438336)

Upload LDR to HDR application files
This view is limited to 50 files because it contains too many changes; see the raw diff for the full changeset.
- app.py +339 -0
- best_ldr.py +327 -0
- checkpoints/ldr2hdr_cyclegan_728/latest_net_G.pth +3 -0
- data/__init__.py +107 -0
- data/__pycache__/__init__.cpython-312.pyc +0 -0
- data/__pycache__/aligned_dataset.cpython-312.pyc +0 -0
- data/__pycache__/base_dataset.cpython-312.pyc +0 -0
- data/__pycache__/image_folder.cpython-312.pyc +0 -0
- data/__pycache__/single_dataset.cpython-312.pyc +0 -0
- data/__pycache__/unaligned_dataset.cpython-312.pyc +0 -0
- data/aligned_dataset.py +60 -0
- data/base_dataset.py +167 -0
- data/colorization_dataset.py +69 -0
- data/image_folder.py +72 -0
- data/single_dataset.py +40 -0
- data/template_dataset.py +78 -0
- data/unaligned_dataset.py +71 -0
- models/__init__.py +58 -0
- models/__pycache__/__init__.cpython-312.pyc +0 -0
- models/__pycache__/base_model.cpython-312.pyc +0 -0
- models/__pycache__/cycle_gan_model.cpython-312.pyc +0 -0
- models/__pycache__/networks.cpython-312.pyc +0 -0
- models/__pycache__/pix2pix_model.cpython-312.pyc +0 -0
- models/__pycache__/test_model.cpython-312.pyc +0 -0
- models/base_model.py +306 -0
- models/colorization_model.py +69 -0
- models/cycle_gan_model.py +196 -0
- models/networks.py +588 -0
- models/pix2pix_model.py +127 -0
- models/template_model.py +100 -0
- models/test_model.py +69 -0
- options/__init__.py +1 -0
- options/__pycache__/__init__.cpython-312.pyc +0 -0
- options/__pycache__/base_options.cpython-312.pyc +0 -0
- options/__pycache__/test_options.cpython-312.pyc +0 -0
- options/__pycache__/train_options.cpython-312.pyc +0 -0
- options/base_options.py +127 -0
- options/test_options.py +23 -0
- options/train_options.py +35 -0
- requirements.txt +8 -0
- util/__init__.py +1 -0
- util/__pycache__/__init__.cpython-312.pyc +0 -0
- util/__pycache__/html.cpython-312.pyc +0 -0
- util/__pycache__/image_pool.cpython-312.pyc +0 -0
- util/__pycache__/util.cpython-312.pyc +0 -0
- util/__pycache__/visualizer.cpython-312.pyc +0 -0
- util/get_data.py +107 -0
- util/html.py +84 -0
- util/image_pool.py +54 -0
- util/util.py +130 -0
app.py
ADDED
@@ -0,0 +1,339 @@
# import gradio as gr
# import torch
# import os
# import tempfile
# import shutil
# from PIL import Image
# import numpy as np
# from pathlib import Path
# import sys
# import copy

# # --- Import logic from your project ---
# from options.test_options import TestOptions
# from data import create_dataset
# from models import create_model
# try:
#     from best_ldr import compute_metrics_for_images, score_records
# except ImportError:
#     raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")

# print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")

# # --- Global Setup: Load the CycleGAN model once when the app starts ---

# # We need to satisfy the parser's requirement for a dataroot at startup
# if '--dataroot' not in sys.argv:
#     sys.argv.extend(['--dataroot', './dummy_dataroot_for_init'])

# # Load the base options
# opt = TestOptions().parse()

# # Manually override settings for our model
# opt.name = 'ldr2hdr_cyclegan_728'
# opt.model = 'test'
# opt.netG = 'resnet_9blocks'
# opt.norm = 'instance'
# opt.no_dropout = True
# opt.checkpoints_dir = './checkpoints'
# opt.gpu_ids = [0] if torch.cuda.is_available() else []
# opt.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')

# # Create the model using these options
# model = create_model(opt)
# model.setup(opt)
# model.eval()

# print("--- Model Loaded Successfully ---")


# # --- Helper Function for Inference ---

# def run_inference(model, image_path, process_options):
#     """
#     A reusable function to run the model with specific preprocessing options.
#     """
#     # Deep copy the base options to avoid modifying the global state
#     local_opt = copy.deepcopy(opt)

#     # Apply the specific settings for this run
#     for key, value in process_options.items():
#         setattr(local_opt, key, value)

#     with tempfile.TemporaryDirectory() as temp_dir:
#         shutil.copy(image_path, temp_dir)
#         local_opt.dataroot = temp_dir
#         local_opt.num_test = 1
#         dataset = create_dataset(local_opt)

#         for i, data in enumerate(dataset):
#             model.set_input(data)
#             model.test()
#             visuals = model.get_current_visuals()

#             for label, image_tensor in visuals.items():
#                 if label == 'fake':
#                     image_numpy = (np.transpose(image_tensor.cpu().float().numpy()[0], (1, 2, 0)) + 1) / 2.0 * 255.0
#                     return Image.fromarray(image_numpy.astype(np.uint8))

# # --- The Main Gradio Processing Function ---

# def process_images_and_display(list_of_temp_files):
#     """
#     The main workflow: select best LDR, then run two inference modes.
#     """
#     if not list_of_temp_files:
#         raise gr.Error("Please upload your bracketed LDR images.")
#     if len(list_of_temp_files) < 2:
#         gr.Warning("For best results, upload at least 2 bracketed LDR images.")

#     uploaded_filepaths = [Path(f.name) for f in list_of_temp_files]

#     try:
#         # --- Step 1: Select the Best LDR ---
#         print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
#         weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
#         records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)
#         scored_records = score_records(records, weights)
#         if not scored_records:
#             raise gr.Error("Could not read or score any of the uploaded images.")

#         best_ldr_record = scored_records[0]
#         best_ldr_path = best_ldr_record['path']
#         print(f"Best LDR selected: {os.path.basename(best_ldr_path)} (Score: {best_ldr_record['score']:.4f})")
#         chosen_ldr_image = Image.open(best_ldr_path).convert("RGB")

#         # --- Step 2: Run Inference in Both Modes ---

#         # Mode A: High-Quality Crop (at model's native resolution)
#         print("Running Mode A: High-Quality Crop...")
#         crop_options = {
#             'preprocess': 'resize_and_crop',
#             'load_size': 728,
#             'crop_size': 728
#         }
#         hdr_cropped = run_inference(model, best_ldr_path, crop_options)
#         print("Mode A successful.")

#         # Mode B: Full Image (at a higher resolution)
#         print("Running Mode B: Full Image (High-Res Scaled)...")
#         scale_options = {
#             'preprocess': 'scale_width',
#             'load_size': 1024,  # <-- THIS IS THE CHANGE FOR HIGHER RESOLUTION
#             'crop_size': 728    # This value is ignored by scale_width but needs to be present
#         }
#         hdr_scaled = run_inference(model, best_ldr_path, scale_options)
#         print("Mode B successful.")

#         # Return all the images to update the UI
#         return uploaded_filepaths, chosen_ldr_image, hdr_cropped, hdr_scaled

#     except Exception as e:
#         print(f"An error occurred: {e}")
#         raise gr.Error(f"An error occurred during processing: {e}")

# # --- Create and Launch the Gradio Interface ---

# with gr.Blocks(theme=gr.themes.Monochrome(), css="footer {display: none !important}") as demo:
#     gr.Markdown("# LDR Bracketing to HDR Converter")
#     gr.Markdown("Upload a set of bracketed LDR images. The app will automatically select the best one and convert it to HDR using two different methods for comparison.")

#     with gr.Row():
#         with gr.Column(scale=1, min_width=300):
#             input_files = gr.Files(
#                 label="Upload Bracketed LDR Images",
#                 file_types=["image"]
#             )
#             process_button = gr.Button("Process Images", variant="primary")

#             with gr.Accordion("See Your Uploads", open=False):
#                 input_gallery = gr.Gallery(label="Uploaded LDR Bracket", show_label=False, columns=3, height="auto")

#         with gr.Column(scale=2):
#             gr.Markdown("## Results")
#             with gr.Row():
#                 chosen_ldr_display = gr.Image(label="Best LDR Chosen by Algorithm", type="pil", interactive=False)
#             with gr.Row():
#                 output_cropped = gr.Image(label="Result 1: High-Quality Crop (728x728)", type="pil", interactive=False)
#                 output_scaled = gr.Image(label="Result 2: Full Image (Scaled to 1024px Width)", type="pil", interactive=False)

#     process_button.click(
#         fn=process_images_and_display,
#         inputs=input_files,
#         outputs=[input_gallery, chosen_ldr_display, output_cropped, output_scaled]
#     )

# print("--- Launching Gradio App ---")
# demo.launch(share=True)


# ---- Active implementation (the commented block above is an earlier two-mode version) ----

import gradio as gr
import torch
import os
import tempfile
import shutil
from PIL import Image
import numpy as np
from pathlib import Path
import sys
import copy

# --- Import logic from your project ---
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
try:
    from best_ldr import compute_metrics_for_images, score_records
except ImportError:
    raise ImportError("Could not import from best_ldr.py. Make sure the file is in the same directory as app.py.")

print("--- Initializing LDR-to-HDR Model (this may take a moment) ---")

# --- Global Setup: Load the CycleGAN model once when the app starts ---

# We need to satisfy the parser's requirement for a dataroot at startup
if '--dataroot' not in sys.argv:
    sys.argv.extend(['--dataroot', './dummy_dataroot_for_init'])

# Load the base options
opt = TestOptions().parse()

# Manually override settings for our model
opt.name = 'ldr2hdr_cyclegan_728'
opt.model = 'test'
opt.netG = 'resnet_9blocks'
opt.norm = 'instance'
opt.no_dropout = True
opt.checkpoints_dir = './checkpoints'
opt.gpu_ids = [0] if torch.cuda.is_available() else []
opt.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')

# Create the model using these options
model = create_model(opt)
model.setup(opt)
model.eval()

print("--- Model Loaded Successfully ---")


# --- The Main Gradio Processing Function ---

def process_images_to_hdr(list_of_temp_files):
    """
    The main workflow: select best LDR, run inference, and return results for the UI.
    """
    if not list_of_temp_files:
        raise gr.Error("Please upload your bracketed LDR images.")
    if len(list_of_temp_files) < 2:
        gr.Warning("For best results, upload at least 2 bracketed LDR images.")

    uploaded_filepaths = [Path(f.name) for f in list_of_temp_files]

    try:
        # --- Step 1: Select the Best LDR ---
        print(f"Analyzing {len(uploaded_filepaths)} uploaded images...")
        weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
        records = compute_metrics_for_images(uploaded_filepaths, resize_max=1024)
        scored_records = score_records(records, weights)
        if not scored_records:
            raise gr.Error("Could not read or score any of the uploaded images.")

        best_ldr_record = scored_records[0]
        best_ldr_path = best_ldr_record['path']
        print(f"Best LDR selected: {os.path.basename(best_ldr_path)} (Score: {best_ldr_record['score']:.4f})")

        # --- Step 2: Run Inference ---
        print("Running Full Image (High-Res Scaled) Inference...")

        # We only need the one set of options now
        inference_options = {
            'preprocess': 'scale_width',
            'load_size': 1024,  # Generate the high-resolution, full image
            'crop_size': 728    # This value is ignored but required by the parser
        }

        # Deep copy the base options to avoid modifying the global state
        local_opt = copy.deepcopy(opt)
        for key, value in inference_options.items():
            setattr(local_opt, key, value)

        # Run the model
        with tempfile.TemporaryDirectory() as temp_dir:
            shutil.copy(best_ldr_path, temp_dir)
            local_opt.dataroot = temp_dir
            local_opt.num_test = 1
            dataset = create_dataset(local_opt)

            for i, data in enumerate(dataset):
                model.set_input(data)
                model.test()
                visuals = model.get_current_visuals()

                for label, image_tensor in visuals.items():
                    if label == 'fake':
                        image_numpy = (np.transpose(image_tensor.cpu().float().numpy()[0], (1, 2, 0)) + 1) / 2.0 * 255.0
                        final_hdr_image = Image.fromarray(image_numpy.astype(np.uint8))
                        print("Conversion to HDR successful.")
                        # Return the gallery of inputs and the single final HDR image
                        return uploaded_filepaths, final_hdr_image

    except Exception as e:
        print(f"An error occurred: {e}")
        raise gr.Error(f"An error occurred during processing: {e}")

# --- Create and Launch the Gradio Interface ---

with gr.Blocks(theme=gr.themes.Soft(), css="footer {display: none !important}") as demo:
    gr.Markdown(
        """
        # LDR Bracketing to HDR Converter
        Upload a set of bracketed LDR images. The app will automatically select the best one and convert it to a vibrant, full-resolution HDR image.
        """
    )

    with gr.Row():
        with gr.Column(scale=1, min_width=350):
            # --- INPUT ---
            input_files = gr.Files(
                label="Upload Bracketed LDR Images",
                file_types=["image"]
            )
            process_button = gr.Button("Process Images", variant="primary")
            with gr.Accordion("See Your Uploaded Images", open=True):
                input_gallery = gr.Gallery(label="Uploaded Images", show_label=False, columns=[2, 3], height="auto")

        with gr.Column(scale=2):
            # --- OUTPUT ---
            gr.Markdown("## Generated HDR Result")
            output_image = gr.Image(label="Final HDR Image", type="pil", interactive=False, show_download_button=True)

    process_button.click(
        fn=process_images_to_hdr,
        inputs=input_files,
        outputs=[input_gallery, output_image]
    )

    # gr.Markdown("### Examples")
    # gr.Examples(
    #     examples=[
    #         [
    #             "../pix2pix_dataset/testA/077A2406.jpg",
    #             "../pix2pix_dataset/testA/077A4049.jpg",
    #             "../pix2pix_dataset/testA/077A4073.jpg"
    #         ]
    #     ],
    #     inputs=input_files
    # )

print("--- Launching Gradio App ---")
demo.launch(share=True)
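The inner loop above recovers a displayable image from the generator's tanh output, which lives in [-1, 1]. A minimal standalone sketch of that denormalization step, with a random tensor standing in for `visuals['fake']` (hypothetical, not part of the commit):

# Sketch of the tensor -> PIL denormalization used in app.py.
# `fake` is a stand-in for visuals['fake']; a real value comes from model.get_current_visuals().
import numpy as np
import torch
from PIL import Image

fake = torch.rand(1, 3, 728, 728) * 2 - 1                # pretend generator output in [-1, 1]
arr = fake.cpu().float().numpy()[0]                      # (C, H, W)
arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0   # (H, W, C) mapped to [0, 255]
Image.fromarray(arr.astype(np.uint8)).save("fake_preview.png")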
best_ldr.py
ADDED
@@ -0,0 +1,327 @@
#!/usr/bin/env python3
"""
make_cyclegan_dataset.py (committed here as best_ldr.py)

Create paired datasets (setA, setB) for CycleGAN training from your dataset.

What it does:
  - Walks the dataset root (e.g. ../jpeg_stage1Just0)
  - Finds scene directories that contain both a `source/` subfolder with >= min_images
    and an `output/` subfolder with at least one image.
  - For each scene: selects the best LDR from `source/` (using metrics: clipped, coverage,
    exposure centering, sharpness, noise), copies that chosen source image into outdir/setA/,
    and copies the scene's output image into outdir/setB/, renamed to the chosen source filename.
  - Writes CSV and JSON reports with metric breakdowns.

Usage:
    python make_cyclegan_dataset.py --root ../jpeg_stage1Just0 --outdir ./cyclegan_data

Dependencies:
    pip install opencv-python pillow numpy

Author: ChatGPT (opinionated: default weights favor low clipping and good coverage)
"""

import argparse
import os
from pathlib import Path
import json
import csv
import shutil
from math import fabs

import numpy as np
import cv2
from PIL import Image, ExifTags

IMG_EXTS = {".jpg", ".jpeg", ".png", ".tif", ".tiff", ".bmp", ".webp"}

# ------------------ Image / metric helpers ------------------

def is_image_file(p: Path):
    return p.suffix.lower() in IMG_EXTS and p.is_file()

def list_images(folder: Path):
    if not folder.exists():
        return []
    return sorted([p for p in folder.iterdir() if is_image_file(p)])

def read_image_gray(path: Path, resize_max=None):
    """Read color then convert to grayscale float32 [0,1]. Uses cv2.imdecode to handle weird filenames."""
    arr = np.fromfile(str(path), dtype=np.uint8)
    img = cv2.imdecode(arr, cv2.IMREAD_COLOR)
    if img is None:
        raise IOError(f"Failed to read image {path}")
    if resize_max:
        h, w = img.shape[:2]
        scale = resize_max / max(h, w) if max(h, w) > resize_max else 1.0
        if scale != 1.0:
            img = cv2.resize(img, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_AREA)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
    return gray

def clipped_ratio(gray):
    total = gray.size
    high = np.count_nonzero(gray >= 0.992)
    low = np.count_nonzero(gray <= 0.008)
    return float(high + low) / float(total)

def histogram_coverage(gray, bins=256, min_frac=0.001):
    hist, _ = np.histogram((gray * 255).astype(np.uint8), bins=bins, range=(0, 255))
    threshold = max(1, int(min_frac * gray.size))
    covered = np.count_nonzero(hist >= threshold)
    return float(covered) / float(bins)

def exposure_distance(gray):
    return float(abs(float(np.mean(gray)) - 0.5))

def sharpness_metric(gray):
    lap = cv2.Laplacian((gray * 255).astype(np.uint8), cv2.CV_64F)
    return float(np.var(lap))

def noise_estimate(gray):
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    hf = gray - blur
    return float(np.std(hf))

def minmax_normalize(vals, eps=1e-8):
    arr = np.array(vals, dtype=np.float64)
    mn = float(arr.min())
    mx = float(arr.max())
    if mx - mn < eps:
        # all equal -> zeros
        return np.zeros_like(arr)
    return (arr - mn) / (mx - mn)

# ------------------ Selection & scene processing ------------------

def compute_metrics_for_images(image_paths, resize_max):
    records = []
    for p in image_paths:
        try:
            g = read_image_gray(p, resize_max=resize_max)
        except Exception as e:
            print(f"  WARNING: cannot read {p}: {e}")
            continue
        rec = {
            "path": str(p),
            "name": p.name,
            "clipped": clipped_ratio(g),
            "coverage": histogram_coverage(g),
            "exposure_dist": exposure_distance(g),
            "sharpness": sharpness_metric(g),
            "noise": noise_estimate(g)
        }
        records.append(rec)
    return records

def score_records(records, weights):
    if not records:
        return []
    clipped_vals = [r["clipped"] for r in records]
    cov_vals = [r["coverage"] for r in records]
    exp_vals = [r["exposure_dist"] for r in records]
    sharp_vals = [r["sharpness"] for r in records]
    noise_vals = [r["noise"] for r in records]

    clipped_n = minmax_normalize(clipped_vals)
    cov_n = minmax_normalize(cov_vals)
    exp_n = minmax_normalize(exp_vals)
    sharp_n = minmax_normalize(sharp_vals)
    noise_n = minmax_normalize(noise_vals)

    scored = []
    for i, r in enumerate(records):
        score = 0.0
        score += weights["clipped"] * (1.0 - float(clipped_n[i]))   # less clipping -> better
        score += weights["coverage"] * float(cov_n[i])              # more coverage -> better
        score += weights["exposure"] * (1.0 - float(exp_n[i]))      # closer to mid gray -> better
        score += weights["sharpness"] * float(sharp_n[i])           # sharper -> better
        score += weights["noise"] * (1.0 - float(noise_n[i]))       # less noise -> better

        rec = dict(r)
        rec.update({
            "clipped_n": float(clipped_n[i]),
            "coverage_n": float(cov_n[i]),
            "exposure_n": float(exp_n[i]),
            "sharpness_n": float(sharp_n[i]),
            "noise_n": float(noise_n[i]),
            "score": float(score)
        })
        scored.append(rec)
    scored_sorted = sorted(scored, key=lambda x: x["score"], reverse=True)
    return scored_sorted

def find_output_image(output_folder: Path):
    imgs = list_images(output_folder)
    if not imgs:
        return None
    # Prefer file with same name as parent folder (if present), else pick largest file
    parent_name = output_folder.parent.name
    for p in imgs:
        if p.stem == parent_name:
            return p
    # otherwise pick largest by file size (likely the main image)
    imgs_sorted = sorted(imgs, key=lambda x: x.stat().st_size, reverse=True)
    return imgs_sorted[0]

# ------------------ Main procedure ------------------

def make_dataset(root: Path, outdir: Path, min_images: int,
                 resize_max: int, weights: dict, copy_method="copy"):
    scenes_found = 0
    results = []
    setA = outdir / "setA"
    setB = outdir / "setB"
    os.makedirs(setA, exist_ok=True)
    os.makedirs(setB, exist_ok=True)

    # Walk the tree and find directories that contain both source/ and output/
    for dirpath, dirnames, filenames in os.walk(root):
        d = Path(dirpath)
        src_dir = d / "source"
        out_dir = d / "output"
        if not src_dir.exists() or not out_dir.exists():
            continue
        src_imgs = list_images(src_dir)
        if len(src_imgs) < min_images:
            # skip small scenes
            continue

        scenes_found += 1
        print(f"[{scenes_found}] Scene: {d} ({len(src_imgs)} source images)")

        # compute metrics and choose best
        records = compute_metrics_for_images(src_imgs, resize_max=resize_max)
        if not records:
            print("  No readable source images, skipping.")
            continue
        scored = score_records(records, weights)
        chosen = scored[0]
        chosen_path = Path(chosen["path"])
        chosen_name = chosen_path.name  # used for setA filename (and setB target name)

        # find output image for this scene
        out_img = find_output_image(out_dir)
        if out_img is None:
            print(f"  WARNING: no output image found in {out_dir}; skipping copying pair.")
            out_img_path = None
        else:
            out_img_path = out_img

        # destination paths
        destA = setA / chosen_name
        destB = setB / chosen_name

        # copy or symlink
        try:
            if copy_method == "symlink":
                if destA.exists():
                    destA.unlink()
                os.symlink(os.path.abspath(chosen_path), destA)
            else:
                shutil.copy2(chosen_path, destA)
        except Exception as e:
            print(f"  ERROR copying source -> {destA}: {e}")

        if out_img_path is not None:
            try:
                if copy_method == "symlink":
                    if destB.exists():
                        destB.unlink()
                    os.symlink(os.path.abspath(out_img_path), destB)
                else:
                    shutil.copy2(out_img_path, destB)
            except Exception as e:
                print(f"  ERROR copying output -> {destB}: {e}")

        # record result
        result = {
            "scene_dir": str(d),
            "source_dir": str(src_dir),
            "output_dir": str(out_dir),
            "chosen_source_path": str(chosen_path),
            "chosen_source_name": chosen_name,
            "chosen_score": chosen["score"],
            "metrics": {
                "clipped": chosen["clipped"],
                "coverage": chosen["coverage"],
                "exposure_dist": chosen["exposure_dist"],
                "sharpness": chosen["sharpness"],
                "noise": chosen["noise"],
                "clipped_n": chosen["clipped_n"],
                "coverage_n": chosen["coverage_n"],
                "exposure_n": chosen["exposure_n"],
                "sharpness_n": chosen["sharpness_n"],
                "noise_n": chosen["noise_n"],
            },
            "output_image_used": str(out_img_path) if out_img_path is not None else None,
            "destA": str(destA),
            "destB": str(destB) if out_img_path is not None else None
        }
        results.append(result)

        # print top 3 for quick audit
        print("  Top candidates:")
        for c in scored[:3]:
            print(f"    {c['score']:.4f}  clipped={c['clipped']:.4f} cov={c['coverage']:.4f} expd={c['exposure_dist']:.4f} sharp={c['sharpness']:.1f} noise={c['noise']:.5f} -> {Path(c['path']).name}")

    # write reports
    out_csv = outdir / "paired_selection.csv"
    out_json = outdir / "paired_selection.json"
    with open(out_json, "w", encoding="utf-8") as jf:
        json.dump(results, jf, indent=2)
    with open(out_csv, "w", newline="", encoding="utf-8") as cf:
        writer = csv.writer(cf)
        header = ["scene_dir", "source_dir", "output_dir", "chosen_source_name", "chosen_source_path",
                  "chosen_score", "output_image_used", "destA", "destB"]
        writer.writerow(header)
        for r in results:
            writer.writerow([r.get(h, "") for h in header])

    print(f"\nDone. Scenes processed: {scenes_found}")
    print(f"Paired data saved to:\n  {setA}\n  {setB}")
    print(f"Reports: {out_csv} , {out_json}")
    return results

# ------------------ CLI ------------------

def parse_weights(s):
    parts = [float(x.strip()) for x in s.split(",")]
    if len(parts) != 5:
        raise argparse.ArgumentTypeError("weights must be 5 comma-separated numbers")
    ssum = sum(parts)
    if ssum == 0:
        raise argparse.ArgumentTypeError("weights sum must be > 0")
    return [p / ssum for p in parts]

def main():
    ap = argparse.ArgumentParser(description="Make paired CycleGAN dataset from your LDR/HDR scene layout.")
    ap.add_argument("--root", "-r", required=True, help="Root of dataset (e.g. ../jpeg_stage1Just0)")
    ap.add_argument("--outdir", "-o", default="./cyclegan_data", help="Output folder for paired dataset")
    ap.add_argument("--min_images", type=int, default=2, help="Minimum images in source/ to consider scene")
    ap.add_argument("--resize_max", type=int, default=1024, help="Resize longest side for metric calc (speeds up)")
    ap.add_argument("--weights", type=parse_weights, default="0.35,0.25,0.15,0.15,0.10",
                    help="5 weights: clipped,coverage,exposure,sharpness,noise (will be normalized)")
    ap.add_argument("--copy_method", choices=["copy", "symlink"], default="copy",
                    help="copy files or create symlinks (symlink saves disk space)")
    args = ap.parse_args()

    root = Path(args.root).expanduser().resolve()
    outdir = Path(args.outdir).expanduser().resolve()
    w = args.weights  # parse_weights already returns a normalized list
    weights = {
        "clipped": w[0],
        "coverage": w[1],
        "exposure": w[2],
        "sharpness": w[3],
        "noise": w[4]
    }
    print("Using weights:", weights)
    outdir.mkdir(parents=True, exist_ok=True)

    make_dataset(root, outdir, min_images=args.min_images,
                 resize_max=args.resize_max, weights=weights, copy_method=args.copy_method)

if __name__ == "__main__":
    main()
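app.py imports only `compute_metrics_for_images` and `score_records` from this module, so the selection step can also be driven directly from Python. A minimal sketch, assuming a hypothetical `./brackets` folder of bracketed JPEGs:

# Sketch: score a folder of bracketed LDRs and print them best-first.
from pathlib import Path
from best_ldr import compute_metrics_for_images, score_records

paths = sorted(Path("./brackets").glob("*.jpg"))   # hypothetical input folder
weights = {"clipped": 0.35, "coverage": 0.25, "exposure": 0.15, "sharpness": 0.15, "noise": 0.10}
records = compute_metrics_for_images(paths, resize_max=1024)
for rec in score_records(records, weights):        # sorted by descending score
    print(f"{rec['score']:.4f}  {rec['name']}")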
checkpoints/ldr2hdr_cyclegan_728/latest_net_G.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ff234154d77ae11a15cb2550deec1cc63cf269be4bbec8487aea3ba2bc3c2b0f
size 45533133
data/__init__.py
ADDED
@@ -0,0 +1,107 @@
"""This package includes all the modules related to data loading and preprocessing

To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point from data loader.
    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.

Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
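# A minimal sketch of such a 'dummy' dataset (a hypothetical data/dummy_dataset.py,
# not part of this commit, following the four functions listed above):
#
#     from data.base_dataset import BaseDataset, get_transform
#     from data.image_folder import make_dataset
#     from PIL import Image
#
#     class DummyDataset(BaseDataset):
#         def __init__(self, opt):
#             BaseDataset.__init__(self, opt)
#             self.paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
#             self.transform = get_transform(opt)
#
#         def __len__(self):
#             return len(self.paths)
#
#         def __getitem__(self, index):
#             path = self.paths[index]
#             img = Image.open(path).convert("RGB")
#             return {"A": self.transform(img), "A_paths": path}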
import importlib
import torch.utils.data
from torch.utils.data.distributed import DistributedSampler
import torch.distributed as dist
import os
from data.base_dataset import BaseDataset


def find_dataset_using_name(dataset_name):
    """Import the module "data/[dataset_name]_dataset.py".

    In the file, the class called DatasetNameDataset() will
    be instantiated. It has to be a subclass of BaseDataset,
    and it is case-insensitive.
    """
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    dataset = None
    target_dataset_name = dataset_name.replace("_", "") + "dataset"
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))

    return dataset


def get_option_setter(dataset_name):
    """Return the static method <modify_commandline_options> of the dataset class."""
    dataset_class = find_dataset_using_name(dataset_name)
    return dataset_class.modify_commandline_options


def create_dataset(opt):
    """Create a dataset given the option.

    This function wraps the class CustomDatasetDataLoader.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from data import create_dataset
        >>> dataset = create_dataset(opt)
    """
    data_loader = CustomDatasetDataLoader(opt)
    dataset = data_loader.load_data()
    return dataset


class CustomDatasetDataLoader:
    """Wrapper class of Dataset class that performs multi-threaded data loading"""

    def __init__(self, opt):
        """Initialize this class

        Step 1: create a dataset instance given the name [dataset_mode]
        Step 2: create a multi-threaded data loader.
        """
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print("dataset [%s] was created" % type(self.dataset).__name__)

        # Use DistributedSampler for DDP training
        if "LOCAL_RANK" in os.environ:
            print(f'create DDP sampler on rank {int(os.environ["LOCAL_RANK"])}')
            self.sampler = DistributedSampler(self.dataset, shuffle=not opt.serial_batches)
            shuffle = False  # DistributedSampler handles shuffling
        else:
            self.sampler = None
            shuffle = not opt.serial_batches

        self.dataloader = torch.utils.data.DataLoader(self.dataset, batch_size=opt.batch_size, shuffle=shuffle, sampler=self.sampler, num_workers=int(opt.num_threads))

    def load_data(self):
        return self

    def __len__(self):
        """Return the number of data in the dataset"""
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Return a batch of data"""
        for i, data in enumerate(self.dataloader):
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data

    def set_epoch(self, epoch):
        """Set epoch for DistributedSampler to ensure proper shuffling"""
        if self.sampler is not None:
            self.sampler.set_epoch(epoch)
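The `LOCAL_RANK` branch above only activates under a distributed launcher such as `torchrun`, which sets that environment variable per process. A sketch of the intended call pattern, assuming a hypothetical training loop in the style of this codebase (`opt`, `model`, and `opt.n_epochs` come from the framework's train script, launched e.g. with `torchrun --nproc_per_node=2 train.py --dataroot ./cyclegan_data`):

# Sketch (hypothetical loop): re-seed the DistributedSampler each epoch so
# shuffling differs across epochs and ranks; set_epoch is a no-op when no
# sampler was created (single-process runs).
dataset = create_dataset(opt)          # returns the CustomDatasetDataLoader above
for epoch in range(opt.n_epochs):
    dataset.set_epoch(epoch)           # forwards to DistributedSampler.set_epoch
    for data in dataset:
        model.set_input(data)          # model assumed from models.create_model(opt)
        model.optimize_parameters()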
data/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (6.17 kB)

data/__pycache__/aligned_dataset.cpython-312.pyc
ADDED
Binary file (3.89 kB)

data/__pycache__/base_dataset.cpython-312.pyc
ADDED
Binary file (9.56 kB)

data/__pycache__/image_folder.cpython-312.pyc
ADDED
Binary file (3.51 kB)

data/__pycache__/single_dataset.cpython-312.pyc
ADDED
Binary file (2.57 kB)

data/__pycache__/unaligned_dataset.cpython-312.pyc
ADDED
Binary file (4.59 kB)
data/aligned_dataset.py
ADDED
@@ -0,0 +1,60 @@
import os
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image


class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
        assert self.opt.load_size >= self.opt.crop_size  # crop_size should be smaller than the size of loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == "BtoA" else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == "BtoA" else self.opt.output_nc

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) -- an image in the input domain
            B (tensor) -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        # read an image given a random integer index
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert("RGB")
        # split the AB image into A and B
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B = AB.crop((w2, 0, w, h))

        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

        A = A_transform(A)
        B = B_transform(B)

        return {"A": A, "B": B, "A_paths": AB_path, "B_paths": AB_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
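AlignedDataset expects each file to be a single side-by-side {A,B} image that it splits down the middle. A minimal sketch of composing such a pair with PIL (the file paths are hypothetical):

# Sketch: paste an input/target pair side by side into one AB image,
# matching the left-A / right-B split performed in __getitem__ above.
from PIL import Image

A = Image.open("ldr.jpg").convert("RGB")   # hypothetical input image
B = Image.open("hdr.jpg").convert("RGB")   # hypothetical target image
B = B.resize(A.size)                       # both halves must share one size
AB = Image.new("RGB", (A.width * 2, A.height))
AB.paste(A, (0, 0))                        # left half -> domain A
AB.paste(B, (A.width, 0))                  # right half -> domain B
AB.save("pair_0001.jpg")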
data/base_dataset.py
ADDED
@@ -0,0 +1,167 @@
"""This module implements an abstract base class (ABC) 'BaseDataset' for datasets.

It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
"""

import random
import numpy as np
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
from abc import ABC, abstractmethod


class BaseDataset(data.Dataset, ABC):
    """This class is an abstract base class (ABC) for datasets.

    To create a subclass, you need to implement the following four functions:
    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point.
    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the class; save the options in the class

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        self.opt = opt
        self.root = opt.dataroot

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def __len__(self):
        """Return the total number of images in the dataset."""
        return 0

    @abstractmethod
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
        """
        pass


def get_params(opt, size):
    w, h = size
    new_h = h
    new_w = w
    if opt.preprocess == "resize_and_crop":
        new_h = new_w = opt.load_size
    elif opt.preprocess == "scale_width_and_crop":
        new_w = opt.load_size
        new_h = opt.load_size * h // w

    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))

    flip = random.random() > 0.5

    return {"crop_pos": (x, y), "flip": flip}


def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
    transform_list = []
    if grayscale:
        transform_list.append(transforms.Grayscale(1))
    if "resize" in opt.preprocess:
        osize = [opt.load_size, opt.load_size]
        transform_list.append(transforms.Resize(osize, method))
    elif "scale_width" in opt.preprocess:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))

    if "crop" in opt.preprocess:
        if params is None:
            transform_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            transform_list.append(transforms.Lambda(lambda img: __crop(img, params["crop_pos"], opt.crop_size)))

    if opt.preprocess == "none":
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))

    if not opt.no_flip:
        if params is None:
            transform_list.append(transforms.RandomHorizontalFlip())
        elif params["flip"]:
            transform_list.append(transforms.Lambda(lambda img: __flip(img, params["flip"])))

    if convert:
        transform_list += [transforms.ToTensor()]
        if grayscale:
            transform_list += [transforms.Normalize((0.5,), (0.5,))]
        else:
            transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)


def __transforms2pil_resize(method):
    mapper = {
        transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
        transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
        transforms.InterpolationMode.NEAREST: Image.NEAREST,
        transforms.InterpolationMode.LANCZOS: Image.LANCZOS,
    }
    return mapper[method]


def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if h == oh and w == ow:
        return img

    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)


def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
    method = __transforms2pil_resize(method)
    ow, oh = img.size
    if ow == target_size and oh >= crop_size:
        return img
    w = target_size
    h = int(max(target_size * oh / ow, crop_size))
    return img.resize((w, h), method)


def __crop(img, pos, size):
    ow, oh = img.size
    x1, y1 = pos
    tw = th = size
    if ow > tw or oh > th:
        return img.crop((x1, y1, x1 + tw, y1 + th))
    return img


def __flip(img, flip):
    if flip:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img


def __print_size_warning(ow, oh, w, h):
    """Print warning information about image size (only print once)"""
    if not hasattr(__print_size_warning, "has_printed"):
        print("The image size needs to be a multiple of 4. "
              "The loaded image size was (%d, %d), so it was adjusted to "
              "(%d, %d). This adjustment will be done to all images "
              "whose sizes are not multiples of 4" % (ow, oh, w, h))
        __print_size_warning.has_printed = True
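`get_params` draws one random crop/flip decision and `get_transform` replays it, which is how AlignedDataset keeps A and B geometrically aligned. A minimal sketch with a hand-built `opt` namespace (the attribute values mirror the 1024/728 sizes used elsewhere in this commit; the image paths are hypothetical):

# Sketch: apply the *same* random crop and flip to two paired images.
from argparse import Namespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = Namespace(preprocess="resize_and_crop", load_size=1024, crop_size=728, no_flip=False)
A = Image.open("a.jpg").convert("RGB")   # hypothetical paired images
B = Image.open("b.jpg").convert("RGB")

params = get_params(opt, A.size)         # one shared draw of crop_pos / flip
tA = get_transform(opt, params)(A)       # identical geometry on both sides,
tB = get_transform(opt, params)(B)       # returned as normalized tensors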
data/colorization_dataset.py
ADDED
@@ -0,0 +1,69 @@
import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from skimage import color  # requires scikit-image
from PIL import Image
import numpy as np
import torchvision.transforms as transforms


class ColorizationDataset(BaseDataset):
    """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.

    This dataset is required by pix2pix-based colorization model ('--model colorization')
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        By default, the number of channels for input image is 1 (L) and
        the number of channels for output image is 2 (ab). The direction is from A to B
        """
        parser.set_defaults(input_nc=1, output_nc=2, direction="AtoB")
        return parser

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
        assert opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == "AtoB"
        self.transform = get_transform(self.opt, convert=False)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) -- the L channel of an image
            B (tensor) -- the ab channels of the same image
            A_paths (str) -- image paths
            B_paths (str) -- image paths (same as A_paths)
        """
        path = self.AB_paths[index]
        im = Image.open(path).convert("RGB")
        im = self.transform(im)
        im = np.array(im)
        lab = color.rgb2lab(im).astype(np.float32)
        lab_t = transforms.ToTensor()(lab)
        A = lab_t[[0], ...] / 50.0 - 1.0
        B = lab_t[[1, 2], ...] / 110.0
        return {"A": A, "B": B, "A_paths": path, "B_paths": path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
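The `/ 50.0 - 1.0` and `/ 110.0` factors rely on standard Lab ranges: skimage returns L in [0, 100] and a/b roughly within [-110, 110], so both A and B land near [-1, 1] for the network. A quick numeric check (a sketch, assuming scikit-image is installed):

# Sketch: verify the Lab normalization ranges used in __getitem__ above.
import numpy as np
from skimage import color

rgb = np.random.rand(64, 64, 3)              # random float RGB in [0, 1]
lab = color.rgb2lab(rgb).astype(np.float32)
L, ab = lab[..., 0], lab[..., 1:]
print(L.min(), L.max())                      # stays within [0, 100]
print(ab.min(), ab.max())                    # stays roughly within [-110, 110]
print((L / 50.0 - 1.0).min(), (ab / 110.0).max())  # both near [-1, 1]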
data/image_folder.py
ADDED
@@ -0,0 +1,72 @@
"""A modified image folder class

We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both the current directory and its subdirectories.
"""

import torch.utils.data as data
from pathlib import Path
from PIL import Image

IMG_EXTENSIONS = [
    ".jpg", ".JPG", ".jpeg", ".JPEG",
    ".png", ".PNG", ".ppm", ".PPM",
    ".bmp", ".BMP", ".tif", ".TIF",
    ".tiff", ".TIFF",
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir, max_dataset_size=float("inf")):
    images = []
    dir_path = Path(dir)
    assert dir_path.is_dir(), f"{dir} is not a valid directory"

    for path in sorted(dir_path.rglob("*")):
        if path.is_file() and is_image_file(path.name):
            images.append(str(path))
    return images[: min(max_dataset_size, len(images))]


def default_loader(path):
    return Image.open(path).convert("RGB")


class ImageFolder(data.Dataset):

    def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise RuntimeError("Found 0 images in: " + root + "\nSupported image extensions are: " + ",".join(IMG_EXTENSIONS))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        return len(self.imgs)
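Note that is_image_file matches extensions case-sensitively, which is why both '.jpg' and '.JPG' variants appear in IMG_EXTENSIONS. A quick usage sketch for the helpers above (the dataset path is hypothetical):

from data.image_folder import ImageFolder, make_dataset

paths = make_dataset("./datasets/trainA", max_dataset_size=100)  # recursive, sorted, capped at 100
print(f"found {len(paths)} images")

# ImageFolder exposes the same listing as a torch Dataset;
# return_paths=True yields (image, path) pairs instead of bare images.
dataset = ImageFolder("./datasets/trainA", return_paths=True)
img, path = dataset[0]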
data/single_dataset.py
ADDED
@@ -0,0 +1,40 @@
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image


class SingleDataset(BaseDataset):
    """This dataset class can load a set of images specified by the path --dataroot /path/to/data.

    It can be used for generating CycleGAN results for only one side with the model option '--model test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        input_nc = self.opt.output_nc if self.opt.direction == "BtoA" else self.opt.input_nc
        self.transform = get_transform(opt, grayscale=(input_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns a dictionary that contains A and A_paths
            A (tensor)    -- an image in one domain
            A_paths (str) -- the path of the image
        """
        A_path = self.A_paths[index]
        A_img = Image.open(A_path).convert("RGB")
        A = self.transform(A_img)
        return {"A": A, "A_paths": A_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.A_paths)
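The input_nc line above picks the channel count of the side actually fed to the generator: under '--direction BtoA' the generator consumes domain-B images, whose channel count is opt.output_nc. A tiny stand-in illustration (the opt object here is a made-up SimpleNamespace, not the repo's option parser):

from types import SimpleNamespace

opt = SimpleNamespace(direction="BtoA", input_nc=3, output_nc=1)
input_nc = opt.output_nc if opt.direction == "BtoA" else opt.input_nc
print(input_nc)  # 1 -> get_transform would be built with grayscale=True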
data/template_dataset.py
ADDED
@@ -0,0 +1,78 @@
"""Dataset class template

This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <Dataset_mode>Dataset
You need to implement the following functions:
    -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
    -- <__init__>: Initialize this dataset class.
    -- <__getitem__>: Return a data point and its metadata information.
    -- <__len__>: Return the number of images.
"""

from data.base_dataset import BaseDataset, get_transform

# from data.image_folder import make_dataset
# from PIL import Image


class TemplateDataset(BaseDataset):
    """A template dataset class for you to implement custom datasets."""

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.add_argument("--new_dataset_option", type=float, default=1.0, help="new dataset option")
        parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)  # specify dataset-specific default values
        return parser

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        A few things can be done here.
        - save the options (has been done in BaseDataset)
        - get image paths and meta information of the dataset.
        - define the image transformation.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        # get the image paths of your dataset;
        self.image_paths = []  # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
        # define the default transform function. You can use <base_dataset.get_transform>; you can also define your custom transform function
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.

        Step 1: get a random image path: e.g., path = self.image_paths[index]
        Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
        Step 4: return a data point as a dictionary.
        """
        path = "temp"   # needs to be a string
        data_A = None   # needs to be a tensor
        data_B = None   # needs to be a tensor
        return {"data_A": data_A, "data_B": data_B, "path": path}

    def __len__(self):
        """Return the total number of images."""
        return len(self.image_paths)
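To make the template's four steps concrete, here is a minimal hypothetical grayscale_dataset.py following the same conventions (the class name, filename, and behavior are illustrative, not part of this upload):

from PIL import Image
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset


class GrayscaleDataset(BaseDataset):
    """Loads images under --dataroot and returns them as 1-channel tensors."""

    def __init__(self, opt):
        BaseDataset.__init__(self, opt)  # save the options
        self.image_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        self.transform = get_transform(opt, grayscale=True)

    def __getitem__(self, index):
        path = self.image_paths[index]            # Step 1: get an image path
        image = Image.open(path).convert("RGB")   # Step 2: load from disk
        data = self.transform(image)              # Step 3: convert to a tensor
        return {"data_A": data, "path": path}     # Step 4: return a dictionary

    def __len__(self):
        return len(self.image_paths)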
data/unaligned_dataset.py
ADDED
@@ -0,0 +1,71 @@
import os
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random


class UnalignedDataset(BaseDataset):
    """
    This dataset class can load unaligned/unpaired datasets.

    It requires two directories to host training images from domain A '/path/to/data/trainA'
    and from domain B '/path/to/data/trainB' respectively.
    You can train the model with the dataset flag '--dataroot /path/to/data'.
    Similarly, you need to prepare two directories
    '/path/to/data/testA' and '/path/to/data/testB' during test time.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_A = os.path.join(opt.dataroot, opt.phase + "A")  # create a path '/path/to/data/trainA'
        self.dir_B = os.path.join(opt.dataroot, opt.phase + "B")  # create a path '/path/to/data/trainB'

        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = len(self.B_paths)  # get the size of dataset B
        btoA = self.opt.direction == "BtoA"
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc   # get the number of channels of the input image
        output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # get the number of channels of the output image
        self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor)    -- an image in the input domain
            B (tensor)    -- its corresponding image in the target domain
            A_paths (str) -- image path
            B_paths (str) -- image path
        """
        A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
        if self.opt.serial_batches:   # take B images in order to keep fixed pairs
            index_B = index % self.B_size
        else:   # randomize the index for domain B to avoid fixed pairs.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        A_img = Image.open(A_path).convert("RGB")
        B_img = Image.open(B_path).convert("RGB")
        # apply image transformation
        A = self.transform_A(A_img)
        B = self.transform_B(B_img)

        return {"A": A, "B": B, "A_paths": A_path, "B_paths": B_path}

    def __len__(self):
        """Return the total number of images in the dataset.

        As we have two datasets with potentially different numbers of images,
        we take the maximum of the two.
        """
        return max(self.A_size, self.B_size)
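A small illustration of the indexing scheme above: since __len__ returns max(A_size, B_size), the index can exceed the smaller set, so A wraps around with a modulo while B is drawn at random unless --serial_batches fixes the pairing (the sizes below are made up):

import random

A_size, B_size = 5, 3
for index in range(max(A_size, B_size)):       # what a DataLoader would iterate over
    index_A = index % A_size                   # wraps within dataset A
    index_B = random.randint(0, B_size - 1)    # random pairing (serial_batches off)
    print(index, "->", index_A, index_B)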
models/__init__.py
ADDED
@@ -0,0 +1,58 @@
"""This package contains modules related to objective functions, optimizations, and network architectures.

To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
    -- <set_input>: unpack data from dataset and apply preprocessing.
    -- <forward>: produce intermediate results.
    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.

In the function <__init__>, you need to define four lists:
    -- self.loss_names (str list): specify the training losses that you want to plot and save.
    -- self.model_names (str list): define networks used in our training.
    -- self.visual_names (str list): specify the images that you want to display and save.
    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.

Now you can use the model class by specifying the flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""

import importlib
from models.base_model import BaseModel


def find_model_using_name(model_name: str):
    """Import the module "models/[model_name]_model.py".

    In the file, the class called [ModelName]Model will
    be instantiated. It has to be a subclass of BaseModel,
    and the name matching is case-insensitive.
    """
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace("_", "") + "model"
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with a class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(1)

    return model


def get_option_setter(model_name: str):
    """Return the static method <modify_commandline_options> of the model class."""
    model_class = find_model_using_name(model_name)
    return model_class.modify_commandline_options


def create_model(opt):
    """Create a model given the option."""
    model = find_model_using_name(opt.model)
    instance = model(opt)
    print(f"model [{type(instance).__name__}] was created")
    return instance
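The loader relies purely on a naming convention: '--model cycle_gan' resolves to the module models.cycle_gan_model and, case-insensitively, to the class CycleGANModel. A quick check of the string manipulation:

model_name = "cycle_gan"
model_filename = "models." + model_name + "_model"   # 'models.cycle_gan_model'
target = model_name.replace("_", "") + "model"       # 'cycleganmodel'
print("CycleGANModel".lower() == target)             # True -> this class would be picked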
models/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (3.65 kB).

models/__pycache__/base_model.cpython-312.pyc
ADDED
Binary file (16.9 kB).

models/__pycache__/cycle_gan_model.cpython-312.pyc
ADDED
Binary file (13.2 kB).

models/__pycache__/networks.cpython-312.pyc
ADDED
Binary file (32.6 kB).

models/__pycache__/pix2pix_model.cpython-312.pyc
ADDED
Binary file (8.45 kB).

models/__pycache__/test_model.cpython-312.pyc
ADDED
Binary file (3.99 kB).
models/base_model.py
ADDED
@@ -0,0 +1,306 @@
import os
import torch
import torch.distributed as dist
from pathlib import Path
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks


class BaseModel(ABC):
    """This class is an abstract base class (ABC) for models.
    To create a subclass, you need to implement the following five functions:
        -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
        -- <set_input>: unpack data from dataset and apply preprocessing.
        -- <forward>: produce intermediate results.
        -- <optimize_parameters>: calculate losses, gradients, and update network weights.
        -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the BaseModel class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>.
        Then, you need to define four lists:
            -- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
            -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
        """
        self.opt = opt
        self.isTrain = opt.isTrain
        self.save_dir = Path(opt.checkpoints_dir) / opt.name  # save all the checkpoints to save_dir
        self.device = opt.device
        # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
        if opt.preprocess != "scale_width":
            torch.backends.cudnn.benchmark = True
        self.loss_names = []
        self.model_names = []
        self.visual_names = []
        self.optimizers = []
        self.image_paths = []
        self.metric = 0  # used for learning rate policy 'plateau'

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new model-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        return parser

    @abstractmethod
    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.
        """
        pass

    @abstractmethod
    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        pass

    @abstractmethod
    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        pass

    def setup(self, opt):
        """Load and print networks; create schedulers

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        # Initialize all networks and load if needed
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)
                net = networks.init_net(net, opt.init_type, opt.init_gain)

                # Load networks if needed
                if not self.isTrain or opt.continue_train:
                    load_suffix = f"iter_{opt.load_iter}" if opt.load_iter > 0 else opt.epoch
                    load_filename = f"{load_suffix}_net_{name}.pth"
                    load_path = self.save_dir / load_filename

                    if isinstance(net, torch.nn.parallel.DistributedDataParallel):
                        net = net.module
                    print(f"loading the model from {load_path}")

                    state_dict = torch.load(load_path, map_location=str(self.device), weights_only=True)

                    if hasattr(state_dict, "_metadata"):
                        del state_dict._metadata

                    # patch InstanceNorm checkpoints
                    for key in list(state_dict.keys()):
                        self.__patch_instance_norm_state_dict(state_dict, net, key.split("."))
                    net.load_state_dict(state_dict)

                # Move network to device
                net.to(self.device)

                # Wrap networks with DDP after loading
                if dist.is_initialized():
                    # Plain batch norm keeps per-process statistics, which breaks DDP;
                    # syncbatch synchronizes them, and instance norm keeps none at all.
                    if self.opt.norm not in ("syncbatch", "instance"):
                        raise ValueError(f"For distributed training, opt.norm must be 'syncbatch' or 'instance', but got '{self.opt.norm}'. Please set --norm syncbatch for multi-GPU training.")

                    net = torch.nn.parallel.DistributedDataParallel(net, device_ids=[self.device.index])
                    # Sync all processes after DDP wrapping
                    dist.barrier()

                setattr(self, "net" + name, net)

        self.print_networks(opt.verbose)

        if self.isTrain:
            self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]

    def eval(self):
        """Make models eval mode during test time"""
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)
                net.eval()

    def test(self):
        """Forward function used in test time.

        This function wraps the <forward> function in no_grad() so we don't save intermediate steps for backprop.
        It also calls <compute_visuals> to produce additional visualization results.
        """
        with torch.no_grad():
            self.forward()
            self.compute_visuals()

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        pass

    def get_image_paths(self):
        """Return image paths that are used to load current data"""
        return self.image_paths

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch"""
        old_lr = self.optimizers[0].param_groups[0]["lr"]
        for scheduler in self.schedulers:
            if self.opt.lr_policy == "plateau":
                scheduler.step(self.metric)
            else:
                scheduler.step()

        lr = self.optimizers[0].param_groups[0]["lr"]
        print(f"learning rate {old_lr:.7f} -> {lr:.7f}")

    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file"""
        visual_ret = OrderedDict()
        for name in self.visual_names:
            if isinstance(name, str):
                visual_ret[name] = getattr(self, name)
        return visual_ret

    def get_current_losses(self):
        """Return training losses / errors. train.py will print out these errors on the console, and save them to a file"""
        errors_ret = OrderedDict()
        for name in self.loss_names:
            if isinstance(name, str):
                errors_ret[name] = float(getattr(self, "loss_" + name))  # float(...) works for both scalar tensor and float number
        return errors_ret

    def save_networks(self, epoch):
        """Save all the networks to the disk, unwrapping them first."""

        # Only allow the main process (rank 0) to save the checkpoint
        if not dist.is_initialized() or dist.get_rank() == 0:
            for name in self.model_names:
                if isinstance(name, str):
                    save_filename = f"{epoch}_net_{name}.pth"
                    save_path = self.save_dir / save_filename
                    net = getattr(self, "net" + name)

                    # 1. First, unwrap from DDP if it exists
                    if hasattr(net, "module"):
                        model_to_save = net.module
                    else:
                        model_to_save = net

                    # 2. Second, unwrap from torch.compile if it exists
                    if hasattr(model_to_save, "_orig_mod"):
                        model_to_save = model_to_save._orig_mod

                    # 3. Save the final, clean state_dict
                    torch.save(model_to_save.state_dict(), save_path)

    def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
        """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
        key = keys[i]
        if i + 1 == len(keys):  # at the end, pointing to a parameter/buffer
            if module.__class__.__name__.startswith("InstanceNorm") and (key == "running_mean" or key == "running_var"):
                if getattr(module, key) is None:
                    state_dict.pop(".".join(keys))
            if module.__class__.__name__.startswith("InstanceNorm") and (key == "num_batches_tracked"):
                state_dict.pop(".".join(keys))
        else:
            self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)

    def load_networks(self, epoch):
        """Load all networks from the disk for DDP."""

        for name in self.model_names:
            if isinstance(name, str):
                load_filename = f"{epoch}_net_{name}.pth"
                load_path = self.save_dir / load_filename
                net = getattr(self, "net" + name)

                if isinstance(net, torch.nn.parallel.DistributedDataParallel):
                    net = net.module
                print(f"loading the model from {load_path}")

                state_dict = torch.load(load_path, map_location=str(self.device), weights_only=True)

                if hasattr(state_dict, "_metadata"):
                    del state_dict._metadata

                # patch InstanceNorm checkpoints
                for key in list(state_dict.keys()):
                    self.__patch_instance_norm_state_dict(state_dict, net, key.split("."))
                net.load_state_dict(state_dict)

        # Add a barrier to sync all processes before continuing
        if dist.is_initialized():
            dist.barrier()

    def print_networks(self, verbose):
        """Print the total number of parameters in the network and (if verbose) the network architecture

        Parameters:
            verbose (bool) -- if verbose: print the network architecture
        """
        print("---------- Networks initialized -------------")
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)
                num_params = 0
                for param in net.parameters():
                    num_params += param.numel()
                if verbose:
                    print(net)
                print(f"[Network {name}] Total number of parameters : {num_params / 1e6:.3f} M")
        print("-----------------------------------------------")

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations
        Parameters:
            nets (network list)  -- a list of networks
            requires_grad (bool) -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def init_networks(self, init_type="normal", init_gain=0.02):
        """Initialize all networks: 1. move to device; 2. initialize weights

        Parameters:
            init_type (str)   -- initialization method: normal | xavier | kaiming | orthogonal
            init_gain (float) -- scaling factor for normal, xavier and orthogonal
        """
        for name in self.model_names:
            if isinstance(name, str):
                net = getattr(self, "net" + name)

                # Move to device
                if torch.cuda.is_available():
                    if "LOCAL_RANK" in os.environ:
                        local_rank = int(os.environ["LOCAL_RANK"])
                        net.to(local_rank)
                        print(f"Initialized network {name} with device cuda:{local_rank}")
                    else:
                        net.to(0)
                        print(f"Initialized network {name} with device cuda:0")
                else:
                    net.to("cpu")
                    print(f"Initialized network {name} with device cpu")

                # Initialize weights using networks function
                networks.init_weights(net, init_type, init_gain)
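The two-step unwrap in save_networks matters because DDP stores the wrapped model at net.module and torch.compile stores it at _orig_mod; saving without unwrapping would prefix every state_dict key. A minimal sketch with a stand-in wrapper (plain nn.Module, no GPU needed):

import torch.nn as nn

class Wrapper(nn.Module):           # stand-in for DDP, which keeps the model at .module
    def __init__(self, module):
        super().__init__()
        self.module = module

net = Wrapper(nn.Linear(4, 2))
print(list(net.state_dict()))          # ['module.weight', 'module.bias'] -- prefixed keys
print(list(net.module.state_dict()))   # ['weight', 'bias'] -- what save_networks stores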
models/colorization_model.py
ADDED
@@ -0,0 +1,69 @@
from .pix2pix_model import Pix2PixModel
import torch
from skimage import color  # used for lab2rgb
import numpy as np


class ColorizationModel(Pix2PixModel):
    """This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).

    The model training requires the '--dataset_mode colorization' dataset.
    It trains a pix2pix model, mapping from the L channel to the ab channels in Lab color space.
    By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        By default, we use the 'colorization' dataset for this model.
        See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
        """
        Pix2PixModel.modify_commandline_options(parser, is_train)
        parser.set_defaults(dataset_mode="colorization")
        return parser

    def __init__(self, opt):
        """Initialize the class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        For visualization, we set 'visual_names' as 'real_A' (input real image),
        'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image).
        We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
        We convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
        """
        # reuse the pix2pix model
        Pix2PixModel.__init__(self, opt)
        # specify the images to be visualized.
        self.visual_names = ["real_A", "real_B_rgb", "fake_B_rgb"]

    def lab2rgb(self, L, AB):
        """Convert a Lab tensor image to an RGB numpy output
        Parameters:
            L  (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
            AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)

        Returns:
            rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
        """
        AB2 = AB * 110.0
        L2 = (L + 1.0) * 50.0
        Lab = torch.cat([L2, AB2], dim=1)
        Lab = Lab[0].data.cpu().float().numpy()
        Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
        rgb = color.lab2rgb(Lab) * 255
        return rgb

    def compute_visuals(self):
        """Calculate additional output images for visdom and HTML visualization"""
        self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
        self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
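lab2rgb above is the exact inverse of the normalization in ColorizationDataset: (L + 1) * 50 undoes L / 50 - 1, and AB * 110 undoes ab / 110. A shape-level sketch with random tensors (batch of 1, assumes scikit-image):

import numpy as np
import torch
from skimage import color

L = torch.rand(1, 1, 16, 16) * 2 - 1    # normalized L in [-1, 1]
AB = torch.rand(1, 2, 16, 16) * 2 - 1   # normalized ab in [-1, 1]

Lab = torch.cat([(L + 1.0) * 50.0, AB * 110.0], dim=1)[0]      # back to Lab ranges
Lab = np.transpose(Lab.numpy().astype(np.float64), (1, 2, 0))  # CHW -> HWC
rgb = color.lab2rgb(Lab) * 255                                 # out-of-gamut values are clipped
print(rgb.shape)  # (16, 16, 3), values in [0, 255]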
models/cycle_gan_model.py
ADDED
@@ -0,0 +1,196 @@
import torch
import itertools
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks


class CycleGANModel(BaseModel):
    """
    This class implements the CycleGAN model, for learning image-to-image translation without paired data.

    The model training requires the '--dataset_mode unaligned' dataset.
    By default, it uses a '--netG resnet_9blocks' ResNet generator,
    a '--netD basic' discriminator (the PatchGAN introduced by pix2pix),
    and a least-squares GAN objective ('--gan_mode lsgan').

    CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
        A (source domain), B (target domain).
        Generators: G_A: A -> B; G_B: B -> A.
        Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
        Forward cycle loss:  lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
        Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
        Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
        Dropout is not used in the original CycleGAN paper.
        """
        parser.set_defaults(no_dropout=True)  # default CycleGAN did not use dropout
        if is_train:
            parser.add_argument("--lambda_A", type=float, default=10.0, help="weight for cycle loss (A -> B -> A)")
            parser.add_argument("--lambda_B", type=float, default=10.0, help="weight for cycle loss (B -> A -> B)")
            parser.add_argument(
                "--lambda_identity",
                type=float,
                default=0.5,
                help="use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1",
            )

        return parser

    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ["D_A", "G_A", "cycle_A", "idt_A", "D_B", "G_B", "cycle_B", "idt_B"]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ["real_A", "fake_B", "rec_A"]
        visual_names_B = ["real_B", "fake_A", "rec_B"]
        if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_A(B) and idt_A=G_B(A)
            visual_names_A.append("idt_B")
            visual_names_B.append("idt_A")

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ["G_A", "G_B", "D_A", "D_B"]
        else:  # during test time, only load Gs
            self.model_names = ["G_A", "G_B"]

        # define networks (both generators and discriminators)
        # The naming is different from that used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert opt.input_nc == opt.output_nc
            self.fake_A_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = ImagePool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): includes the data itself and its metadata information.

        The option 'direction' can be used to swap domain A and domain B.
        """
        AtoB = self.opt.direction == "AtoB"
        self.real_A = input["A" if AtoB else "B"].to(self.device)
        self.real_B = input["B" if AtoB else "A"].to(self.device)
        self.image_paths = input["A_paths" if AtoB else "B_paths"]

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
        self.rec_A = self.netG_B(self.fake_B)   # G_B(G_A(A))
        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
        self.rec_B = self.netG_A(self.fake_A)   # G_A(G_B(B))

    def backward_D_basic(self, netD, real, fake):
        """Calculate GAN loss for the discriminator

        Parameters:
            netD (network)      -- the discriminator D
            real (tensor array) -- real images
            fake (tensor array) -- images generated by a generator

        Return the discriminator loss.
        We also call loss_D.backward() to calculate the gradients.
        """
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss and calculate gradients
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss ||G_B(G_A(A)) - A||
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss ||G_A(G_B(B)) - B||
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss and calculate gradients
        self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
        self.loss_G.backward()

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()  # compute fake images and reconstruction images.
        # G_A and G_B
        self.set_requires_grad([self.netD_A, self.netD_B], False)  # Ds require no gradients when optimizing Gs
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()             # calculate gradients for G_A and G_B
        self.optimizer_G.step()       # update G_A and G_B's weights
        # D_A and D_B
        self.set_requires_grad([self.netD_A, self.netD_B], True)
        self.optimizer_D.zero_grad()  # set D_A and D_B's gradients to zero
        self.backward_D_A()           # calculate gradients for D_A
        self.backward_D_B()           # calculate gradients for D_B
        self.optimizer_D.step()       # update D_A and D_B's weights
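Putting backward_G together, with the defaults above (lambda_A = lambda_B = 10, lambda_identity = 0.5) the full generator objective the code minimizes is:

loss_G = GAN(D_A(G_A(A))) + GAN(D_B(G_B(B)))
       + 10 * ||G_B(G_A(A)) - A||_1 + 10 * ||G_A(G_B(B)) - B||_1
       + 0.5 * 10 * ||G_A(B) - B||_1 + 0.5 * 10 * ||G_B(A) - A||_1

The discriminators are then updated separately on pooled fakes, each with the averaged real/fake loss from backward_D_basic.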
models/networks.py
ADDED
@@ -0,0 +1,588 @@
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from torch.nn import init
|
| 4 |
+
import functools
|
| 5 |
+
from torch.optim import lr_scheduler
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
###############################################################################
|
| 9 |
+
# Helper Functions
|
| 10 |
+
###############################################################################
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Identity(nn.Module):
|
| 14 |
+
def forward(self, x):
|
| 15 |
+
return x
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_norm_layer(norm_type="instance"):
|
| 19 |
+
"""Return a normalization layer
|
| 20 |
+
|
| 21 |
+
Parameters:
|
| 22 |
+
norm_type (str) -- the name of the normalization layer: batch | instance | none
|
| 23 |
+
|
| 24 |
+
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
|
| 25 |
+
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
|
| 26 |
+
"""
|
| 27 |
+
if norm_type == "batch":
|
| 28 |
+
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
|
| 29 |
+
elif norm_type == "syncbatch":
|
| 30 |
+
norm_layer = functools.partial(nn.SyncBatchNorm, affine=True, track_running_stats=True)
|
| 31 |
+
elif norm_type == "instance":
|
| 32 |
+
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
|
| 33 |
+
elif norm_type == "none":
|
| 34 |
+
|
| 35 |
+
def norm_layer(x):
|
| 36 |
+
return Identity()
|
| 37 |
+
|
| 38 |
+
else:
|
| 39 |
+
raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
|
| 40 |
+
return norm_layer
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_scheduler(optimizer, opt):
|
| 44 |
+
"""Return a learning rate scheduler
|
| 45 |
+
|
| 46 |
+
Parameters:
|
| 47 |
+
optimizer -- the optimizer of the network
|
| 48 |
+
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
|
| 49 |
+
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
|
| 50 |
+
|
| 51 |
+
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
|
| 52 |
+
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
|
| 53 |
+
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
|
| 54 |
+
See https://pytorch.org/docs/stable/optim.html for more details.
|
| 55 |
+
"""
|
| 56 |
+
if opt.lr_policy == "linear":
|
| 57 |
+
|
| 58 |
+
def lambda_rule(epoch):
|
| 59 |
+
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
|
| 60 |
+
return lr_l
|
| 61 |
+
|
| 62 |
+
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
|
| 63 |
+
elif opt.lr_policy == "step":
|
| 64 |
+
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
|
| 65 |
+
elif opt.lr_policy == "plateau":
|
| 66 |
+
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.2, threshold=0.01, patience=5)
|
| 67 |
+
elif opt.lr_policy == "cosine":
|
| 68 |
+
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
|
| 69 |
+
else:
|
| 70 |
+
return NotImplementedError("learning rate policy [%s] is not implemented", opt.lr_policy)
|
| 71 |
+
return scheduler
|
| 72 |
+
|
| 73 |
+
|
def init_weights(net, init_type="normal", init_gain=0.02):
    """Initialize network weights.

    Parameters:
        net (network)     -- network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    We use 'normal' in the original pix2pix and CycleGAN paper, but xavier and kaiming might
    work better for some applications. Feel free to experiment.
    """

    def init_func(m):  # define the initialization function
        classname = m.__class__.__name__
        if hasattr(m, "weight") and (classname.find("Conv") != -1 or classname.find("Linear") != -1):
            if init_type == "normal":
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == "xavier":
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == "kaiming":
                init.kaiming_normal_(m.weight.data, a=0, mode="fan_in")
            elif init_type == "orthogonal":
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError("initialization method [%s] is not implemented" % init_type)
            if hasattr(m, "bias") and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find("BatchNorm2d") != -1:  # BatchNorm's weight is not a matrix; only the normal distribution applies.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)

    print("initialize network with %s" % init_type)
    net.apply(init_func)  # apply the initialization function <init_func>


def init_net(net, init_type="normal", init_gain=0.02):
    """Initialize a network: 1. register the CPU/GPU device; 2. initialize the network weights

    Parameters:
        net (network)     -- the network to be initialized
        init_type (str)   -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.

    Return an initialized network.
    """
    import os

    if torch.cuda.is_available():
        if "LOCAL_RANK" in os.environ:
            local_rank = int(os.environ["LOCAL_RANK"])
            net.to(local_rank)
            print(f"Initialized with device cuda:{local_rank}")
        else:
            net.to(0)
            print("Initialized with device cuda:0")
    init_weights(net, init_type, init_gain=init_gain)
    return net

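A minimal sketch of the intended call order (ResnetGenerator is defined later in this file; init_net moves the module to a GPU if one is available, then initializes its weights):

    net = ResnetGenerator(3, 3, ngf=64, n_blocks=9)
    net = init_net(net, init_type="normal", init_gain=0.02)
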
def define_G(input_nc, output_nc, ngf, netG, norm="batch", use_dropout=False, init_type="normal", init_gain=0.02):
    """Create a generator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_128 | unet_256
        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- whether to use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.

    Returns a generator that has been registered on its device and initialized by <init_net>.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netG == "resnet_9blocks":
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
    elif netG == "resnet_6blocks":
        net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
    elif netG == "unet_128":
        net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    elif netG == "unet_256":
        net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    else:
        raise NotImplementedError("Generator model name [%s] is not recognized" % netG)
    return init_net(net, init_type, init_gain)  # register the device and initialize weights; otherwise init_type/init_gain would go unused

def define_D(input_nc, ndf, netD, n_layers_D=3, norm="batch", init_type="normal", init_gain=0.02):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: basic | n_layers | pixel
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.

    Returns a discriminator

    Our current implementation provides three types of discriminators:
        [basic]: 'PatchGAN' classifier described in the original pix2pix paper.
        It can classify whether 70x70 overlapping patches are real or fake.
        Such a patch-level discriminator architecture has fewer parameters
        than a full-image discriminator and can work on arbitrarily-sized images
        in a fully convolutional fashion.

        [n_layers]: With this mode, you can specify the number of conv layers in the discriminator
        with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN)).

        [pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
        It encourages greater color diversity but has no effect on spatial statistics.

    The discriminator has been initialized by <init_net>. It uses LeakyReLU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netD == "basic":  # default PatchGAN classifier
        net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
    elif netD == "n_layers":  # more options
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif netD == "pixel":  # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        raise NotImplementedError("Discriminator model name [%s] is not recognized" % netD)
    return init_net(net, init_type, init_gain)  # register the device and initialize weights, as the docstring promises

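For illustration, the kind of factory calls the model classes below make (a sketch; a conditional pix2pix discriminator sees input and output stacked along the channel axis, hence input_nc + output_nc channels):

    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG="resnet_9blocks", norm="instance")
    netD = define_D(input_nc=3 + 3, ndf=64, netD="basic", norm="instance")
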
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
    """Define different GAN objectives.

    The GANLoss class abstracts away the need to create the target label tensor
    that has the same size as the input.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
        """Initialize the GANLoss class.

        Parameters:
            gan_mode (str)            -- the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
            target_real_label (float) -- label for a real image
            target_fake_label (float) -- label for a fake image

        Note: Do not use sigmoid as the last layer of the discriminator.
        LSGAN needs no sigmoid, and vanilla GANs handle it with BCEWithLogitsLoss.
        """
        super(GANLoss, self).__init__()
        self.register_buffer("real_label", torch.tensor(target_real_label))
        self.register_buffer("fake_label", torch.tensor(target_fake_label))
        self.gan_mode = gan_mode
        if gan_mode == "lsgan":
            self.loss = nn.MSELoss()
        elif gan_mode == "vanilla":
            self.loss = nn.BCEWithLogitsLoss()
        elif gan_mode in ["wgangp"]:
            self.loss = None
        else:
            raise NotImplementedError("gan mode %s not implemented" % gan_mode)

    def get_target_tensor(self, prediction, target_is_real):
        """Create label tensors with the same size as the input.

        Parameters:
            prediction (tensor)   -- typically the prediction from a discriminator
            target_is_real (bool) -- whether the ground truth label is for real images or fake images

        Returns:
            A label tensor filled with the ground truth label, with the size of the input
        """

        if target_is_real:
            target_tensor = self.real_label
        else:
            target_tensor = self.fake_label
        return target_tensor.expand_as(prediction)

    def __call__(self, prediction, target_is_real):
        """Calculate loss given the discriminator's output and ground truth labels.

        Parameters:
            prediction (tensor)   -- typically the prediction output from a discriminator
            target_is_real (bool) -- whether the ground truth label is for real images or fake images

        Returns:
            the calculated loss.
        """
        if self.gan_mode in ["lsgan", "vanilla"]:
            target_tensor = self.get_target_tensor(prediction, target_is_real)
            loss = self.loss(prediction, target_tensor)
        elif self.gan_mode == "wgangp":
            if target_is_real:
                loss = -prediction.mean()
            else:
                loss = prediction.mean()
        return loss

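A minimal usage sketch of GANLoss inside a discriminator update (pred_real and pred_fake are assumed to be raw, un-sigmoided discriminator outputs):

    criterion = GANLoss("lsgan")
    loss_D = 0.5 * (criterion(pred_real, True) + criterion(pred_fake, False))
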
def cal_gradient_penalty(netD, real_data, fake_data, device, type="mixed", constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in the WGAN-GP paper https://arxiv.org/abs/1704.00028

    Arguments:
        netD (network)           -- discriminator network
        real_data (tensor array) -- real images
        fake_data (tensor array) -- generated images from the generator
        device (str)             -- GPU / CPU
        type (str)               -- whether to mix real and fake data or not [real | fake | mixed].
        constant (float)         -- the constant used in the formula (||gradient||_2 - constant)^2
        lambda_gp (float)        -- weight for this loss

    Returns the gradient penalty loss
    """
    if lambda_gp > 0.0:
        if type == "real":  # either use real images, fake images, or a linear interpolation of the two.
            interpolatesv = real_data
        elif type == "fake":
            interpolatesv = fake_data
        elif type == "mixed":
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError(f"{type} not implemented")
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, grad_outputs=torch.ones(disc_interpolates.size()).to(device), create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps for numerical stability
        return gradient_penalty, gradients
    else:
        return 0.0, None

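A sketch of how the penalty typically enters a WGAN-GP discriminator update (real_imgs, fake_imgs, and the two prediction tensors are assumed to come from the surrounding training step):

    gp, _ = cal_gradient_penalty(netD, real_imgs, fake_imgs.detach(), device, type="mixed", lambda_gp=10.0)
    loss_D = pred_fake.mean() - pred_real.mean() + gp  # Wasserstein critic loss plus the penalty
    loss_D.backward()
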
class ResnetGenerator(nn.Module):
    """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.

    We adapt Torch code and ideas from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style)
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type="reflect"):
        """Construct a Resnet-based generator

        Parameters:
            input_nc (int)     -- the number of channels in input images
            output_nc (int)    -- the number of channels in output images
            ngf (int)          -- the number of filters in the last conv layer
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to use dropout layers
            n_blocks (int)     -- the number of ResNet blocks
            padding_type (str) -- the name of the padding layer in conv layers: reflect | replicate | zero
        """
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), norm_layer(ngf), nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2**i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), norm_layer(ngf * mult * 2), nn.ReLU(True)]

        mult = 2**n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias), norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)

class ResnetBlock(nn.Module):
    """Define a Resnet block"""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block

        A resnet block is a conv block with skip connections.
        We construct a conv block with the build_conv_block function,
        and implement the skip connections in the <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)          -- the number of channels in the conv layer.
            padding_type (str) -- the name of the padding layer: reflect | replicate | zero
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to use dropout layers.
            use_bias (bool)    -- whether the conv layer uses bias or not

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)

        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        out = x + self.conv_block(x)  # add skip connections
        return out

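As a quick shape check on the generator (a sketch): two stride-2 downsampling stages, the residual blocks, and two matching upsampling stages return the input resolution unchanged:

    G = ResnetGenerator(3, 3, ngf=64, n_blocks=9)
    y = G(torch.randn(1, 3, 256, 256))  # torch.Size([1, 3, 256, 256]), values in (-1, 1) from the final Tanh
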
class UnetGenerator(nn.Module):
    """Create a Unet-based generator"""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in the UNet. For example, if num_downs == 7,
                               an image of size 128x128 becomes of size 1x1 at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer

        We construct the U-Net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(UnetGenerator, self).__init__()
        # construct unet structure
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)  # add the innermost layer
        for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)  # add the outermost layer

    def forward(self, input):
        """Standard forward"""
        return self.model(input)

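A quick sanity check on num_downs (a sketch): unet_256 in define_G corresponds to num_downs=8, so a 256x256 input is halved eight times down to a 1x1 bottleneck; the input side length should be divisible by 2**num_downs:

    G = UnetGenerator(3, 3, num_downs=8, ngf=64)
    y = G(torch.randn(1, 3, 256, 256))  # torch.Size([1, 3, 256, 256])
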
class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with a skip connection.
        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None, submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)   -- if this module is the outermost module
            innermost (bool)   -- if this module is the innermost module
            norm_layer         -- normalization layer
            use_dropout (bool) -- whether to use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc, kernel_size=4, stride=2, padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:  # add skip connections
            return torch.cat([x, self.model(x)], 1)

class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the first conv layer
            n_layers (int) -- the number of conv layers in the discriminator
            norm_layer     -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4
        padw = 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias), norm_layer(ndf * nf_mult), nn.LeakyReLU(0.2, True)]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output a 1-channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)

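For intuition about the 'patch' in PatchGAN (a sketch): with the default n_layers=3 and 4x4 kernels, each output logit sees a 70x70 receptive field on the input, and the three stride-2 convolutions shrink the map accordingly:

    D = NLayerDiscriminator(3, ndf=64, n_layers=3)
    p = D(torch.randn(1, 3, 256, 256))  # torch.Size([1, 1, 30, 30]); one real/fake logit per overlapping 70x70 patch
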
class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator

        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the first conv layer
            norm_layer     -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias),
        ]

        self.net = nn.Sequential(*self.net)

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
models/pix2pix_model.py
ADDED
@@ -0,0 +1,127 @@
import torch
from .base_model import BaseModel
from . import networks


class Pix2PixModel(BaseModel):
    """This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.

    The model training requires the '--dataset_mode aligned' dataset.
    By default, it uses a '--netG unet_256' U-Net generator,
    a '--netD basic' discriminator (PatchGAN),
    and a '--gan_mode vanilla' GAN loss (the cross-entropy objective used in the original GAN paper).

    pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        For pix2pix, we do not use the image buffer.
        The training objective is: GAN loss + lambda_L1 * ||G(A) - B||_1
        By default, we use vanilla GAN loss, U-Net with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        parser.set_defaults(norm="batch", netG="unet_256", dataset_mode="aligned")
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode="vanilla")
            parser.add_argument("--lambda_L1", type=float, default=100.0, help="weight for L1 loss")

        return parser

    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ["G_GAN", "G_L1", "D_real", "D_fake"]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ["real_A", "fake_B", "real_B"]
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ["G", "D"]
        else:  # during test time, only load G
            self.model_names = ["G"]
        self.device = opt.device
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain)

        if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # move to the device for custom loss
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input (dict): include the data itself and its metadata information.

        The option 'direction' can be used to swap images in domain A and domain B.
        """
        AtoB = self.opt.direction == "AtoB"
        self.real_A = input["A" if AtoB else "B"].to(self.device)
        self.real_B = input["B" if AtoB else "A"].to(self.device)
        self.image_paths = input["A_paths" if AtoB else "B_paths"]

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG(self.real_A)  # G(A)

    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)  # we use conditional GANs; we need to feed both input and output to the discriminator
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine losses and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fool the discriminator
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        # combine losses and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()

    def optimize_parameters(self):
        self.forward()                             # compute fake images: G(A)
        # update D
        self.set_requires_grad(self.netD, True)    # enable backprop for D
        self.optimizer_D.zero_grad()               # set D's gradients to zero
        self.backward_D()                          # calculate gradients for D
        self.optimizer_D.step()                    # update D's weights
        # update G
        self.set_requires_grad(self.netD, False)   # D requires no gradients when optimizing G
        self.optimizer_G.zero_grad()               # set G's gradients to zero
        self.backward_G()                          # calculate gradients for G
        self.optimizer_G.step()                    # update G's weights
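A minimal sketch of the loop this model expects from a training driver (each dataset item is the dict unpacked by set_input above):

    model = Pix2PixModel(opt)
    model.setup(opt)                 # create schedulers and/or load networks (see <BaseModel.setup>)
    for data in dataset:             # {'A': ..., 'B': ..., 'A_paths': ..., 'B_paths': ...}
        model.set_input(data)
        model.optimize_parameters()  # one D step, then one G step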
models/template_model.py
ADDED
@@ -0,0 +1,100 @@
"""Model class template

This module provides a template for users to implement custom models.
You can specify '--model template' to use this model.
The class name should be consistent with both the filename and its model option.
The filename should be <model>_model.py
The class name should be <Model>Model
It implements a simple image-to-image translation baseline based on a regression loss.
Given input-output pairs (data_A, data_B), it learns a network netG that minimizes the following L1 loss:
    min_<netG> ||netG(data_A) - data_B||_1
You need to implement the following functions:
    <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
    <__init__>: Initialize this model class.
    <set_input>: Unpack input data and perform data pre-processing.
    <forward>: Run the forward pass. This will be called by both <optimize_parameters> and <test>.
    <optimize_parameters>: Update network weights; it will be called in every training iteration.
"""

import torch
from .base_model import BaseModel
from . import networks


class TemplateModel(BaseModel):
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new model-specific options and rewrite default values for existing options.

        Parameters:
            parser   -- the option parser
            is_train -- whether it is the training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.set_defaults(dataset_mode="aligned")  # You can rewrite default values for this model. For example, this model usually uses the aligned dataset as its dataset.
        if is_train:
            parser.add_argument("--lambda_regression", type=float, default=1.0, help="weight for the regression loss")  # You can define new arguments for this model.

        return parser

    def __init__(self, opt):
        """Initialize this model class.

        Parameters:
            opt -- training/test options

        A few things can be done here.
        - (required) call the initialization function of BaseModel
        - define loss functions, visualization images, model names, and optimizers
        """
        BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
        # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
        self.loss_names = ["G"]
        # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
        self.visual_names = ["data_A", "data_B", "output"]
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
        # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
        self.model_names = ["G"]
        # define networks; you can use opt.isTrain to specify different behaviors for training and test.
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG)
        if self.isTrain:  # only defined during training time
            # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
            # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
            self.criterionLoss = torch.nn.L1Loss()
            # define and initialize optimizers. You can define one optimizer for each network.
            # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer]

        # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input: a dictionary that contains the data itself and its metadata information.
        """
        AtoB = self.opt.direction == "AtoB"  # use <direction> to swap data_A and data_B
        self.data_A = input["A" if AtoB else "B"].to(self.device)  # get image data A
        self.data_B = input["B" if AtoB else "A"].to(self.device)  # get image data B
        self.image_paths = input["A_paths" if AtoB else "B_paths"]  # get image paths

    def forward(self):
        """Run the forward pass. This will be called by both <optimize_parameters> and <test>."""
        self.output = self.netG(self.data_A)  # generate the output image given input data_A

    def backward(self):
        """Calculate losses and gradients; called in every training iteration"""
        # calculate intermediate results if necessary; here self.output has been computed during <forward>
        # calculate loss given the input and intermediate results
        self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
        self.loss_G.backward()  # calculate gradients of network G w.r.t. loss_G

    def optimize_parameters(self):
        """Update network weights; it will be called in every training iteration."""
        self.forward()              # first call forward to calculate intermediate results
        self.optimizer.zero_grad()  # clear network G's existing gradients
        self.backward()             # calculate gradients for network G
        self.optimizer.step()       # update network G's weights
models/test_model.py
ADDED
@@ -0,0 +1,69 @@
from .base_model import BaseModel
from . import networks


class TestModel(BaseModel):
    """This TestModel can be used to generate CycleGAN results for only one direction.
    This model will automatically set '--dataset_mode single', which only loads the images from one collection.

    See the test instructions for more details.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.

        The model can only be used during test time. It requires '--dataset_mode single'.
        You need to specify the network using the option '--model_suffix'.
        """
        assert not is_train, "TestModel cannot be used during training time"
        parser.set_defaults(dataset_mode="single")
        parser.add_argument("--model_suffix", type=str, default="", help="In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.")

        return parser

    def __init__(self, opt):
        """Initialize the TestModel class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        assert not opt.isTrain
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = []
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ["real", "fake"]
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        self.model_names = ["G" + opt.model_suffix]  # only the generator is needed.
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain)

        # assigns the model to self.netG_[suffix] so that it can be loaded
        # please see <BaseModel.load_networks>
        setattr(self, "netG" + opt.model_suffix, self.netG)  # store netG in self.

    def set_input(self, input):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.

        Parameters:
            input: a dictionary that contains the data itself and its metadata information.

        We need to use the 'single_dataset' dataset mode. It only loads images from one domain.
        """
        self.real = input["A"].to(self.device)
        self.image_paths = input["A_paths"]

    def forward(self):
        """Run forward pass."""
        self.fake = self.netG(self.real)  # G(real)

    def optimize_parameters(self):
        """No optimization for the test model."""
        pass
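For reference, a sketch of a one-direction test run with this model (assuming the upstream test.py driver from the CycleGAN/pix2pix codebase; the dataroot path is illustrative):

    python test.py --dataroot ./datasets/ldr_images --name ldr2hdr_cyclegan_728 --model test --no_dropout

With --epoch and --model_suffix left at their defaults, this loads latest_net_G.pth from the experiment's checkpoint directory.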
options/__init__.py
ADDED
@@ -0,0 +1 @@
"""This package includes option modules: training options, test options, and basic options (used in both training and test)."""
options/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (343 Bytes)
options/__pycache__/base_options.cpython-312.pyc
ADDED
Binary file (9.96 kB)
options/__pycache__/test_options.cpython-312.pyc
ADDED
Binary file (1.66 kB)
options/__pycache__/train_options.cpython-312.pyc
ADDED
Binary file (3.57 kB)
options/base_options.py
ADDED
@@ -0,0 +1,127 @@
import argparse
from pathlib import Path
from util import util
import torch
import models
import data


class BaseOptions:
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in the <modify_commandline_options> functions of both the dataset class and the model class.
    """

    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized"""
        self.initialized = False

    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument("--dataroot", required=True, help="path to images (should have subfolders trainA, trainB, valA, valB, etc)")
        parser.add_argument("--name", type=str, default="experiment_name", help="name of the experiment. It decides where to store samples and models")
        parser.add_argument("--checkpoints_dir", type=str, default="./checkpoints", help="models are saved here")
        # model parameters
        parser.add_argument("--model", type=str, default="cycle_gan", help="chooses which model to use. [cycle_gan | pix2pix | test | colorization]")
        parser.add_argument("--input_nc", type=int, default=3, help="# of input image channels: 3 for RGB and 1 for grayscale")
        parser.add_argument("--output_nc", type=int, default=3, help="# of output image channels: 3 for RGB and 1 for grayscale")
        parser.add_argument("--ngf", type=int, default=64, help="# of gen filters in the last conv layer")
        parser.add_argument("--ndf", type=int, default=64, help="# of discrim filters in the first conv layer")
        parser.add_argument("--netD", type=str, default="basic", help="specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator")
        parser.add_argument("--netG", type=str, default="resnet_9blocks", help="specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]")
        parser.add_argument("--n_layers_D", type=int, default=3, help="only used if netD==n_layers")
        parser.add_argument("--norm", type=str, default="instance", help="instance normalization or batch normalization [instance | batch | none | syncbatch]")
        parser.add_argument("--init_type", type=str, default="normal", help="network initialization [normal | xavier | kaiming | orthogonal]")
        parser.add_argument("--init_gain", type=float, default=0.02, help="scaling factor for normal, xavier and orthogonal.")
        parser.add_argument("--no_dropout", action="store_true", help="no dropout for the generator")
        # dataset parameters
        parser.add_argument("--dataset_mode", type=str, default="unaligned", help="chooses how datasets are loaded. [unaligned | aligned | single | colorization]")
        parser.add_argument("--direction", type=str, default="AtoB", help="AtoB or BtoA")
        parser.add_argument("--serial_batches", action="store_true", help="if true, takes images in order to make batches, otherwise takes them randomly")
        parser.add_argument("--num_threads", default=4, type=int, help="# threads for loading data")
        parser.add_argument("--batch_size", type=int, default=1, help="input batch size")
        parser.add_argument("--load_size", type=int, default=286, help="scale images to this size")
        parser.add_argument("--crop_size", type=int, default=256, help="then crop to this size")
        parser.add_argument("--max_dataset_size", type=int, default=float("inf"), help="Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.")
        parser.add_argument("--preprocess", type=str, default="resize_and_crop", help="scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]")
        parser.add_argument("--no_flip", action="store_true", help="if specified, do not flip the images for data augmentation")
        parser.add_argument("--display_winsize", type=int, default=256, help="display window size for both visdom and HTML")
        # additional parameters
        parser.add_argument("--epoch", type=str, default="latest", help="which epoch to load? set to latest to use the latest cached model")
        parser.add_argument("--load_iter", type=int, default=0, help="which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]")
        parser.add_argument("--verbose", action="store_true", help="if specified, print more debugging information")
        parser.add_argument("--suffix", default="", type=str, help="customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}")
        # wandb parameters
        parser.add_argument("--use_wandb", action="store_true", help="if specified, then init wandb logging")
        parser.add_argument("--wandb_project_name", type=str, default="CycleGAN-and-pix2pix", help="specify wandb project name")
        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once).
        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with new defaults

        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)

        # save and return the parser
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Print and save options

        It will print both current options and default values (if different).
        It will save the options into a text file: [checkpoints_dir] / [name] / [phase]_opt.txt
        """
        message = ""
        message += "----------------- Options ---------------\n"
        for k, v in sorted(vars(opt).items()):
            comment = ""
            default = self.parser.get_default(k)
            if v != default:
                comment = "\t[default: %s]" % str(default)
            message += "{:>25}: {:<30}{}\n".format(str(k), str(v), comment)
        message += "----------------- End -------------------"
        print(message)

        # save to the disk
        expr_dir = Path(opt.checkpoints_dir) / opt.name
        util.mkdirs(expr_dir)
        file_name = expr_dir / f"{opt.phase}_opt.txt"
        with open(file_name, "wt") as opt_file:
            opt_file.write(message)
            opt_file.write("\n")

    def parse(self):
        """Parse our options, create the checkpoints directory suffix, and set up the gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix
        if opt.suffix:
            suffix = ("_" + opt.suffix.format(**vars(opt))) if opt.suffix != "" else ""
            opt.name = opt.name + suffix

        self.print_options(opt)
        self.opt = opt
        return self.opt
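As a worked example of the --suffix template (a sketch using the defaults above): running with --name exp --suffix {model}_{netG}_size{load_size} makes parse() rewrite opt.name to exp_cycle_gan_resnet_9blocks_size286, and print_options() then echoes every flag, annotating any non-default value, into checkpoints/exp_cycle_gan_resnet_9blocks_size286/[phase]_opt.txt.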
options/test_options.py
ADDED
@@ -0,0 +1,23 @@
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm have different behavior during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
options/train_options.py
ADDED
@@ -0,0 +1,35 @@
from .base_options import BaseOptions


class TrainOptions(BaseOptions):
    """This class includes training options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # HTML visualization parameters
        parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
        parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
        parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        # network saving and loading parameters
        parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
        parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration')
        parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # training parameters
        parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
        parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay the learning rate to zero')
        parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
        parser.add_argument('--pool_size', type=int, default=50, help='the size of the image buffer that stores previously generated images')
        parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
        parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')

        self.isTrain = True
        return parser
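For reference, with the defaults above the 'linear' lr_policy keeps the learning rate at --lr for the first n_epochs and then decays it linearly toward zero over n_epochs_decay. A minimal sketch of that schedule, assuming the multiplier formula used by the upstream CycleGAN codebase (the actual scheduler lives in models/networks.py, not shown here):

    def linear_lr_multiplier(epoch, epoch_count=1, n_epochs=100, n_epochs_decay=100):
        # 1.0 during the first n_epochs, then a linear ramp down toward 0
        return 1.0 - max(0, epoch + epoch_count - n_epochs) / float(n_epochs_decay + 1)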
requirements.txt
ADDED
@@ -0,0 +1,8 @@
# requirements.txt

gradio
numpy
opencv-python
pillow
torch
torchvision
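Note that util/get_data.py and util/html.py below also import requests, BeautifulSoup (via bs4, with the 'lxml' parser), and dominate, which are not listed here; if those utilities are used, entries along these lines would be needed as well (package names assumed from the imports):

    # assumed extras, inferred from the imports in util/get_data.py and util/html.py
    requests
    beautifulsoup4
    lxml
    dominate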
util/__init__.py
ADDED
@@ -0,0 +1 @@
"""This package includes a miscellaneous collection of useful helper functions."""
util/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (284 Bytes)
util/__pycache__/html.cpython-312.pyc
ADDED
Binary file (5.5 kB)
util/__pycache__/image_pool.cpython-312.pyc
ADDED
Binary file (2.6 kB)
util/__pycache__/util.cpython-312.pyc
ADDED
Binary file (6.41 kB)
util/__pycache__/visualizer.cpython-312.pyc
ADDED
Binary file (10.1 kB)
util/get_data.py
ADDED
@@ -0,0 +1,107 @@
from __future__ import print_function
from pathlib import Path
import tarfile
import requests
from warnings import warn
from zipfile import ZipFile
from bs4 import BeautifulSoup


class GetData(object):
    """A Python script for downloading CycleGAN or pix2pix datasets.

    Parameters:
        technique (str) -- One of: 'cyclegan' or 'pix2pix'.
        verbose (bool)  -- If True, print additional information.

    Examples:
        >>> from util.get_data import GetData
        >>> gd = GetData(technique='cyclegan')
        >>> new_data_path = gd.get(save_path='./datasets')  # options will be displayed.

    Alternatively, you can use the bash scripts 'scripts/download_pix2pix_model.sh'
    and 'scripts/download_cyclegan_model.sh'.
    """

    def __init__(self, technique="cyclegan", verbose=True):
        url_dict = {
            "pix2pix": "http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/",
            "cyclegan": "http://efrosgans.eecs.berkeley.edu/pix2pix/datasets",
        }
        self.url = url_dict.get(technique.lower())
        self._verbose = verbose

    def _print(self, text):
        if self._verbose:
            print(text)

    @staticmethod
    def _get_options(r):
        soup = BeautifulSoup(r.text, "lxml")
        options = [h.text for h in soup.find_all("a", href=True) if h.text.endswith((".zip", "tar.gz"))]
        return options

    def _present_options(self):
        r = requests.get(self.url)
        options = self._get_options(r)
        print("Options:\n")
        for i, o in enumerate(options):
            print("{0}: {1}".format(i, o))
        choice = input("\nPlease enter the number of the dataset above you wish to download: ")
        return options[int(choice)]

    def _download_data(self, dataset_url, save_path):
        save_path = Path(save_path)
        if not save_path.is_dir():
            save_path.mkdir(parents=True, exist_ok=True)

        base = Path(dataset_url).name
        temp_save_path = save_path / base

        with open(temp_save_path, "wb") as f:
            r = requests.get(dataset_url)
            f.write(r.content)

        if base.endswith(".tar.gz"):
            obj = tarfile.open(temp_save_path)
        elif base.endswith(".zip"):
            obj = ZipFile(temp_save_path, "r")
        else:
            raise ValueError("Unknown File Type: {0}.".format(base))

        self._print("Unpacking Data...")
        obj.extractall(save_path)
        obj.close()
        temp_save_path.unlink()

    def get(self, save_path, dataset=None):
        """Download a dataset.

        Parameters:
            save_path (str) -- A directory to save the data to.
            dataset (str)   -- (optional). A specific dataset to download.
                               Note: this must include the file extension.
                               If None, options will be presented for you to choose from.

        Returns:
            save_path_full (str) -- the absolute path to the downloaded data.
        """
        if dataset is None:
            selected_dataset = self._present_options()
        else:
            selected_dataset = dataset

        save_path_full = Path(save_path) / selected_dataset.split(".")[0]

        if save_path_full.is_dir():
            warn(f"\n'{save_path_full}' already exists. Voiding Download.")
        else:
            self._print("Downloading Data...")
            url = f"{self.url}/{selected_dataset}"
            self._download_data(url, save_path=save_path)

        return save_path_full.resolve()
util/html.py
ADDED
@@ -0,0 +1,84 @@
import dominate
from dominate.tags import meta, h3, table, tr, td, p, a, img, br
from pathlib import Path


class HTML:
    """This HTML class allows us to save images and write texts into a single HTML file.

    It consists of functions such as <add_header> (add a text header to the HTML file),
    <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
    It is based on 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
    """

    def __init__(self, web_dir, title, refresh=0):
        """Initialize the HTML class

        Parameters:
            web_dir (str) -- a directory that stores the webpage. The HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
            title (str)   -- the webpage name
            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
        """
        self.title = title
        self.web_dir = Path(web_dir)
        self.img_dir = self.web_dir / "images"

        self.web_dir.mkdir(parents=True, exist_ok=True)
        self.img_dir.mkdir(parents=True, exist_ok=True)

        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))

    def get_image_dir(self):
        """Return the directory that stores images"""
        return self.img_dir

    def add_header(self, text):
        """Insert a header into the HTML file

        Parameters:
            text (str) -- the header text
        """
        with self.doc:
            h3(text)

    def add_images(self, ims, txts, links, width=400):
        """Add images to the HTML file

        Parameters:
            ims (str list)   -- a list of image paths
            txts (str list)  -- a list of image names shown on the website
            links (str list) -- a list of hyperlinks; when you click an image, it will redirect you to a new page
        """
        self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
        self.doc.add(self.t)
        with self.t:
            with tr():
                for im, txt, link in zip(ims, txts, links):
                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
                        with p():
                            with a(href=Path("images") / link):
                                img(style=f"width:{width}px", src=Path("images") / im)
                            br()
                            p(txt)

    def save(self):
        """Save the current content to the HTML file"""
        html_file = self.web_dir / "index.html"
        with open(html_file, "wt") as f:
            f.write(self.doc.render())


if __name__ == "__main__":  # we show an example usage here.
    html = HTML("web/", "test_html")
    html.add_header("hello world")

    ims, txts, links = [], [], []
    for n in range(4):
        ims.append(f"image_{n}.png")
        txts.append(f"text_{n}")
        links.append(f"image_{n}.png")
    html.add_images(ims, txts, links)
    html.save()
util/image_pool.py
ADDED
@@ -0,0 +1,54 @@
import random
import torch


class ImagePool:
    """This class implements an image buffer that stores previously generated images.

    This buffer enables us to update discriminators using a history of generated images
    rather than the ones produced by the latest generators.
    """

    def __init__(self, pool_size):
        """Initialize the ImagePool class

        Parameters:
            pool_size (int) -- the size of the image buffer; if pool_size=0, no buffer will be created
        """
        self.pool_size = pool_size
        if self.pool_size > 0:  # create an empty pool
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return an image from the pool.

        Parameters:
            images: the latest generated images from the generator

        Returns images from the buffer.

        With probability 0.5, the buffer returns the input images.
        With probability 0.5, the buffer returns images previously stored in the buffer,
        and inserts the current images into the buffer.
        """
        if self.pool_size == 0:  # if the buffer size is 0, do nothing
            return images
        return_images = []
        for image in images:
            image = torch.unsqueeze(image.data, 0)
            if self.num_imgs < self.pool_size:  # if the buffer is not full, keep inserting current images into the buffer
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:  # by 50% chance, the buffer returns a previously stored image and inserts the current image into the buffer
                    random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:  # by another 50% chance, the buffer returns the current image
                    return_images.append(image)
        return_images = torch.cat(return_images, 0)  # collect all the images and return
        return return_images
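A minimal sketch of how this buffer is typically wired into a discriminator update (netG, netD, criterionGAN, and real_A are illustrative names, not taken from the files shown here):

    # hypothetical training-loop fragment
    pool = ImagePool(pool_size=50)     # matches the --pool_size default above
    fake_B = netG(real_A)              # latest generator output
    fake_mix = pool.query(fake_B)      # 50/50 mix of current and historical fakes
    loss_D_fake = criterionGAN(netD(fake_mix.detach()), False)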
util/util.py
ADDED
@@ -0,0 +1,130 @@
"""This module contains simple helper functions"""

from __future__ import print_function
import torch
import numpy as np
from PIL import Image
from pathlib import Path
import torch.distributed as dist
import os


def tensor2im(input_image, imtype=np.uint8):
    """Convert a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type)        -- the desired type of the converted numpy array
    """
    if not isinstance(input_image, np.ndarray):
        if isinstance(input_image, torch.Tensor):  # get the data from a variable
            image_tensor = input_image.data
        else:
            return input_image
        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
    else:  # if it is a numpy array, do nothing
        image_numpy = input_image
    return image_numpy.astype(imtype)


def diagnose_network(net, name="network"):
    """Calculate and print the mean of the average absolute gradients

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network
    """
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)


# initialize ddp
def init_ddp():
    # Initialize DDP if WORLD_SIZE indicates a multi-process launch
    is_ddp = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1

    if is_ddp:
        if not dist.is_initialized():
            dist.init_process_group(backend="nccl")
        local_rank = int(os.environ["LOCAL_RANK"])
        device = torch.device(f"cuda:{local_rank}")
        torch.cuda.set_device(local_rank)
    elif torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.cuda.set_device(0)
    else:
        device = torch.device("cpu")
    print(f"Initialized with device {device}")
    return device


# cleanup ddp
def cleanup_ddp():
    if dist.is_initialized():
        dist.destroy_process_group()


def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
    """
    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)


def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- whether to print the values of the numpy array
        shp (bool) -- whether to print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print("shape,", x.shape)
    if val:
        x = x.flatten()
        print("mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f" % (np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))


def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)


def mkdir(path):
    """create a single empty directory if it doesn't exist

    Parameters:
        path (str) -- a single directory path
    """
    Path(path).mkdir(parents=True, exist_ok=True)
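A small usage sketch for the two image helpers above, assuming a generator output scaled to [-1, 1] (the tensor here is random, purely for illustration):

    import torch
    from util.util import tensor2im, save_image

    fake = torch.rand(1, 3, 256, 256) * 2 - 1   # stand-in for a generator output, NCHW in [-1, 1]
    im = tensor2im(fake)                        # -> (256, 256, 3) uint8 HWC array
    save_image(im, "fake_B.png")                # writes the PNG; aspect_ratio=1.0 keeps the size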