Commit
·
f1d6080
1
Parent(s):
fe2a1de
Add production code
Browse files- app.py +103 -0
- requirements.txt +59 -0
- src/color_controls.py +89 -0
- src/cyano.py +151 -0
- src/get_patch_rgb.py +3 -0
- src/mono_alternative.py +213 -0
- src/prediction.py +68 -0
- src/utils.py +26 -0
app.py
ADDED
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, request, jsonify, render_template, make_response
|
2 |
+
from flask_cors import CORS
|
3 |
+
import numpy as np
|
4 |
+
import cv2
|
5 |
+
|
6 |
+
from src.color_controls import control_kelvin, control_contrast, control_HSV
|
7 |
+
from src.cyano import Cyanotype
|
8 |
+
from src.prediction import predict_img, optimize_img, update_patch
|
9 |
+
from src.utils import cv_to_pil, pil_to_cv
|
# Directory where uploaded files would be stored (exposed via app.config).
UPLOAD_FOLDER = './uploads'
# NOTE(review): '/client' is an absolute filesystem path — presumably
# './client' was intended for the template/static folders; confirm against
# the deployment layout.
app = Flask(__name__, template_folder='/client', static_folder='/client')

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# Allow cross-origin requests carrying cookies/credentials from the frontend.
CORS(
    app,
    supports_credentials=True
)
@app.route('/api/process', methods=['POST'])
def process():
    """Apply color adjustments (contrast, HSV, kelvin) to an uploaded image.

    Expects a multipart form with an 'img' file plus 'hue', 'saturation',
    'lightness', 'contrast' and 'kelvin' fields. Returns the adjusted image
    as a PNG response.
    """
    raw = np.asarray(bytearray(request.files['img'].stream.read()), dtype=np.uint8)
    img = cv2.imdecode(raw, cv2.IMREAD_COLOR)

    form = request.form
    hue = int(form["hue"])
    saturation = float(form["saturation"])
    lightness = float(form["lightness"])
    contrast = int(form["contrast"])
    kelvin = int(form["kelvin"])

    # Contrast and HSV tweaks operate on the OpenCV (BGR) image.
    img = control_contrast(img, contrast)
    img = control_HSV(img, hue, saturation, lightness)

    # The kelvin (white balance) adjustment is implemented on a PIL image,
    # so round-trip through PIL for that single step.
    img = pil_to_cv(control_kelvin(cv_to_pil(img), kelvin))

    response = make_response(cv2.imencode('.png', img)[1].tobytes())
    response.headers.set('Content-Type', 'image/png')
    return response
@app.route('/api/predict/<process_name>', methods=['POST'])
def predict(process_name):
    """Simulate an alternative printing process on an uploaded image.

    Expects a multipart form with an 'img' file and optionally a
    'colorpatch' scan used to recalibrate the process model first.
    Returns the predicted print as a PNG response, or a JSON error for an
    unknown process name.
    """
    if process_name not in ['cyanotype_mono', 'cyanotype_full', 'salt', 'platinum']:
        return jsonify({ 'error': 'process name is invalid' })

    imgfile = request.files['img']
    img_array = np.asarray(bytearray(imgfile.stream.read()), dtype=np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    # Optionally recalibrate the model from a freshly scanned color patch.
    if 'colorpatch' in request.files:
        patchfile = request.files['colorpatch']
        patch_array = np.asarray(bytearray(patchfile.stream.read()), dtype=np.uint8)
        # BUG FIX: was `cv2.imdecode(colorpatch_array, ...)` — an undefined
        # name that raised NameError whenever a color patch was uploaded.
        colorpatch = cv2.imdecode(patch_array, cv2.IMREAD_COLOR)
        update_patch(process_name, colorpatch)

    img = predict_img(process_name, img)

    response = make_response(cv2.imencode('.png', img)[1].tobytes())
    response.headers.set('Content-Type', 'image/png')
    return response
@app.route('/api/optimize/<process_name>', methods=['POST'])
def optimize(process_name):
    """Optimize an input image so its simulated print matches the original.

    Expects the same multipart form as /api/predict. Returns a PNG with the
    optimized input and the simulated print preview concatenated side by
    side, or a JSON error for an unknown process name.
    """
    if process_name not in ['cyanotype_mono', 'cyanotype_full', 'salt', 'platinum']:
        return jsonify({ 'error': 'process name is invalid' })

    imgfile = request.files['img']
    img_array = np.asarray(bytearray(imgfile.stream.read()), dtype=np.uint8)
    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    # Optionally recalibrate the model from a freshly scanned color patch.
    if 'colorpatch' in request.files:
        patchfile = request.files['colorpatch']
        patch_array = np.asarray(bytearray(patchfile.stream.read()), dtype=np.uint8)
        # BUG FIX: was `cv2.imdecode(colorpatch_array, ...)` — an undefined
        # name that raised NameError whenever a color patch was uploaded.
        colorpatch = cv2.imdecode(patch_array, cv2.IMREAD_COLOR)
        update_patch(process_name, colorpatch)

    (opt_img, preview_img) = optimize_img(process_name, img)

    h, w = preview_img.shape[:2]
    if process_name.endswith('full'):
        # BUG FIX: cast to uint8 here too — the optimizer returns float64,
        # and cv2.hconcat requires both images to share a dtype.
        opt_img = np.reshape(opt_img, (h, w, 3)).astype(np.uint8)
    else:
        # Mono processes return one channel; replicate it to three channels
        # so it can sit next to the BGR preview. (Vectorized replacement for
        # the original per-pixel nested list comprehension.)
        opt_img = np.reshape(opt_img, (h, w, 1))
        opt_img = np.repeat(opt_img, 3, axis=2).astype(np.uint8)

    img = cv2.hconcat([opt_img, preview_img])
    response = make_response(cv2.imencode('.png', img)[1].tobytes())
    response.headers.set('Content-Type', 'image/png')
    return response
if __name__ == "__main__":
    # Development entry point (gunicorn serves the app in production).
    # NOTE: debug mode is intentionally left on for local runs.
    app.run(port=8000, debug=True)
requirements.txt
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
-i https://pypi.org/simple
|
2 |
+
absl-py==1.2.0; python_version >= '3.6'
|
3 |
+
astunparse==1.6.3
|
4 |
+
cachetools==5.2.0; python_version ~= '3.7'
|
5 |
+
certifi==2022.6.15; python_version >= '3.6'
|
6 |
+
charset-normalizer==2.1.0; python_version >= '3.6'
|
7 |
+
click==8.1.3; python_version >= '3.7'
|
8 |
+
flask==2.2.2
|
9 |
+
flask-cors==3.0.10
|
10 |
+
flatbuffers==1.12
|
11 |
+
gast==0.4.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'
|
12 |
+
google-auth==2.10.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'
|
13 |
+
google-auth-oauthlib==0.4.6; python_version >= '3.6'
|
14 |
+
google-pasta==0.2.0
|
15 |
+
grpcio==1.47.0; python_version >= '3.6'
|
16 |
+
gunicorn==20.1.0
|
17 |
+
h5py==3.7.0; python_version >= '3.7'
|
18 |
+
idna==3.3; python_version >= '3.5'
|
19 |
+
importlib-metadata==4.12.0; python_version < '3.10'
|
20 |
+
itsdangerous==2.1.2; python_version >= '3.7'
|
21 |
+
jinja2==3.1.2; python_version >= '3.7'
|
22 |
+
joblib==1.1.0; python_version >= '3.6'
|
23 |
+
keras==2.9.0
|
24 |
+
keras-preprocessing==1.1.2
|
25 |
+
libclang==14.0.6
|
26 |
+
markdown==3.4.1; python_version >= '3.7'
|
27 |
+
markupsafe==2.1.1; python_version >= '3.7'
|
28 |
+
numpy==1.21.6
|
29 |
+
oauthlib==3.2.0; python_version >= '3.6'
|
30 |
+
opencv-python==4.6.0.66
|
31 |
+
opt-einsum==3.3.0; python_version >= '3.5'
|
32 |
+
packaging==21.3; python_version >= '3.6'
|
33 |
+
pandas==1.4.2
|
34 |
+
pillow==9.2.0
|
35 |
+
protobuf==3.19.4; python_version >= '3.5'
|
36 |
+
pyasn1==0.4.8
|
37 |
+
pyasn1-modules==0.2.8
|
38 |
+
pyparsing==3.0.9; python_full_version >= '3.6.8'
|
39 |
+
requests==2.28.1; python_version >= '3.7' and python_version < '4'
|
40 |
+
requests-oauthlib==1.3.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'
|
41 |
+
rsa==4.9; python_version >= '3.6'
|
42 |
+
scikit-learn==1.0.2
|
43 |
+
scipy==1.7.3; python_version < '3.11' and python_version >= '3.7'
|
44 |
+
setuptools==65.0.0; python_version >= '3.7'
|
45 |
+
six==1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'
|
46 |
+
tensorboard==2.9.1; python_version >= '3.6'
|
47 |
+
tensorboard-data-server==0.6.1; python_version >= '3.6'
|
48 |
+
tensorboard-plugin-wit==1.8.1
|
49 |
+
tensorflow==2.9.1
|
50 |
+
tensorflow-estimator==2.9.0; python_version >= '3.7'
|
51 |
+
tensorflow-io-gcs-filesystem==0.26.0; python_version < '3.11' and python_version >= '3.7'
|
52 |
+
termcolor==1.1.0
|
53 |
+
threadpoolctl==3.1.0; python_version >= '3.6'
|
54 |
+
typing-extensions==4.3.0; python_version >= '3.7'
|
55 |
+
urllib3==1.26.11; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5' and python_version < '4'
|
56 |
+
werkzeug==2.2.2; python_version >= '3.7'
|
57 |
+
wheel==0.37.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'
|
58 |
+
wrapt==1.14.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'
|
59 |
+
zipp==3.8.1; python_version >= '3.7'
|
src/color_controls.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import math
|
# Adjust hue, saturation and value of an image.
def control_HSV(img, h_deg, s_mag, v_mag):
    """Rotate hue and scale saturation/value of a BGR image.

    Args:
        img: BGR uint8 image.
        h_deg: hue shift in OpenCV hue units (0-179 covers the full circle).
        s_mag: multiplicative saturation factor.
        v_mag: multiplicative value (lightness) factor.

    Returns:
        Adjusted BGR uint8 image.
    """
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int32)
    print(h_deg, s_mag, v_mag)
    # BUG FIX: the original operated in-place on the uint8 array, so hue
    # shifts wrapped at 256 instead of OpenCV's 180, and scaled S/V values
    # overflowed and wrapped. Compute in a wider dtype, wrap hue mod 180,
    # and clip S/V into [0, 255] before casting back.
    img_hsv[:, :, 0] = (img_hsv[:, :, 0] + h_deg) % 180
    img_hsv[:, :, 1] = np.clip(img_hsv[:, :, 1] * s_mag, 0, 255)
    img_hsv[:, :, 2] = np.clip(img_hsv[:, :, 2] * v_mag, 0, 255)
    # HSV back to BGR.
    img_bgr = cv2.cvtColor(img_hsv.astype(np.uint8), cv2.COLOR_HSV2BGR)
    return img_bgr
# Adjust image contrast.
def control_contrast(img, contrast):
    """Adjust image contrast around mid-gray.

    Args:
        img: uint8 image array (any shape; the original required 3-D).
        contrast: integer roughly in [-255, 255]; 0 leaves the image unchanged.

    Returns:
        Contrast-adjusted uint8 image, clipped to [0, 255].
    """
    # Standard contrast-correction factor.
    factor = (259 * (contrast + 255)) / (255 * (259 - contrast))
    # Work in float64, scale distances from mid-gray (128), then clip to the
    # valid range so the uint8 cast cannot wrap.
    adjusted = np.clip((img.astype('float64') - 128) * factor + 128, 0, 255)
    return adjusted.astype('uint8')
def __clamp(value: float, min_val: int = 0, max_val: int = 255) -> int:
    """Clamp *value* into [min_val, max_val], rounding to the nearest int."""
    if value < min_val:
        return min_val
    if value > max_val:
        return max_val
    # Rounding better represents values strictly between the bounds.
    return int(round(value))
def kelvin_to_rgb(kelvin):
    """Convert a color temperature in kelvin to an (r, g, b) tuple.

    Implements Tanner Helland's curve-fit approximation of the black-body
    locus, accurate for roughly 1000K-40000K.

    Args:
        kelvin: color temperature in kelvin.

    Returns:
        Tuple of ints (red, green, blue), each clamped to [0, 255].
    """
    def clamp(value, min_val=0, max_val=255):
        # Round so values between the bounds are represented fairly.
        return int(round(max(min(value, max_val), min_val)))

    temperature = kelvin / 100.0

    # Red channel.
    if temperature < 66.0:
        red = 255
    else:
        # Curve fit: a + b*x + c*log(x) with x = kelvin/100 - 55,
        # a -> 351.97690566805693, b -> 0.114206453784165,
        # c -> -40.25366309332127.
        red = temperature - 55.0
        red = 351.97690566805693 + 0.114206453784165 * red - 40.25366309332127 * math.log(red)

    # Green channel.
    if temperature < 66.0:
        # Curve fit with x = kelvin/100 - 2,
        # a -> -155.25485562709179, b -> -0.44596950469579133,
        # c -> 104.49216199393888.
        green = temperature - 2
        green = -155.25485562709179 - 0.44596950469579133 * green + 104.49216199393888 * math.log(green)
    else:
        # Curve fit with x = kelvin/100 - 50,
        # a -> 325.4494125711974, b -> 0.07943456536662342,
        # c -> -28.0852963507957.
        green = temperature - 50.0
        green = 325.4494125711974 + 0.07943456536662342 * green - 28.0852963507957 * math.log(green)

    # Blue channel.
    if temperature >= 66.0:
        blue = 255
    elif temperature <= 20.0:
        blue = 0
    else:
        # Curve fit with x = kelvin/100 - 10,
        # a -> -254.76935184120902, b -> 0.8274096064007395,
        # c -> 115.67994401066147.
        blue = temperature - 10
        blue = -254.76935184120902 + 0.8274096064007395 * blue + 115.67994401066147 * math.log(blue)

    # BUG FIX: the original returned (red, blue, green), silently swapping
    # the green and blue channels for every caller that unpacks r, g, b
    # (control_kelvin does exactly that).
    return clamp(red), clamp(green), clamp(blue)
def control_kelvin(img, kelvin):
    """White-balance a PIL RGB image toward the given color temperature.

    Args:
        img: PIL Image in RGB mode.
        kelvin: target color temperature in kelvin.

    Returns:
        New PIL Image with each channel scaled by the kelvin tint.
    """
    r, g, b = kelvin_to_rgb(int(kelvin))
    # Diagonal 3x4 color matrix: scale each channel independently, no offset.
    matrix = (
        r / 255.0, 0.0, 0.0, 0.0,
        0.0, g / 255.0, 0.0, 0.0,
        0.0, 0.0, b / 255.0, 0.0,
    )
    return img.convert('RGB', matrix)
src/cyano.py
ADDED
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
import tensorflow as tf
|
4 |
+
from sklearn.linear_model import LinearRegression
|
5 |
+
|
6 |
+
from src.get_patch_rgb import get_patch_rgb
|
class Cyanotype():
    """Simulates full-color cyanotype printing via a linear RGB map.

    A scanned color patch chart is cropped into swatches; a linear
    regression from the reference patch RGB values to the scanned
    cyanotype RGB values is then used to predict prints and to optimize
    input images.
    """

    def __init__(self):
        # Calibrate from the bundled full-color cyanotype patch scan.
        patch_img_path = './colorpatches/cyanotype_full.png'
        self.update_patch(cv2.imread(patch_img_path, cv2.IMREAD_COLOR))

    def update_patch(self, patch_img):
        """Recalibrate the model from a scanned color patch image."""
        # Seed with a placeholder row; crop_img() appends and then drops it.
        self.rgb_cyano = [[0, 0, 0]]
        self.patch_img = patch_img
        self.patch_img_height, self.patch_img_width, _ = self.patch_img.shape

        self.crop_img()

        self.patch_rgb = np.array(get_patch_rgb())
        self.rgb_cyano = np.array(self.rgb_cyano)
        print(self.patch_rgb.shape)
        print(self.rgb_cyano.shape)

        self.fit_model()

    def crop_img(self):
        """Crop each swatch of the 21x14 patch grid and record its mean RGB."""
        h_pix = 14  # rows of swatches
        w_pix = 21  # columns of swatches
        w_ = round(self.patch_img_width / w_pix)
        h_ = round(self.patch_img_height / h_pix)
        for i in range(h_pix):
            for j in range(w_pix):
                # Inset the crop a few pixels to avoid swatch borders.
                boxFromX = j * w_ + 5
                boxFromY = i * h_ + 5
                boxToX = ((j + 1) * w_) - 7
                boxToY = ((i + 1) * h_) - 7
                # Note OpenCV indexing order: [y:y+h, x:x+w].
                imgBox = self.patch_img[boxFromY: boxToY, boxFromX: boxToX]

                # Mean of each BGR channel, recorded as [r, g, b].
                b = imgBox.T[0].flatten().mean()
                g = imgBox.T[1].flatten().mean()
                r = imgBox.T[2].flatten().mean()

                self.rgb_cyano.append([r, g, b])

        # Drop the [0, 0, 0] placeholder used to seed the list.
        del self.rgb_cyano[0]

    def fit_model(self):
        """Fit the linear map from reference RGB to scanned cyanotype RGB."""
        self.reg = LinearRegression().fit(self.patch_rgb, self.rgb_cyano)
        self.reg.score(self.patch_rgb, self.rgb_cyano)
        print('self.reg.coef_: ', self.reg.coef_)
        print('self.reg.intercept_: ', self.reg.intercept_)

    def predict_img(self, img):
        """Predict the cyanotype rendering of a BGR image.

        Args:
            img: BGR uint8 image.

        Returns:
            BGR uint8 image of the simulated print.
        """
        print(img.shape)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        img_cyano = img @ self.reg.coef_.T + self.reg.intercept_
        # BUG FIX: clip before the uint8 cast — out-of-gamut predictions
        # previously wrapped around and produced speckle artifacts.
        img_cyano = np.clip(img_cyano, 0, 255).astype(np.uint8)
        img_cyano = cv2.cvtColor(img_cyano, cv2.COLOR_RGB2BGR)
        img_cyano = np.array(img_cyano)
        print(img_cyano.shape)

        return img_cyano

    def MSE(self, imageA, imageB):
        """Mean squared error between two images of identical shape."""
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1] * imageA.shape[2])
        return err

    # ---------- Optimization with Tensorflow ---------- #
    def tf_optimize(self, img):
        """Search for an input image whose predicted print matches *img*.

        Runs 50 Adam steps over the pixel values, clamping them to
        [0, 255] inside the loss so the result stays a valid image.

        Args:
            img: BGR uint8 target image.

        Returns:
            (x0, sim_opt): optimized flat RGB array of shape (h*w, 3) and
            the simulated BGR print of the optimized input.
        """
        print('\n---------- Start Optimization ----------')
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        x = self.reg.coef_
        A = img
        target = img
        A_height = A.shape[0]
        A_width = A.shape[1]
        cnt = A_height * A_width
        print(A.shape)
        print(cnt)

        param_tf = tf.Variable(A, dtype=tf.float64)
        coef_tf = tf.constant(x.T, dtype=tf.float64)
        intercept_tf = tf.constant(self.reg.intercept_, dtype=tf.float64)
        target_tf = tf.constant(target, dtype=tf.float64)

        opt = tf.keras.optimizers.Adam(learning_rate=5.0)

        def loss():
            # Clamp parameters into the valid pixel range before simulating.
            x0 = tf.where(param_tf > 255.0, 255.0, param_tf)
            x0 = tf.where(x0 < 0.0, 0.0, x0)
            x0 = tf.reshape(x0, [cnt, 3])
            t_tf = tf.reshape(target_tf, [cnt, 3])
            pred = tf.linalg.matmul(x0, coef_tf) + intercept_tf
            diff = pred - t_tf
            pix_cnt = tf.cast(tf.size(t_tf), dtype=tf.float64)
            loss_val = tf.math.reduce_sum(diff ** 2) / pix_cnt
            print('loss_val: ', loss_val)
            return loss_val

        for i in range(50):
            step_count = opt.minimize(loss, [param_tf]).numpy()
            print(step_count)

        # ----- Check optimized result ----- #
        x0 = tf.where(param_tf > 255.0, 255.0, param_tf)
        x0 = tf.where(x0 < 0.0, 0.0, x0)
        x0 = x0.numpy()
        x0 = x0.reshape((cnt, 3))
        sim_opt = x0 @ x.T + self.reg.intercept_
        sim_opt = sim_opt.reshape((A_height, A_width, 3))
        # BUG FIX: clip before the uint8 cast (same wraparound defect as
        # predict_img).
        sim_opt = np.clip(sim_opt, 0, 255).astype(np.uint8)
        sim_opt = cv2.cvtColor(sim_opt, cv2.COLOR_RGB2BGR)

        return (x0, sim_opt)
if __name__ == '__main__':
    # Smoke test: calibrate, then predict and optimize a sample image.
    sample = cv2.imread('samples/input/00.jpg', cv2.IMREAD_COLOR)
    model = Cyanotype()
    model.fit_model()
    model.predict_img(sample)
    model.tf_optimize(sample)
src/get_patch_rgb.py
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
def get_patch_rgb():
    """Return the reference RGB triplets (0-255 floats) for the color patch
    chart used to calibrate the Cyanotype model.

    NOTE(review): presumably one [r, g, b] row per swatch, in the order the
    21x14 chart is cropped by src/cyano.py — confirm the count matches.
    """
    rgb = [[0.0000,0.0000,0.0000],[25.000,0.0000,0.0000],[51.000,0.0000,0.0000],[76.000,0.0000,0.0000],[102.00,0.0000,0.0000],[127.00,0.0000,0.0000],[153.00,0.0000,0.0000],[178.00,0.0000,0.0000],[204.00,0.0000,0.0000],[229.00,0.0000,0.0000],[255.00,0.0000,0.0000],[0.0000,25.000,0.0000],[0.0000,51.000,0.0000],[0.0000,76.000,0.0000],[0.0000,102.00,0.0000],[0.0000,127.00,0.0000],[0.0000,153.00,0.0000],[0.0000,178.00,0.0000],[0.0000,204.00,0.0000],[0.0000,229.00,0.0000],[0.0000,255.00,0.0000],[0.0000,0.0000,25.000],[0.0000,0.0000,51.000],[0.0000,0.0000,76.000],[0.0000,0.0000,102.00] ,[0.0000,0.0000,127.00],[0.0000,0.0000,153.00] ,[0.0000,0.0000,178.00],[0.0000,0.0000,204.00],[0.0000,0.0000,229.00],[0.0000,0.0000,255.00] ,[255.00,255.00,0.0000],[255.00,0.0000,255.00],[0.0000,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,191.00],[255.00,255.00,127.00],[255.00,255.00,63.000],[255.00,191.00,255.00],[255.00,191.00,191.00],[255.00,191.00,127.00] ,[255.00,191.00,63.000],[255.00,191.00,0.0000] ,[255.00,127.00,255.00],[255.00,127.00,191.00],[255.00,127.00,127.00] ,[255.00,127.00,63.000],[255.00,127.00,0.0000] ,[255.00,63.000,255.00],[255.00,63.000,191.00],[255.00,63.000,127.00],[255.00,63.000,63.000],[255.00,63.000,0.0000],[255.00,0.0000,191.00],[255.00,0.0000,127.00],[255.00,0.0000,63.000],[191.00,255.00,255.00],[191.00,255.00,191.00],[191.00,255.00,127.00],[191.00,255.00,63.000],[191.00,255.00,0.0000],[191.00,191.00,255.00],[191.00,191.00,191.00],[191.00,191.00,127.00],[191.00,191.00,63.000],[191.00,191.00,0.0000],[191.00,127.00,255.00],[191.00,127.00,191.00],[191.00,127.00,127.00],[191.00,127.00,63.000],[191.00,127.00,0.0000],[191.00,63.000,255.00],[191.00,63.000,191.00],[191.00,63.000,127.00],[191.00,63.000,63.000],[191.00,63.000,0.0000],[191.00,0.0000,255.00] ,[191.00,0.0000,191.00] ,[191.00,0.0000,127.00] ,[191.00,0.0000,63.000] ,[191.00,0.0000,0.0000] ,[127.00,255.00,255.00] ,[127.00,255.00,191.00] ,[127.00,255.00,127.00] ,[127.00,255.00,63.000],[127.00,255.00,0.0000] 
    ,[127.00,191.00,255.00] ,[127.00,191.00,191.00] ,[127.00,191.00,127.00] ,[127.00,191.00,63.000] ,[127.00,191.00,0.0000],[127.00,127.00,255.00],[127.00,127.00,191.00] ,[127.00,127.00,127.00] ,[127.00,127.00,63.000] ,[127.00,127.00,0.0000] ,[127.00,63.000,255.00] ,[127.00,63.000,191.00],[127.00,63.000,127.00],[127.00,63.000,63.000] ,[127.00,63.000,0.0000] ,[127.00,0.0000,255.00],[127.00,0.0000,191.00] ,[127.00,0.0000,127.00] ,[127.00,0.0000,63.000] ,[63.000,255.00,255.00],[63.000,255.00,191.00],[63.000,255.00,127.00] ,[63.000,255.00,63.000] ,[63.000,255.00,0.0000] ,[63.000,191.00,255.00],[63.000,191.00,191.00] ,[63.000,191.00,127.00] ,[63.000,191.00,63.000],[63.000,191.00,0.0000],[63.000,127.00,255.00] ,[63.000,127.00,191.00] ,[63.000,127.00,127.00] ,[63.000,127.00,63.000] ,[63.000,127.00,0.0000] ,[63.000,63.000,255.00] ,[63.000,63.000,191.00] ,[63.000,63.000,127.00] ,[63.000,63.000,63.000] ,[63.000,63.000,0.0000] ,[63.000,0.0000,255.00] ,[63.000,0.0000,191.00] ,[63.000,0.0000,127.00] ,[63.000,0.0000,63.000] ,[63.000,0.0000,0.0000] ,[0.0000,255.00,191.00],[0.0000,255.00,127.00] ,[0.0000,255.00,63.000] ,[0.0000,191.00,255.00] ,[0.0000,191.00,191.00] ,[0.0000,191.00,127.00] ,[0.0000,191.00,63.000] ,[0.0000,191.00,0.0000] ,[0.0000,127.00,255.00],[0.0000,127.00,191.00],[0.0000,127.00,127.00] ,[0.0000,127.00,63.000],[0.0000,63.000,255.00] ,[0.0000,63.000,191.00] ,[0.0000,63.000,127.00] ,[0.0000,63.000,63.000] ,[0.0000,63.000,0.0000],[0.0000,0.0000,191.00],[0.0000,0.0000,63.000] ,[234.00,255.00,255.00] ,[211.00,255.00,255.00] ,[158.00,255.00,255.00] ,[255.00,234.00,255.00],[255.00,211.00,255.00] ,[255.00,158.00,255.00] 
    ,[255.00,255.00,234.00],[255.00,255.00,211.00],[255.00,255.00,158.00],[20.000,255.00,255.00],[43.000,255.00,255.00],[96.000,255.00,255.00],[0.0000,234.00,255.00],[0.0000,211.00,255.00],[0.0000,158.00,255.00],[0.0000,255.00,234.00],[0.0000,255.00,211.00],[0.0000,255.00,158.00],[234.00,0.0000,255.00],[211.00,0.0000,255.00],[158.00,0.0000,255.00],[255.00,20.000,255.00],[255.00,43.000,255.00],[255.00,96.000,255.00],[255.00,0.0000,234.00],[255.00,0.0000,211.00],[255.00,0.0000,158.00],[20.000,0.0000,255.00],[43.000,0.0000,255.00],[96.000,0.0000,255.00],[0.0000,20.000,255.00],[0.0000,43.000,255.00],[0.0000,96.000,255.00],[0.0000,0.0000,234.00],[0.0000,0.0000,211.00],[0.0000,0.0000,158.00],[234.00,255.00,0.0000],[211.00,255.00,0.0000],[158.00,255.00,0.0000],[255.00,234.00,0.0000],[255.00,211.00,0.0000],[255.00,158.00,0.0000],[255.00,255.00,20.000],[255.00,255.00,43.000],[255.00,255.00,96.000],[20.000,255.00,0.0000],[43.000,255.00,0.0000],[96.000,255.00,0.0000],[0.0000,234.00,0.0000],[0.0000,211.00,0.0000],[0.0000,158.00,0.0000],[0.0000,255.00,20.000],[0.0000,255.00,43.000],[0.0000,255.00,96.000],[234.00,0.0000,0.0000],[211.00,0.0000,0.0000],[158.00,0.0000,0.0000],[255.00,20.000,0.0000],[255.00,43.000,0.0000],[255.00,96.000,0.0000],[255.00,0.0000,20.000],[255.00,0.0000,43.000],[255.00,0.0000,96.000],[20.000,0.0000,0.0000],[43.000,0.0000,0.0000],[96.000,0.0000,0.0000],[0.0000,20.000,0.0000],[0.0000,43.000,0.0000],[0.0000,96.000,0.0000],[0.0000,0.0000,20.000],[0.0000,0.0000,43.000],[0.0000,0.0000,96.000],[30.000,30.000,30.000],[94.000,94.000,94.000],[158.00,158.00,158.00],[221.00,221.00,221.00],[15.000,0.0000,0.0000],[0.0000,15.000,0.0000],[0.0000,0.0000,15.000],[0.0000,15.000,15.000],[15.000,0.0000,15.000],[15.000,15.000,0.0000],[48.000,30.000,30.000],[30.000,48.000,30.000],[30.000,30.000,48.000],[30.000,48.000,48.000],[48.000,30.000,48.000],[48.000,48.000,30.000],[79.000,63.000,63.000],[63.000,79.000,63.000],[63.000,63.000,79.000],[63.000,79.000,79.000],[79.000,63.000,79.000
    ],[79.000,79.000,63.000],[112.00,94.000,94.000],[94.000,112.00,94.000],[94.000,94.000,112.00],[94.000,112.00,112.00],[112.00,94.000,112.00],[112.00,112.00,94.000],[142.00,127.00,127.00],[127.00,142.00,127.00],[127.00,127.00,142.00],[127.00,142.00,142.00],[142.00,127.00,142.00],[142.00,142.00,127.00],[175.00,158.00,158.00],[158.00,175.00,158.00],[158.00,158.00,175.00],[158.00,175.00,175.00],[175.00,158.00,175.00],[175.00,175.00,158.00],[206.00,191.00,191.00],[191.00,206.00,191.00],[191.00,191.00,206.00],[191.00,206.00,206.00],[206.00,191.00,206.00],[206.00,206.00,191.00],[239.00,221.00,221.00],[221.00,239.00,221.00],[221.00,221.00,239.00],[221.00,239.00,239.00],[239.00,221.00,239.00],[239.00,239.00,221.00],[30.000,0.0000,0.0000],[0.0000,30.000,0.0000],[0.0000,0.0000,30.000],[0.0000,30.000,30.000],[30.000,0.0000,30.000],[30.000,30.000,0.0000],[94.000,63.000,63.000],[63.000,94.000,63.000],[63.000,63.000,94.000],[63.000,94.000,94.000],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00],[255.00,255.00,255.00]]
    return rgb
src/mono_alternative.py
ADDED
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
import tensorflow as tf
|
5 |
+
import pandas as pd
|
6 |
+
from sklearn.linear_model import LinearRegression
|
class MonoAlternative():
    """Simulates a monochrome alternative printing process.

    A scanned 16x16 step chart (a gray ramp printed with the target
    process) is cropped into swatches; a linear regression from gray
    level to scanned print RGB is fitted, and the mapping is also
    exported as a 256-entry LUT (lut.csv).
    """

    def __init__(
        self,
        process_name,
        debug = False,
    ):
        """Calibrate from './colorpatches/<process_name>.png'.

        Args:
            process_name: patch image stem, e.g. 'cyanotype_mono'.
            debug: when True, save every cropped swatch to ./patch_data/.
        """
        patch_img_path = f'./colorpatches/{process_name}.png'
        self.debug = debug
        self.update_patch(cv2.imread(patch_img_path, cv2.IMREAD_COLOR))

    def update_patch(self, patch_img):
        """Recalibrate the model from a scanned patch image."""
        self.patch_img = cv2.resize(patch_img, (512, 512))
        self.patch_img_height, self.patch_img_width, _ = self.patch_img.shape

        # Seed with a placeholder row; crop_img() appends and then drops it.
        self.cyano_rgb = [[0, 0, 0]]
        self.crop_img()
        self.cyano_rgb = np.array(self.cyano_rgb)

        self.patch_rgb = np.array(self.create_patch_arr())
        print(self.cyano_rgb.shape)
        print(self.patch_rgb.shape)

        self.create_LUT()
        self.fit_model()

    def create_patch_arr(self):
        """Return the 256 reference gray levels as (256, 3) [i, i, i] rows."""
        patch_arr = np.empty((256, 3))
        for i in range(256):
            patch_arr[i] = np.array([i, i, i])
        return patch_arr

    def save_cropped_img(self, img, cnt):
        """Save one cropped swatch to ./patch_data/ for debugging."""
        patch_dir = './patch_data/'
        if not os.path.exists(patch_dir):
            os.makedirs(patch_dir)
        cv2.imwrite(patch_dir + "patch_" + str(cnt) + ".png", img)

    def create_LUT(self):
        """Export the gray -> print-color mapping as a CSV lookup table."""
        self.lut_arr = np.hstack([self.patch_rgb, self.cyano_rgb])
        print('self.lut_arr.shape: ', self.lut_arr.shape)
        df = pd.DataFrame(self.lut_arr, columns=['r','g','b','r_','g_','b_'])
        print(df)
        df.to_csv('./lut.csv')

    def crop_img(self):
        """Crop each swatch of the 16x16 grid and record its mean RGB."""
        h_pix = 16
        w_pix = 16
        w_ = round(self.patch_img_width / w_pix)
        h_ = round(self.patch_img_height / h_pix)
        for i in range(w_pix):
            for j in range(h_pix):
                # Inset the crop a few pixels to avoid swatch borders.
                boxFromX = i * w_ + 5
                boxFromY = j * h_ + 5
                boxToX = ((i + 1) * w_) - 12
                boxToY = ((j + 1) * h_) - 12
                # Note OpenCV indexing order: [y:y+h, x:x+w].
                imgBox = self.patch_img[boxFromY: boxToY, boxFromX: boxToX]
                if self.debug:
                    cnt = i * h_pix + j
                    self.save_cropped_img(imgBox, cnt)

                # Mean of each BGR channel, stored as [r, g, b].
                b = imgBox.T[0].flatten().mean()
                g = imgBox.T[1].flatten().mean()
                r = imgBox.T[2].flatten().mean()

                self.cyano_rgb.append([r, g, b])

        # Drop the [0, 0, 0] placeholder used to seed the list.
        del self.cyano_rgb[0]

    def predict_img_LUT(self, img):
        """Predict the print color of each pixel via the LUT.

        Args:
            img: BGR color image or single-channel grayscale image.

        Returns:
            (pred_img_bgr, img_gray): float32 BGR prediction and the
            grayscale image actually used for the lookup.
        """
        print('img.shape:', img.shape)

        if len(img.shape) == 3:  # convert a color input to grayscale
            img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            img_gray = img

        # BUG FIX: was printing img.shape under the img_gray label.
        print('img_gray.shape:', img_gray.shape)
        # Vectorized lookup (was a per-pixel Python double loop): each gray
        # value indexes a LUT row; columns 3:6 hold the predicted RGB.
        pred_img = self.lut_arr[img_gray, 3:6]

        print('pred_img.shape:', pred_img.shape)
        pred_img_bgr = cv2.cvtColor(pred_img.astype(np.float32), cv2.COLOR_RGB2BGR)

        return pred_img_bgr, img_gray

    def MSE(self, imageA, imageB):
        """Mean squared error between two images of identical shape."""
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1] * imageA.shape[2])
        return err

    def fit_model(self):
        """Fit the linear map from gray level to scanned print RGB."""
        self.patch_gray = np.array([self.patch_rgb[:, 0]]).reshape((256, 1))
        self.reg = LinearRegression().fit(self.patch_gray, self.cyano_rgb)
        self.reg.score(self.patch_gray, self.cyano_rgb)
        print('self.reg.coef_: ', self.reg.coef_)
        print('self.reg.intercept_: ', self.reg.intercept_)

    def predict_img(self, img):
        """Predict the print rendering of an image via the linear model.

        Args:
            img: BGR color or single-channel grayscale image.

        Returns:
            BGR uint8 image of the simulated print.
        """
        print(img.shape)
        h = img.shape[0]
        w = img.shape[1]
        if len(img.shape) == 3 and img.shape[2] == 3:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = img.reshape((h, w, 1))
        print(img.shape)
        print(self.reg.coef_.T.shape)
        img_cyano = img @ self.reg.coef_.T + self.reg.intercept_
        # BUG FIX: clip before the uint8 cast — out-of-range predictions
        # previously wrapped around and produced speckle artifacts.
        img_cyano = np.clip(img_cyano, 0, 255).astype(np.uint8)
        img_cyano = cv2.cvtColor(img_cyano, cv2.COLOR_RGB2BGR)
        img_cyano = np.array(img_cyano)
        print(img_cyano.shape)

        return img_cyano

    # ---------- Optimization with Tensorflow ---------- #
    def tf_optimize(self, img):
        """Optimize a 1-channel gray input so its 3-channel simulated print
        matches the target (the gray image replicated to 3 channels).

        Runs 50 Adam steps over the pixel values, clamping them to
        [0, 255] inside the loss so the result stays a valid image.

        Args:
            img: single-channel grayscale image.

        Returns:
            (x0, sim_opt): optimized gray values and the simulated BGR
            print of the optimized input.
        """
        print('\n---------- Start Optimization ----------')
        img_3ch = np.stack((img,) * 3, axis=-1)
        x = self.reg.coef_
        A = img
        target = img_3ch
        A_height = A.shape[0]
        A_width = A.shape[1]
        cnt = A_height * A_width
        print(A.shape)
        print(cnt)

        param_tf = tf.Variable(A, dtype=tf.float64)
        coef_tf = tf.constant(x.T, dtype=tf.float64)
        intercept_tf = tf.constant(self.reg.intercept_, dtype=tf.float64)
        target_tf = tf.constant(target, dtype=tf.float64)

        opt = tf.keras.optimizers.Adam(learning_rate=5.0)

        def loss():
            # Clamp parameters into the valid pixel range before simulating.
            x0 = tf.where(param_tf > 255.0, 255.0, param_tf)
            x0 = tf.where(x0 < 0.0, 0.0, x0)
            x0 = tf.reshape(x0, [cnt, 1])
            t_tf = tf.reshape(target_tf, [cnt, 3])
            pred = tf.linalg.matmul(x0, coef_tf) + intercept_tf
            diff = pred - t_tf
            pix_cnt = tf.cast(tf.size(t_tf), dtype=tf.float64)
            loss_val = tf.math.reduce_sum(diff ** 2) / pix_cnt
            print('loss_val: ', loss_val)
            return loss_val

        for i in range(50):
            step_count = opt.minimize(loss, [param_tf]).numpy()
            print(step_count)

        # ----- Check optimized result ----- #
        x0 = tf.where(param_tf > 255.0, 255.0, param_tf)
        x0 = tf.where(x0 < 0.0, 0.0, x0)
        x0 = x0.numpy()
        x0_1d = x0.reshape((cnt, 1))
        sim_opt = x0_1d @ x.T + self.reg.intercept_
        sim_opt = sim_opt.reshape((A_height, A_width, 3))
        # BUG FIX: clip before the uint8 cast (same wraparound defect as
        # predict_img).
        sim_opt = np.clip(sim_opt, 0, 255).astype(np.uint8)
        sim_opt = cv2.cvtColor(sim_opt, cv2.COLOR_RGB2BGR)

        return (x0, sim_opt)
206 |
+
|
207 |
+
|
208 |
+
|
209 |
+
if __name__ == '__main__':
    # Smoke test: fit the model and exercise both prediction paths on a
    # synthetic gray gradient.  The original called predict_img() and
    # tf_optimize() with no image argument, which raises TypeError.
    cy = MonoAlternative()
    cy.fit_model()
    demo = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
    cy.predict_img(demo)
    cy.tf_optimize(demo)
|
src/prediction.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import re
|
3 |
+
from glob import glob
|
4 |
+
import cv2
|
5 |
+
from src.cyano import Cyanotype
|
6 |
+
from src.mono_alternative import MonoAlternative
|
7 |
+
|
8 |
+
print('Fitting models...')

# One pre-built model per supported printing process, constructed at
# import time so every request reuses the already-fitted model.  Keys
# are the process names accepted by predict_img/optimize_img below.
models = {
    'cyanotype_full': Cyanotype(),
    'cyanotype_mono': MonoAlternative('cyanotype_mono'),
    'salt': MonoAlternative('salt'),
    'platinum': MonoAlternative('platinum'),
}
|
16 |
+
|
17 |
+
|
18 |
+
def get_suffix_number(directory):
    """Return the next free numeric file suffix in *directory*.

    Scans the existing ``*.png`` files, extracts the first run of digits
    from each file's basename, and returns max + 1 (1 for an empty or
    digit-free directory).
    """
    files = glob(f'{directory}/*.png')
    # Search the basename only: the original searched the full path, so
    # a digit anywhere in the directory name corrupted the suffix.
    matches = (re.search(r'[0-9]+', os.path.basename(f)) for f in files)
    return max([int(m.group()) for m in matches if m] + [0]) + 1
|
22 |
+
|
23 |
+
|
24 |
+
def update_patch(process_name, colorpatch):
    """Replace the color patch for *process_name* and refit its model."""
    target = models[process_name]
    target.update_patch(colorpatch)
    target.fit_model()
|
28 |
+
|
29 |
+
|
30 |
+
def predict_img(process_name, img):
    """Predict the printed appearance of *img* for a process.

    Saves the prediction under ``outputs/<process_name>`` with an
    auto-incremented numeric suffix, then returns it.

    Args:
        process_name: key into ``models`` (e.g. 'salt', 'platinum').
        img: BGR image; converted to grayscale for mono processes.

    Returns:
        The predicted BGR image.
    """
    out_dir = f'outputs/{process_name}'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    suf = get_suffix_number(out_dir)

    model = models[process_name]

    # Mono processes model gray -> RGB; collapse color input first.
    if process_name != 'cyanotype_full':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    pred_img = model.predict_img(img)
    out_path = f'{out_dir}/linear_{suf}.png'
    cv2.imwrite(out_path, pred_img)

    # Bug fix: the original returned model.predict_img(pred_img),
    # running the model a second time on its own output instead of
    # returning the prediction that was just saved.
    return pred_img
|
47 |
+
|
48 |
+
|
49 |
+
def optimize_img(process_name, img):
    """Run the TF optimization of *img* for a process and save the results.

    Writes the simulated print and the optimized negative under
    ``outputs/<process_name>`` with an auto-incremented suffix.

    Returns:
        (x0, opt_img): the optimized gray image and its simulated print.
    """
    out_dir = f'outputs/{process_name}'
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    suffix = get_suffix_number(out_dir)
    model = models[process_name]

    # Mono processes operate on a single gray channel.
    if process_name != 'cyanotype_full':
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    x0, opt_img = model.tf_optimize(img)

    # Persist both outputs side by side under the same suffix.
    for prefix, image in (('opt', opt_img), ('x0', x0)):
        cv2.imwrite(f'{out_dir}/{prefix}_{suffix}.png', image)

    return x0, opt_img
|
src/utils.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import cv2
|
3 |
+
from PIL import Image
|
4 |
+
|
5 |
+
|
6 |
+
def pil_to_cv(img):
    """Convert a PIL image to an OpenCV-convention uint8 array."""
    arr = np.array(img, dtype=np.uint8)
    if arr.ndim == 2:
        # Grayscale: no channel reordering needed.
        return arr
    channels = arr.shape[2]
    if channels == 3:
        # Color: PIL stores RGB, OpenCV expects BGR.
        arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
    elif channels == 4:
        # Color with alpha channel.
        arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
    return arr
|
15 |
+
|
16 |
+
def cv_to_pil(img):
    """Convert an OpenCV (BGR/BGRA) array to a PIL image."""
    arr = img.copy()
    if arr.ndim != 2:  # grayscale needs no channel reordering
        channels = arr.shape[2]
        if channels == 3:
            # Color: OpenCV stores BGR, PIL expects RGB.
            arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
        elif channels == 4:
            # Color with alpha channel.
            arr = cv2.cvtColor(arr, cv2.COLOR_BGRA2RGBA)
    return Image.fromarray(arr)
|