import tensorflow as tf
import os
import pathlib
import time
import datetime
import glob  # needed by load() below
from matplotlib import pyplot as plt
import numpy as np
import cv2  # `from cv2 import cv2` fails on current opencv-python builds
import math
import keras
# salt-and-pepper noise
def saltpepperNoise(image):
    row, col, ch = image.shape
    s_vs_p = 0.5
    amount = 0.004
    out = image.copy()  # copy so the input image is not mutated in place
    # Salt mode
    num_salt = np.ceil(amount * image.size * s_vs_p)
    coords = [np.random.randint(0, i - 1, int(num_salt))
              for i in image.shape]
    out[tuple(coords)] = 1  # index with a tuple; bare-list indexing is invalid in modern NumPy
    # Pepper mode
    num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
    coords = [np.random.randint(0, i - 1, int(num_pepper))
              for i in image.shape]
    out[tuple(coords)] = 0
    return out
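# Hypothetical usage sketch (file name and reshape are assumptions):
# noisy = saltpepperNoise(gray_imread("landscape.png").reshape(256, 256, 1))
# plt.imshow(noisy.squeeze(), cmap="gray"); plt.show()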
def color_imread(path):
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = (img / 127.5) - 1  # scale to [-1, 1] to match the generator's tanh output
    img = img.astype(np.float32)
    return img

def gray_imread(path):
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img.astype(np.float32)
    return img
def load():
    # The original dataset directories are not part of this file; bare
    # sorted('*.png') only sorted the characters of the string, so glob the
    # pattern instead (pattern kept as-is from the source).
    input_paths = sorted(glob.glob('*.png'))
    real_paths = sorted(glob.glob('*.png'))
    input_images = []
    real_images = []
    for path in input_paths:
        image = gray_imread(path)
        input_images.append(image)
    for path in real_paths:
        image = color_imread(path)
        real_images.append(image)
    return input_images, real_images
def reshape(gray_img):
    gray_img = np.asarray(gray_img)
    gray_img = gray_img.reshape(256, 256, 1)
    return gray_img
#input_images , real_images = load()
#test = gray_imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
#test = cv2.resize(test,(256,256))
#for i in range(len(input_images)):
#    input_images[i] = reshape(input_images[i])
#test = reshape(test)
#print(np.asarray(test).shape)
array_Gen_loss = []
def histogram_graphic(img):
    hist, bins = np.histogram(img.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * float(hist.max()) / cdf.max()
    plt.plot(cdf_normalized, color='b')
    plt.hist(img.flatten(), 256, [0, 256], color='r')
    plt.xlim([0, 230])
    plt.legend(('cdf', 'histogram'), loc='upper left')
    plt.show()
def preprocessing(path):
    img = cv2.imread(path)
    img = np.asarray(img).reshape(256, 256, 3)
    #print(img.shape)
    #cv2.imshow(img)
    #cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/enhance/Before_hist_equalizer.png",img)
    # Lighting adjustment
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # gamma estimation needs the HSV value channel
    hue, sat, val = cv2.split(hsv)
    mid = 0.5
    mean = np.mean(val)
    gamma = math.log(mid * 255) / math.log(mean)
    #print("Gamma:", gamma)
    # Apply the inverse operation according to the computed gamma:
    # gamma correction on the value channel
    #val_gamma = np.power(val, gamma).clip(0,255).astype(np.uint8)
    # merge the new value channel back with the original hue and sat channels
    #hsv_gamma = cv2.merge([hue, sat, val_gamma])
    #img_gamma = cv2.cvtColor(hsv_gamma, cv2.COLOR_HSV2BGR)
    #cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/img_gamma.png",img_gamma)
    #cv2.imshow(img_gamma)
    # Adaptive Histogram Equalization (CLAHE)
    #gamma_path = "/content/drive/MyDrive/ColabNotebooks/img_gamma.png"
    #img2 = cv2.imread(gamma_path,0)
    #img2 = np.asarray(img2).reshape(256,256,1)
    #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # clipLimit -> contrast limit
    #clahe_equ = clahe.apply(img2)
    #cv2.imshow(clahe_equ)
    #cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/enhance/After_clahe_equalizer.png",clahe_equ)
    #return clahe_equ

#preprocessing("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
def image_colorfulness(image):
    # split the image into its respective RGB components
    (B, G, R) = cv2.split(image.astype("float"))
    # compute rg = R - G
    rg = np.absolute(R - G)
    # compute yb = 0.5 * (R + G) - B
    yb = np.absolute(0.5 * (R + G) - B)
    # compute the mean and standard deviation of both `rg` and `yb`
    (rbMean, rbStd) = (np.mean(rg), np.std(rg))
    (ybMean, ybStd) = (np.mean(yb), np.std(yb))
    # combine the mean and standard deviations
    stdRoot = np.sqrt((rbStd ** 2) + (ybStd ** 2))
    meanRoot = np.sqrt((rbMean ** 2) + (ybMean ** 2))
    # derive the "colorfulness" metric and return it (values below ~24 read as low)
    return stdRoot + (0.3 * meanRoot)
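# Hypothetical usage sketch (file name is an assumption):
# img = cv2.imread("low-sat.jpg")
# print(image_colorfulness(img))  # anything under ~24 counts as undersaturated here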
from PIL import Image, ImageEnhance

def add_saturation(path):
    clr = cv2.imread(path)
    value = image_colorfulness(clr)
    print(value)
    img = Image.open(path)
    enhanced_obj = ImageEnhance.Color(img)
    # use 30 as the limit so images whose saturation is already decent still get a small boost
    if value < 30:
        enhanced_obj.enhance((30 - value) * 0.1 + 0.75).save("enhance/deneme_sat.jpg")

#add_saturation("/content/drive/MyDrive/ColabNotebooks/enhance/cikti2.jpeg")
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened
def example(image, name):
    sharpened_image = unsharp_mask(image)
    cv2.imwrite(name, sharpened_image)

#s_img= cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/deneme.jpg")
#example(s_img,"/content/drive/MyDrive/ColabNotebooks/enhance/deneme_sharp.jpg")
#img2 = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
#newimg2 = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/Output/nadam_image9.png")
#psnr(img2,newimg2)
#ssim(img2,newimg2)
#original = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png",0)
#contrast = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/After_clahe_equalizer_with_gamma.png",0)
#print(original.dtype)
#db = psnr(original, contrast)
#print(db)
OUTPUT_CHANNELS = 3

def downsample(filters, size, apply_batchnorm=True):
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                                      kernel_initializer=initializer, use_bias=False))
    # strides=2 halves the spatial dims here: 256 -> 128
    if apply_batchnorm:
        result.add(tf.keras.layers.BatchNormalization())
    result.add(tf.keras.layers.LeakyReLU())
    return result
def upsample(filters, size, apply_dropout=False):
    initializer = tf.random_normal_initializer(0., 0.02)
    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                        padding='same',
                                        kernel_initializer=initializer,
                                        use_bias=False))
    # and here the transposed convolution doubles the spatial dims
    result.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))
    result.add(tf.keras.layers.ReLU())
    return result
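# Quick shape sanity check, a sketch you can paste into a scratch cell:
# x = tf.zeros([1, 256, 256, 3])
# print(downsample(64, 4)(x).shape)                          # (1, 128, 128, 64)
# print(upsample(64, 4)(tf.zeros([1, 128, 128, 64])).shape)  # (1, 256, 256, 64)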
def Generator(tpu=False):
    inputs = tf.keras.layers.Input(shape=[256, 256, 3])
    down_stack = [
        downsample(64, 4, apply_batchnorm=False),  # (batch_size, 128, 128, 64)
        downsample(128, 4),  # (batch_size, 64, 64, 128)
        downsample(256, 4),  # (batch_size, 32, 32, 256)
        downsample(512, 4),  # (batch_size, 16, 16, 512)
        downsample(512, 4),  # (batch_size, 8, 8, 512)
        downsample(512, 4),  # (batch_size, 4, 4, 512)
        downsample(512, 4),  # (batch_size, 2, 2, 512)
        downsample(512, 4),  # (batch_size, 1, 1, 512)
    ]
    up_stack = [
        upsample(512, 4, apply_dropout=True),  # (batch_size, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 8, 8, 1024)
        upsample(512, 4),  # (batch_size, 16, 16, 1024)
        upsample(256, 4),  # (batch_size, 32, 32, 512)
        upsample(128, 4),  # (batch_size, 64, 64, 256)
        upsample(64, 4),   # (batch_size, 128, 128, 128)
    ]
    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')  # (batch_size, 256, 256, 3)
    # Build U-Net
    x = inputs
    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    skips = reversed(skips[:-1])  # reverse the skips, dropping the bottleneck output
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = tf.keras.layers.Concatenate()([x, skip])
    x = last(x)
    model = tf.keras.Model(inputs=inputs, outputs=x)
    return model
# Generator2 was a byte-identical copy of Generator; alias it instead of duplicating.
Generator2 = Generator
# `pre_trained` stays commented out here; define it (e.g. by uncommenting the two
# lines below) before calling train_step()/fit(), which reference it at module level.
#pre_trained = Generator()
#pre_trained.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True))
pre_trained2 = Generator2()
pre_trained2.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True))
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *

#integer = (input_images[0]+1)*127.5
LAMBDA = 100
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def generator_loss(disc_generated_output, gen_output, target, total_loop):
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    # Mean absolute error
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_gen_loss = gan_loss + (LAMBDA * l1_loss)
    if total_loop % 2 == 0:
        array_Gen_loss.append(total_gen_loss)
    return total_gen_loss, gan_loss, l1_loss
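# Worked example of the weighting (made-up numbers): with gan_loss ~0.7 and
# l1_loss ~0.05, total = 0.7 + 100 * 0.05 = 5.7, so the L1 term dominates early
# training, which is the usual pix2pix recipe.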
ssim_results = []
psnr_results = []

def ssim_psnr(pre, target):
    ssim_res = ssim(pre, target)
    psnr_res = psnr(pre, target)
    ssim_results.append(ssim_res)
    psnr_results.append(psnr_res)  # was appending the ssim_results list itself
def Discriminator():
    initializer = tf.random_normal_initializer(0., 0.02)
    inp = tf.keras.layers.Input(shape=[256, 256, 3], name='input_image')   # lr
    tar = tf.keras.layers.Input(shape=[256, 256, 3], name='target_image')  # hr
    x = tf.keras.layers.concatenate([inp, tar])  # (batch_size, 256, 256, channels*2)
    down1 = downsample(64, 4, False)(x)  # (batch_size, 128, 128, 64)
    down2 = downsample(128, 4)(down1)    # (batch_size, 64, 64, 128)
    down3 = downsample(256, 4)(down2)    # (batch_size, 32, 32, 256)
    zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (batch_size, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                  kernel_initializer=initializer,
                                  use_bias=False)(zero_pad1)  # (batch_size, 31, 31, 512)
    batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
    leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
    zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (batch_size, 33, 33, 512)
    last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                  kernel_initializer=initializer)(zero_pad2)  # (batch_size, 30, 30, 1)
    return tf.keras.Model(inputs=[inp, tar], outputs=last)

discriminator = Discriminator()
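# PatchGAN sanity check, a sketch: the head emits a 30x30 grid of real/fake logits,
# one per image patch, rather than a single scalar.
# print(discriminator([tf.zeros([1, 256, 256, 3]), tf.zeros([1, 256, 256, 3])]).shape)
# # -> (1, 30, 30, 1)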
def discriminator_loss(disc_real_output, disc_generated_output):
    real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
    total_disc_loss = real_loss + generated_loss  # optionally scale by 0.5
    return total_disc_loss

generator_optimizer = tf.keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name="Nadam")
discriminator_optimizer = tf.keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name="Nadam")
def generate_images(model, test_input, step):
    if step % 1 == 0:  # % 1 saves on every step; raise the modulus to save less often
        prediction = model(test_input, training=True)
        pre = prediction[0]
        pre = (pre + 1) * 127.5  # map tanh output from [-1, 1] back to [0, 255]
        pre = np.uint8(pre)
        name = 'image{step}.png'.format(step=step)
        plt.imsave(name, pre)
#test = np.array(test).reshape(1,256,256,1)
#input_images = np.array(input_images).reshape(-1,1,256,256,1)
#real_images = np.array(real_images).reshape(-1,1,256,256,3)
#print(real_images[0].shape)
def train_step(input_image, target, step):
    # Note: relies on the module-level `pre_trained` generator defined above.
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = pre_trained(input_image, training=True)
        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)
        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target, 10)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
    generator_gradients = gen_tape.gradient(gen_total_loss,
                                            pre_trained.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,
                                                 discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            pre_trained.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))
def fit(input_images, real_images, test, steps):
    example_input = test
    start = time.time()
    step = 0
    i = 0
    while step < steps:
        print("Step = ", step)
        while i < len(input_images):
            train_step(input_images[i], real_images[i], step)
            if i % 200 == 0:
                print('i= ', i)
            i += 1
        generate_images(pre_trained, example_input, step)
        step += 1
        i = 0
    generate_images(pre_trained, example_input, step)
#fit(input_images,real_images,test,10)
#pre_trained.save("enhance/pix2pix.h5")
a = np.asarray(array_Gen_loss)
plt.plot(a)
plt.ylabel('Generator loss')  # a raw loss value, not a percentage
plt.xlabel('Logged training steps')
plt.show()
#pre_trained.summary()
#pre_trained.optimizer
#path2 = '/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png'
#image = gray_imread(path2)
#image = saltpepperNoise(image)
#image = np.array(image).reshape(1,256,256,1)
#prediction = pre_trained(image,training=True)
#pre = prediction[0]
#pre = (pre+1)*127.5
#pre = np.uint8(pre)
#name = '/content/drive/MyDrive/ColabNotebooks/enhance/pre_trained.png'
#plt.imsave(name,pre)
#cv2.imshow(pre)
# Uncommented so ssim_psnr() above has something to call; the unused `Choice`
# parameter of the original psnr() is dropped to match that call site.
def ssim(original, predict):
    return tf.image.ssim(original, predict, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)

def psnr(Input, Output):
    return tf.image.psnr(Input, Output, max_val=255)
def result(Input, Choice, Step):
    if Choice == "Indoor-Coloring":
        if Step == 1:
            pre_trained = tf.keras.models.load_model("indoor_1.h5")
        if Step == 2:
            pre_trained = tf.keras.models.load_model("indoor_2.h5")
        if Step == 3:
            pre_trained = tf.keras.models.load_model("indoor_3.h5")
        size0 = Input.shape[0]
        size1 = Input.shape[1]
        start = Input
        Input = cv2.resize(Input, (256, 256), interpolation=cv2.INTER_AREA)
        Input = cv2.cvtColor(Input, cv2.COLOR_BGR2GRAY)
        Input = np.array(Input).reshape(1, 256, 256, 1)
        prediction = pre_trained(Input, training=True)
        Input = prediction[0]
        Input = (Input + 1) * 127.5
        Input = np.uint8(Input)
        Input = cv2.resize(Input, (size1, size0), interpolation=cv2.INTER_AREA)
        finish = Input
        # cast to float first: uint8 subtraction wraps around and skews the MSE
        mse = np.mean((start.astype(np.float64) - finish.astype(np.float64)) ** 2)
        MAX = np.iinfo(start.dtype).max
        if mse == 0:
            Psnr = 100
        else:
            Psnr = 20 * math.log10(MAX / math.sqrt(mse))
        return Input, Psnr
    if Choice == "Outdoor-Coloring":
        if Step == 1:
            pre_trained = tf.keras.models.load_model("outdoor_1.h5")
        if Step == 2:
            pre_trained = tf.keras.models.load_model("outdoor_2.h5")
        if Step == 3:
            pre_trained = tf.keras.models.load_model("outdoor_3.h5")
        size0 = Input.shape[0]
        size1 = Input.shape[1]
        start = Input
        Input = cv2.resize(Input, (256, 256), interpolation=cv2.INTER_AREA)
        Input = cv2.cvtColor(Input, cv2.COLOR_BGR2GRAY)
        Input = np.array(Input).reshape(1, 256, 256, 1)
        prediction = pre_trained(Input, training=True)
        Input = prediction[0]
        Input = (Input + 1) * 127.5
        Input = np.uint8(Input)
        Input = cv2.resize(Input, (size1, size0), interpolation=cv2.INTER_AREA)
        finish = Input
        mse = np.mean((start.astype(np.float64) - finish.astype(np.float64)) ** 2)
        MAX = np.iinfo(start.dtype).max
        if mse == 0:
            Psnr = 100
        else:
            Psnr = 20 * math.log10(MAX / math.sqrt(mse))
        return Input, Psnr
if Choice=="Enhancement": | |
pre_trained2 = tf.keras.models.load_model("gradio_pix2pix.h5") | |
size0 = Input.shape[0] | |
size1 = Input.shape[1] | |
Input = cv2.resize(Input, (256,256), interpolation = cv2.INTER_AREA) | |
Input = cv2.cvtColor(Input , cv2.COLOR_BGR2GRAY) | |
Input = np.array(Input).reshape(1,256,256,1) | |
prediction = pre_trained2(Input,training=True) | |
Input = prediction[0] | |
Input = (Input+1)*127.5 | |
Input = np.uint8(Input) | |
Input = cv2.resize(Input, (size1,size0), interpolation = cv2.INTER_AREA) | |
return Input | |
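# PSNR arithmetic check (made-up numbers): for 8-bit images MAX = 255, so an MSE of
# 100 gives 20 * log10(255 / 10) ≈ 28.1 dB; mse == 0 is short-circuited to 100 above.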
#lst = cv2.imread('/content/drive/MyDrive/ColabNotebooks/enhance/low-sat.jpg')
#r, p = result(lst, "Indoor-Coloring", 3)  # result() now also takes Choice and Step
#cv2.imshow(r)
import gradio as gr

# Note: "Repair" and "Repair and Color" are offered below but have no branch in result().
iface = gr.Interface(
    fn=result,
    inputs=[gr.inputs.Image(type="numpy", image_mode="RGB"),
            gr.inputs.Radio(["Indoor-Coloring", "Outdoor-Coloring", "Enhancement", "Repair", "Repair and Color"]),
            gr.inputs.Slider(minimum=1, maximum=3, default=3, step=1)],
    outputs=[gr.outputs.Image(type="auto", label="Output"),
             gr.outputs.Textbox(type="number", label="Psnr")],
    theme="grass",
    live=True,
    css=""" body {background-color: rgba(127,191,63,0.48)} """,
    title="Image Enhancement",
    article=""" <a href="https://docs.google.com/document/d/19k6dyR5x_hd1M0yoU8i49dlDWvFmtnBT/edit?usp=sharing&ouid=115743073712072785012&rtpof=true&sd=true" download="example.docx"><img src="https://img.icons8.com/external-itim2101-lineal-color-itim2101/64/000000/external-article-blogger-and-influencer-itim2101-lineal-color-itim2101-1.png" alt="Article"></a>""",
    # "Coloring" was not a valid Radio choice, and with three inputs each example
    # needs three values; valid choices and a Step are filled in here.
    examples=[["dog.jpg", "Indoor-Coloring", 3], ["woman.png", "Outdoor-Coloring", 3]])
iface.launch(debug=True, show_tips=True, inbrowser=True)