from PIL import Image, ImageChops
import numpy as np
import cv2 
from skimage.metrics import structural_similarity

def are_same_image(image1, image2):
    """Return True if two PIL images are pixel-for-pixel identical.

    Computes the per-pixel absolute difference with ImageChops and checks
    that every value is zero. Both images must have the same mode and
    size (a requirement of ImageChops.difference itself).
    """
    diff = ImageChops.difference(image1, image2)
    # np.asarray converts the Image directly (no intermediate getdata()
    # sequence); any nonzero channel value means the images differ.
    return not np.any(np.asarray(diff))


# Compare two training-sample grids (classical vs. segmentation-conditioned
# run at the same global step / epoch / batch), report their SSIM score, and
# write visualizations of the differing regions to scripts/outputs/.
img_1_path = "logs/2023-02-08_CLASSICAL_DYNAFILL_CORRECT_FULL_LOW_RES/images/train/samples_gs-006750_e-000002_b-001910.png"
img_2_path = "logs/2023-02-08_SEG_DYNAFILL_FULLARRANGE_SD_NEW_KEYS_SEG_LOW_RES/images/train/samples_gs-006750_e-000002_b-001910.png"

before = cv2.imread(img_1_path)
after = cv2.imread(img_2_path)

# cv2.imread returns None (no exception) for a missing/unreadable file,
# which would otherwise surface as a cryptic cvtColor error below.
if before is None:
    raise FileNotFoundError(f"Could not read image: {img_1_path}")
if after is None:
    raise FileNotFoundError(f"Could not read image: {img_2_path}")

# SSIM here is computed on single-channel images, so compare in grayscale.
before_gray = cv2.cvtColor(before, cv2.COLOR_BGR2GRAY)
after_gray = cv2.cvtColor(after, cv2.COLOR_BGR2GRAY)

# full=True also returns the per-pixel similarity map (float in [0, 1]).
(score, diff) = structural_similarity(before_gray, after_gray, full=True)
print("Image Similarity: {:.4f}%".format(score * 100))

# The diff map is floating point in [0, 1]; convert to 8-bit unsigned
# integers in [0, 255] so OpenCV thresholding/drawing can use it.
diff = (diff * 255).astype("uint8")
diff_box = cv2.merge([diff, diff, diff])  # 3-channel copy for colored boxes

# Otsu-threshold the (inverted) diff map — low similarity becomes white —
# then find the outer contours of the regions where the images differ.
thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# findContours returns (contours, hierarchy) on OpenCV 4 but
# (image, contours, hierarchy) on OpenCV 3; pick the right element.
contours = contours[0] if len(contours) == 2 else contours[1]

mask = np.zeros(before.shape, dtype='uint8')
filled_after = after.copy()

# Ignore tiny speckle differences below this contour area (in pixels).
MIN_CONTOUR_AREA = 40

for c in contours:
    if cv2.contourArea(c) > MIN_CONTOUR_AREA:
        x, y, w, h = cv2.boundingRect(c)
        # Outline each differing region in green on every visualization.
        cv2.rectangle(before, (x, y), (x + w, y + h), (36, 255, 12), 2)
        cv2.rectangle(after, (x, y), (x + w, y + h), (36, 255, 12), 2)
        cv2.rectangle(diff_box, (x, y), (x + w, y + h), (36, 255, 12), 2)
        # Solid fill: white on the binary mask, green on the "after" copy.
        cv2.drawContours(mask, [c], 0, (255, 255, 255), -1)
        cv2.drawContours(filled_after, [c], 0, (0, 255, 0), -1)

cv2.imwrite('scripts/outputs/diff_box.png', diff_box)
cv2.imwrite('scripts/outputs/mask.png', mask)
cv2.imwrite('scripts/outputs/filled after.png', filled_after)