from typing import Tuple, List, Any

import cv2
import numpy as np
import argparse
import os, glob, pathlib
from multiprocessing import Pool
from pathlib import Path

from numpy import ndarray

# the ratio to resize the image
RATIO = 2.0


def order_rect(points: np.ndarray) -> np.ndarray:
	"""
	Sort four corner points into a canonical order:
		- top-left
		- top-right
		- bottom-right
		- bottom-left
	:param points: array of shape (4, 2) holding the rectangle corners
	:return: (4, 2) float32 array with the corners in canonical order
	"""
	# split the corners into the two left-most and two right-most points
	by_x = points[points[:, 0].argsort()]
	left = by_x[:2]
	right = by_x[2:]

	# within each side, the smaller y is the top corner
	left = left[left[:, 1].argsort()]
	right = right[right[:, 1].argsort()]

	# assemble: top-left, top-right, bottom-right, bottom-left
	return np.array([left[0], right[0], right[1], left[1]], dtype = np.float32)

def four_point_transform(img: np.ndarray, points: np.ndarray) -> np.ndarray:
	"""
	Warp the quadrilateral described by ``points`` into an axis-aligned,
	top-down view of that region.
	:param img: the image to transform
	:param points: four corner points of the quadrilateral
	:return: the perspective-corrected image
	"""
	# based on: https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
	# bring the corners into a known order before unpacking them
	rect = order_rect(points)
	tl, tr, br, bl = rect

	def edge(p, q):
		# Euclidean distance between two corner points
		return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

	# output width: the longer of the two horizontal edges
	# (bottom-right to bottom-left, top-right to top-left)
	max_width = max(int(edge(br, bl)), int(edge(tr, tl)))

	# output height: the longer of the two vertical edges
	# (top-right to bottom-right, top-left to bottom-left)
	max_height = max(int(edge(tr, br)), int(edge(tl, bl)))

	# destination corners of the "birds eye view", specified in
	# top-left, top-right, bottom-right, bottom-left order
	dst = np.array([[0, 0],
	                [max_width - 1, 0],
	                [max_width - 1, max_height - 1],
	                [0, max_height - 1]], dtype = np.float32)

	# compute the perspective transform matrix and apply it
	matrix = cv2.getPerspectiveTransform(rect, dst)
	return cv2.warpPerspective(img, matrix, (max_width, max_height))


def cont(img: np.ndarray, gray: np.ndarray, user_thresh: int, crop: int,
         show_rect: bool = True) -> tuple[int, list]:
	"""
	Detect rectangular photo regions in ``img`` and extract them.

	The binarization threshold is adjusted iteratively until every large
	contour can be approximated by a quadrilateral, or until the attempt
	limit (30 tries) is reached.

	:param img: BGR image to search for photos
	:param gray: grayscale version of the same image
	:param user_thresh: initial binarization threshold (1-255)
	:param crop: number of extra pixels to trim from each side of every
		extracted photo (ignored when show_rect is True)
	:param show_rect: if True, draw the detected rectangles on ``img``
		instead of warping/cropping the photos out
	:return: (number of images found, list of result images);
		(0, []) when nothing usable was detected
	"""
	# image properties: height, width, area
	im_h, im_w = img.shape[:2]
	im_area = im_w * im_h

	# blur the image, because it helps to detect the contours
	blur = cv2.GaussianBlur(gray, (5, 5), 1)

	# TODO Always resize to the same size (instead of using a constant ratio)
	# resize the image to speed up contour detection
	res_gray = cv2.resize(blur, (int(im_w / RATIO), int(im_h / RATIO)), interpolation = cv2.INTER_CUBIC)

	# Fix: initialize locally before the loop. Previously this relied on a
	# module-level global and raised NameError when the while condition was
	# false on entry (user_thresh <= 0 or > 255).
	kept_contours = []

	times = 0
	# user_thresh is adjusted until all large contours are rectangles
	# or until the attempt limit / threshold range is exhausted
	while 0 < user_thresh <= 255:
		times += 1
		print(f"times = {times}")
		print(f"Detect with threshold: {user_thresh}")

		# apply threshold
		ret, thresh = cv2.threshold(res_gray, user_thresh, 255, cv2.THRESH_BINARY)
		# find contours
		contours = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[0]

		# contours whose area is between 1% and 99% of the image area
		large_contours = 0
		# 4-sided approximations, i.e. the candidate photos
		kept_contours = []
		# net threshold adjustment "voted" by the non-rectangular contours
		thres_incr = 0

		for cnt in contours:
			# scale contour coordinates back to the original resolution
			cnt[:, :, 0] = cnt[:, :, 0] * RATIO
			cnt[:, :, 1] = cnt[:, :, 1] * RATIO
			# compute the area of the contour
			area = cv2.contourArea(cnt)
			# only consider contours between 1% and 99% of the image area
			if (im_area / 100) < area < (im_area / 1.01):
				large_contours += 1
				# approximate the contour with a polygon
				epsilon = 0.07 * cv2.arcLength(cnt, True)
				approx = cv2.approxPolyDP(cnt, epsilon, True)

				if len(approx) == 4:
					# a quadrilateral: looks like a photo, keep it
					print(f"Found an image !")
					kept_contours.append(approx)
				elif len(approx) > 4:
					# too many sides: vote to decrease the threshold
					thres_incr -= 1
				else:
					# too few sides: vote to increase the threshold
					thres_incr += 1

		print(f"Contours {len(contours)} with {large_contours} large and {len(kept_contours)} images found")
		print(f"large_contours: {large_contours} & len(kept_contours) = {len(kept_contours)}")
		if large_contours == len(kept_contours):
			# every large contour is a rectangle: detection succeeded
			break
		elif thres_incr == 0:
			# the votes cancelled out: no direction to adjust the threshold in
			print("WARNING: This seems to be an edge case.")
			break
		elif times >= 30:
			# give up after 30 attempts
			# Fix: return an empty list instead of the raw image so both
			# return paths have the same (int, list) shape.
			return 0, []
		else:
			# adjust the threshold by the net vote and try again
			user_thresh += thres_incr

	found_images = []

	# for each kept contour, either draw it or extract a top-down view
	for approx in kept_contours:
		# the 4 corners of the detected quadrilateral
		# (order_rect inside four_point_transform puts them in canonical order)
		rect = np.zeros((4, 2), dtype = np.float32)
		rect[0] = approx[0]
		rect[1] = approx[1]
		rect[2] = approx[2]
		rect[3] = approx[3]

		# display mode: draw the contour on the image instead of cropping
		if show_rect:
			rected_img = cv2.drawContours(img, [approx], -1, (0, 255, 0), 5)
			found_images.append(rected_img)
			continue

		# warp the quadrilateral to an axis-aligned top-down view
		dst = four_point_transform(img, rect)

		# trim `crop` extra pixels from every side (no-op when crop == 0)
		dst_h, dst_w = dst.shape[:2]
		sub_img = dst[crop:dst_h - crop, crop:dst_w - crop]
		found_images.append(sub_img)

	return len(found_images), found_images


def autocrop(params: dict) -> None:
	"""
	Detect, crop and save the photos contained in one scanned image file.
	:param params: dictionary containing the parameters:
		- thresh: threshold to be used for the detection
		- crop: number of extra pixels to crop from each side of a found photo
		- filename: name of the file to be cropped
		- out_path: path to the output folder
		- black: boolean indicating if the background is black
		- rotation: cv2 rotation constant to apply to the image (falsy = none)
		- quality: JPEG quality (0-100) of the output images
	:return: None
	"""
	# get the parameters
	thresh = params['thresh']
	crop = params['crop']
	filename = params['filename']
	out_path = params['out_path']
	black_bg = params['black']
	rotation = params['rotation']
	quality = params['quality']

	# Fix: the message previously contained no placeholder at all
	print(f"Opening: {filename}")
	# get the name of the file without the folder and extension
	name = Path(filename).stem
	# read the image
	img = cv2.imread(filename)
	# Fix: cv2.imread returns None for unreadable files; the old code would
	# crash later with a confusing TypeError on `~img`.
	if img is None:
		print(f"Could not read image: {filename}")
		return
	# if the background is black, invert the image so it becomes white
	if black_bg:
		img = ~img
	# if the image needs to be rotated, rotate it
	if rotation:
		img = cv2.rotate(img, rotation)

	# add white background (in case one side is cropped right already, otherwise script would fail finding contours)
	img = cv2.copyMakeBorder(img, 100, 100, 100, 100, cv2.BORDER_CONSTANT, value = [255, 255, 255])
	# grayscale version used for contour detection
	gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	# detect and extract the photos
	found, found_images = cont(img, gray, thresh, crop, show_rect = False)

	# if contours were found, save the images
	if found:
		# create the output folder if it does not exist (race-free)
		os.makedirs(out_path, exist_ok = True)
		for idx, img in enumerate(found_images):
			print(f"Saving to: {out_path}/{name}-{idx}.jpg")
			# if the background is black, invert the image back
			if black_bg:
				img = ~img
			# Fix: cv2.imwrite signals failure through its boolean return
			# value, it does not raise IOError, so the previous try/except
			# was dead code.
			if not cv2.imwrite(f"{out_path}/{name}-{idx}.jpg", img, [int(cv2.IMWRITE_JPEG_QUALITY), quality]):
				print(f"{out_path}/{name}-{idx}.jpg cannot be saved")
				return
		# TODO: this is always writing JPEG, no matter what was the input file type, can we detect this?

	else:
		# if no contours were found, write input file to "failed" folder
		print(f"Failed finding any contour. Saving original file to {out_path}/failed/{name}-0.jpg")
		# create the failed folder if it does not exist (race-free)
		os.makedirs(f"{out_path}/failed/", exist_ok = True)

		# copy the raw file bytes into the failed folder; note the copy is
		# renamed to "<name>-0.jpg" regardless of the original extension
		with open(filename, "rb") as in_f, open(f"{out_path}/failed/{name}-0.jpg", "wb") as out_f:
			# read the file in chunks of 1MB, to avoid memory issues
			while buf := in_f.read(1024 ** 2):
				out_f.write(buf)


def main():
	parser = argparse.ArgumentParser(
			description = "Crop/Rotate images automatically. Images should be large enough on white background.")
	parser.add_argument("-i", metavar = "INPUT_PATH", default = ".",
	                    help = "Input path. Specify the folder containing the images you want be processed.")
	parser.add_argument("-o", metavar = "OUTPUT_PATH", default = "crop/",
	                    help = "Output path. Specify the folder name to which processed images will be written.")
	parser.add_argument("-r", metavar = "ROTATE", type = int, default = 0,
	                    help = "Rotation value.")
	parser.add_argument("-t", metavar = "THRESHOLD", type = int, default = 200,
	                    help = "Threshold value. Higher values represent less aggressive contour search. \
                                If it's chosen too high, a white border will be introduced")
	parser.add_argument("-c", metavar = "CROP", type = int, default = 0,
	                    help = "Standard extra crop. After crop/rotate often a small white border remains. \
                                This removes this. If it cuts off too much of your image, adjust this.")
	parser.add_argument("-b", "--black", action = "store_true",
	                    help = "Set this if you are using black/very dark (but uniform) backgrounds.")
	parser.add_argument("-q", "--quality", type = int, default = 92,
	                    help = "JPEG quality for output images (Default = 92).")

	parser.add_argument("-p", metavar = "THREADS", type = int, default = None,
	                    help = "Specify the number of threads to be used to process the images in parallel. \
                                If not provided, the script will try to find the value itself \
                                (which doesn't work on Windows or MacOS -> defaults to 1 thread only).")
	parser.add_argument("-s", "--single", action = "store_true",
	                    help = "Process single image. i.e.: -i img.jpg -o crop/")
	args = parser.parse_args()

	in_path = pathlib.PureWindowsPath(
			args.i).as_posix()  # since windows understands posix too: let's convert it to a posix path.
	out_path = pathlib.PureWindowsPath(
			args.o).as_posix()  # (works on all systems and conveniently also removes additional '/' on posix systems)

	thresh = args.t
	crop = args.c
	num_threads = args.p
	single = args.single
	black = args.black
	match args.r:
		case 180:
			rotation = cv2.ROTATE_180
		case 90:
			rotation = cv2.ROTATE_90_CLOCKWISE
		case -90:
			rotation = cv2.ROTATE_90_COUNTERCLOCKWISE
		case 0:
			rotation = None
		case _:
			print("Invalid roation")
			return
	quality = args.quality
	if quality < 0 or quality > 100:
		print("Invalid JPEG quality")
		return

	if not os.path.exists(out_path):
		os.makedirs(out_path)

	files = []

	if not single:
		types = ('*.bmp', '*.BMP', '*.tiff', '*.TIFF', '*.tif', '*.TIF', '*.jpg', '*.JPG', '*.JPEG', '*.jpeg', '*.png',
		         '*.PNG')  # all should work but only .jpg was tested

		for t in types:
			if glob.glob(f"{in_path}/{t}") != []:
				f_l = glob.glob(f"{in_path}/{t}")
				for f in f_l:
					files.append(f)
	else:
		files.append(in_path)

	if len(files) == 0:
		print(f"No image files found in {in_path}\n Exiting.")
	else:
		if num_threads == None:
			try:
				num_threads = len(os.sched_getaffinity(0))
				print(f"Using {num_threads} threads.")
			except:
				print("Automatic thread detection didn't work. Defaulting to 1 thread only. \
                        Please specify the correct number manually via the '-p' argument.")
				num_threads = 1

		params = []
		for f in files:
			params.append({ "thresh"  : thresh,
			                "crop"    : crop,
			                "filename": f,
			                "out_path": out_path,
			                "black"   : black,
			                "rotation": rotation,
			                "quality" : quality })

		with Pool(num_threads) as p:
			results = p.map(autocrop, params)


if __name__ == "__main__":
	# The CLI entry point is currently disabled; a single hard-coded file
	# is processed instead.
	# main()
	demo_params = {
		"thresh"  : 210,  # the higher, the less aggressive, 0 is most aggressive
		"crop"    : 0,  # extra pixels trimmed after crop/rotate to remove the small white border that often remains; lower it if it cuts off too much
		"filename": "../input/project (1).jpg",  # input file
		"out_path": "../output/",  # output folder
		"black"   : False,  # set this if you are using black/very dark (but uniform) backgrounds
		"rotation": 0,  # rotation value
		"quality" : 100  # jpeg quality for output images
	}
	autocrop(demo_params)
