import cv2
import logging
import imutils
import numpy as np
import base64
from keras.models import load_model
from vision_extract import VisionExtract

# Module-wide logging setup: INFO level, module-scoped logger (PEP 282 convention).
# NOTE(review): calling basicConfig at import time configures the root logger for
# the whole process — confirm this is intended when the module is imported by an app.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class ModelDocLocalisation(VisionExtract):
    """Localise a document inside a raw photo using a GAN segmentation model.

    Pipeline: the model predicts a mask for the document region, the mask is
    smoothed via convex hulls, the dominant four-point contour is found, and a
    perspective transform straightens the document crop.
    """

    def __init__(self, doc_localisation_model):
        """
        :param doc_localisation_model: Keras model whose ``predict`` maps a
            (1, 256, 256, 3) image scaled to [-1, 1] to a mask in the same
            range (assumed from get_mask's pre/post-processing).
        """
        VisionExtract.__init__(self)
        self.doc_localisation_model = doc_localisation_model

    def get_mask(self, img):
        """
        Generate the document mask for an image.

        :param img: (np array) the raw RGB image
        :return: (np array) uint8 mask of shape (256, 256, 3)
        """
        img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA)
        # Scale pixels from [0, 255] to [-1, 1], the range the GAN was trained on.
        img = (img - 127.5) / 127.5
        opt = self.doc_localisation_model.predict(img.reshape(1, 256, 256, 3))
        # Map the prediction back from [-1, 1] to [0, 255] uint8.
        opt = ((opt + 1) / 2.0).reshape(256, 256, 3) * 255
        return opt.astype(np.uint8)

    def order_points(self, pts):
        """
        Arrange four corner points into top-left, top-right, bottom-right,
        bottom-left order.

        :param pts: (np array) detected four pairs of points, shape (4, 2)
        :return: rect : (np array) ordered points, shape (4, 2), float32
        """
        rect = np.zeros((4, 2), dtype="float32")
        # Top-left has the smallest x+y sum, bottom-right the largest.
        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]
        # Top-right has the smallest y-x difference, bottom-left the largest.
        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]
        rect[3] = pts[np.argmax(diff)]
        return rect

    def four_point_transform(self, image, pts):
        """
        Perspective-correct the quadrilateral defined by ``pts``.

        :param image: (np array) raw RGB image of document
        :param pts: (np array) detected four pairs of points, shape (4, 2)
        :return: warped : (np array) corrected and localised image
        """
        rect = self.order_points(pts)
        (tl, tr, br, bl) = rect
        # Output width/height: the longer of the two opposite edges, so no
        # document content is squeezed out.
        width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        max_width = max(int(width_a), int(width_b))
        height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        max_height = max(int(height_a), int(height_b))
        dst = np.array([
            [0, 0],
            [max_width - 1, 0],
            [max_width - 1, max_height - 1],
            [0, max_height - 1]], dtype="float32")
        m = cv2.getPerspectiveTransform(rect, dst)
        warped = cv2.warpPerspective(image, m, (max_width, max_height))
        return warped

    def apply_transform(self, mask, image):
        """
        Find the dominant quadrilateral contours in the mask and return the
        perspective-corrected crops of ``image`` for each.

        :param mask: (np array) grayscale mask derived from the GAN model
        :param image: (np array) raw RGB image of document
        :return: warp : (list of np arrays) corrected and localised images;
            empty list when no four-point contour is found
        """
        _, thresh = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)
        cnts = cv2.findContours(
            thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        if cnts:
            # Keep only contours whose area is comparable (> 70 %) to the
            # largest one; smaller blobs are treated as noise.
            max_area = cv2.contourArea(cnts[0])
            i = 1
            while i < len(cnts) and cv2.contourArea(cnts[i]) > 0.7 * max_area:
                i += 1
            cnts = cnts[:i]

        warp = []
        for c in cnts:
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.1 * peri, True)
            # Only quadrilaterals can be perspective-corrected.
            if len(approx) == 4:
                warp.append(
                    self.four_point_transform(image, approx.reshape(4, 2)))
        return warp

    def mod_mask(self, mask):
        """
        Process the mask and smooth its edges for a clean four-point
        transform: every contour is replaced by its filled convex hull.

        :param mask: (np array) BGR mask of the detected document
        :return: mask : (np array) smoothed grayscale mask
        """
        mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)
        cnts = cv2.findContours(
            thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
        for c in cnts:
            # Filling the convex hull closes concavities and jagged edges.
            hull = cv2.convexHull(np.array(c))
            cv2.drawContours(mask, [hull], -1, (255, 255, 255), -1)
        return mask

    def process(self, image, border=5):
        """
        Run the full localisation pipeline on one image.

        :param image: (np array) raw RGB image of document
        :param border: (int) width in pixels of the frame zeroed around the
            mask edge to suppress spurious border detections; 0 disables it
        :return: (transformed_image, mask) — list of corrected, localised
            images and the final grayscale mask
        """
        mask = self.get_mask(image)
        mask = cv2.resize(
            mask, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)
        mask = self.mod_mask(mask)
        if border > 0:
            # Guarded: with border == 0, ``mask[-0:]`` would select the whole
            # array and wipe the entire mask.
            mask[:border, :] = 0
            mask[-border:, :] = 0
            mask[:, :border] = 0
            mask[:, -border:] = 0
        transformed_image = self.apply_transform(image=image, mask=mask)
        return transformed_image, mask

    def get_doc_mask(self, doc_url: str):
        """
        Fetch the document at ``doc_url`` and return its localised crops.

        :param doc_url: (str) the document URL
        :return: (dict) {'localised_images': [...]} — base64-encoded PNGs of
            the localised document crops
        """
        images = []
        for b64_image in self.image_load(doc_url):
            raw = base64.b64decode(b64_image)
            # np.frombuffer replaces np.fromstring, which is removed in
            # NumPy >= 2.0.
            buf = np.frombuffer(raw, np.uint8)
            images.append(cv2.imdecode(buf, cv2.IMREAD_COLOR))

        masked_images = []
        for image in images:
            opt, _ = self.process(image)
            masked_images.extend(opt)

        localised_images = []
        for crop in masked_images:
            _, png = cv2.imencode('.png', crop)
            localised_images.append(base64.b64encode(png).decode('utf-8'))

        return {'localised_images': localised_images}
