
import cv2
from PIL import Image, ImageOps, PngImagePlugin
from PIL import ImageDraw
from watermarker.marker import add_mark
import numpy as np
import pandas as pd
import random

from eshoputils.http_client import HttpClient
# from eshoputils.iopaint.lama import LaMa
#from eshoputils.iopaint.schema import InpaintRequest
from eshoputils.utils import img2base64, data2base64

# lama_model = LaMa("cpu", **{})
# yolo_model = torch.hub.load('eshoputils/yolov5', 'custom', path='eshoputils/yolov5/best.pt', source='local')
# yolo_model = yolo_model.cpu()

class WaterMarkUtil:
    @staticmethod
    def add_watermark(source_path, target_path, add_text):
        """Stamp a repeating, tilted text watermark onto an image.

        Delegates to watermarker's ``add_mark``: reads the image at
        ``source_path``, tiles ``add_text`` across it, and writes the
        result to ``target_path``.
        """
        add_mark(
            file=source_path,
            out=target_path,
            mark=add_text,
            opacity=0.4,      # semi-transparent overlay
            angle=30,         # diagonal tilt of the text
            space=300,        # spacing between repeated marks
            color='#43E812',  # light-green watermark text
        )

    @staticmethod
    def pic_process(image):
        """Grayscale an image and dilate it twice to smear fine detail.

        Parameters
        ----------
        image : np.ndarray
            BGR image as produced by ``cv2.imread``.

        Returns
        -------
        np.ndarray
            Single-channel image after two successive morphological
            dilations with a 25x8 rectangular kernel.
        """
        # Morphology works on one channel, so grayscale first.
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # NOTE(review): the original also built an unused 30x9 kernel (t1);
        # only the 25x8 kernel was ever applied, so the dead kernel is removed.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 8))
        dilated = cv2.morphologyEx(src=gray, op=cv2.MORPH_DILATE, kernel=kernel)
        # Second pass widens the dilation further with the same kernel.
        return cv2.morphologyEx(src=dilated, op=cv2.MORPH_DILATE, kernel=kernel)

    @staticmethod
    def remove_watermark_byapi(path, output):
        """Remove a watermark by POSTing the image to the remote service.

        Reads the image at ``path``, sends it base64-encoded to the
        watermark-removal endpoint, and writes the raw response bytes
        to ``output``.
        """
        payload = {"image_base64": img2base64(path)}
        headers = {
            "Content-Type": "application/json"
        }
        # Call the watermark-removal model service.
        resp = HttpClient.post(
            "http://124.222.242.200:19993/process_image",
            headers=headers,
            body=payload,
            json=False,
        )
        # Optional second stage (disabled): forward the cleaned image to a
        # diffusion inpainting endpoint for further touch-up.
        # b64str2 = data2base64(resp.content)
        # resp = HttpClient.post("http://124.222.242.200:19992/process_image", headers = headers, body={
        #     "image_base64": b64str2,
        #     "prompt": "no watermark, no light colored text, remove transparent text, remove crystal text",
        #     "negative_prompt": "",
        #     "strength": 0.1,
        #     "num_inference_steps": 50
        # }, json=False)
        with open(output, 'wb') as sink:
            sink.write(resp.content)

    # @staticmethod
    # def find_remove_watermark(path, output):
    #     matplotlib.use('Qt5Agg')
    #     image = Image.open(path).convert('RGB')
    #     ori_image = Image.open(path)
    #     results = yolo_model([image]).pandas().xyxy
    #     if len(results[0]) > 0:
    #         print(f"{path}有水印")
    #         test_wm, boxs = WaterMarkUtil.detect_watermark_from_img_result(image, results[0])
    #         mask = WaterMarkUtil.create_mask(boxs, ori_image)
    #         WaterMarkUtil.remove_watermark_bylama(ori_image, mask, output)
    #     else:
    #         # No watermark detected, so no erasing needed — copy through as-is
    #         with open(path, 'rb') as s, open(output, 'wb') as t:
    #             t.write(s.read())

    # @staticmethod
    # def torch_gc():
    #     if torch.cuda.is_available():
    #         torch.cuda.empty_cache()
    #         torch.cuda.ipc_collect()
    #     gc.collect()

    @staticmethod
    def concat_alpha_channel(rgb_np_img, alpha_channel) -> np.ndarray:
        if alpha_channel is not None:
            if alpha_channel.shape[:2] != rgb_np_img.shape[:2]:
                alpha_channel = cv2.resize(
                    alpha_channel, dsize=(rgb_np_img.shape[1], rgb_np_img.shape[0])
                )
            rgb_np_img = np.concatenate(
                (rgb_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
            )
        return rgb_np_img



    # @staticmethod
    # def remove_watermark_bylama(img, mask, output):
    #     image, alpha_channel, infos = WaterMarkUtil.get_image_infos(img)
    #     mask, _, _ = WaterMarkUtil.get_image_infos(mask, True)
    #     mask = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
    #
    #     rgb_np_img = lama_model(image, mask, InpaintRequest()).astype(np.uint8)
    #     WaterMarkUtil.torch_gc()
    #
    #     rgb_np_img = cv2.cvtColor(rgb_np_img.astype(np.uint8), cv2.COLOR_BGR2RGB)
    #     rgb_res = WaterMarkUtil.concat_alpha_channel(rgb_np_img, alpha_channel)
    #
    #     WaterMarkUtil.rmimage_to_save(
    #         Image.fromarray(rgb_res),
    #         ext="png",
    #         quality=95,
    #         infos=infos,
    #         path=output
    #     )


    @staticmethod
    def rmimage_to_save(pil_img, ext: str, quality: int = 95, infos={}, path = "") -> bytes:
        with open(path, "wb") as output:
            kwargs = {k: v for k, v in infos.items() if v is not None}
            if ext == "jpg":
                ext = "jpeg"
            if "png" == ext.lower() and "parameters" in kwargs:
                pnginfo_data = PngImagePlugin.PngInfo()
                pnginfo_data.add_text("parameters", kwargs["parameters"])
                kwargs["pnginfo"] = pnginfo_data
            pil_img.save(output, format=ext, quality=quality, **kwargs)


    @staticmethod
    def get_image_infos(image, gray =False):
        alpha_channel = None
        try:
            image = ImageOps.exif_transpose(image)
        except:
            pass
        # exif_transpose will remove exif rotate info，we must call image.info after exif_transpose
        infos = image.info

        if gray:
            np_img = np.array(image)
        else:
            if image.mode == "RGBA":
                np_img = np.array(image)
                alpha_channel = np_img[:, :, -1]
                np_img = cv2.cvtColor(np_img, cv2.COLOR_RGBA2RGB)
            else:
                image = image.convert("RGB")
                np_img = np.array(image)
        return np_img, alpha_channel, infos

    @staticmethod
    def create_mask(boxs, original_image):
        """Build a grayscale inpainting mask from bounding boxes.

        Each box is ``(x1, y1, x2, y2)``; its rectangle is filled with value
        197 on a black (0) canvas matching ``original_image``'s size.
        NOTE(review): 197 rather than 255 — presumably chosen for the removal
        backend's threshold; confirm before changing.
        """
        mask = Image.new('L', original_image.size, color=0)
        painter = ImageDraw.Draw(mask)
        for box in boxs:
            top_left = tuple(box[:2])
            bottom_right = tuple(box[2:])
            painter.rectangle([top_left, bottom_right], fill=197)
        return mask

    @staticmethod
    def detect_watermark_from_img_result(img, res, err_ratio=0.05, threshold=0.1):
        """Pick watermark crops out of a YOLO-style detection result.

        Parameters (assumed from usage — TODO confirm against the caller):
        - img: PIL image the detections refer to.
        - res: pandas DataFrame in yolov5 ``.pandas().xyxy`` layout with
          columns xmin/ymin/xmax/ymax/confidence/class.
        - err_ratio: relative size tolerance for keeping a box.
        - threshold: minimum confidence for keeping a box.

        Returns a tuple ``(watermark_crop_ndarray, list_of_int_boxes)``.
        """
        res: pd.DataFrame = res.sort_values(by='confidence', ascending=False)
        img_np = np.array(img)
        # Use the highest-confidence box as the reference; only boxes of
        # comparable size are merged in (original comment translated).
        width, height = None, None
        for i, box in res.iterrows():
            w, h = box['xmax'] - box['xmin'], box['ymax'] - box['ymin']
            if width is None:  # first run: highest-confidence box is reference
                width, height = w, h
                continue
            # Flag boxes whose size deviates more than err_ratio from the
            # reference by setting class=1; class==1 is used as a drop marker.
            if w > width * (1 + err_ratio) or w < width * (1 - err_ratio) \
                    or h > height * (1 + err_ratio) or h < height * (1 - err_ratio):
                res.loc[i, 'class'] = 1
            if box['confidence'] < threshold:
                res.loc[i, 'class'] = 1
        # Keep only the boxes that were not flagged above.
        res_less = res.drop(index=res[res['class'] == 1].index)
        print("检测结果：\n", res)
        # itertuples rows are (Index, xmin, ymin, xmax, ymax, ...); [1:5]
        # extracts the four coordinates as ints.
        boxes = [list(map(int, i[1:5])) for i in res_less.itertuples()]
        # With at most 5 detections, return one crop directly; otherwise
        # synthesize an enhanced watermark from all crops (translated).
        if len(res) <= 5:
            print("未使用增强")
            # w1, h1, w2, h2 = boxes[0]
            # NOTE(review): random.choice makes this non-deterministic and
            # raises IndexError if every box was filtered out — confirm intended.
            w1, h1, w2, h2 = random.choice(boxes)
            return img_np[h1:h2, w1:w2], boxes
        else:
            print("增强")
            # Resize every crop to the reference size so they can be stacked.
            wms = []  # watermarks
            for w1, h1, w2, h2 in boxes:
                i = img_np[h1:h2, w1:w2]
                i = Image.fromarray(i).resize((int(width), int(height)))
                wms.append(np.array(i))
            # Estimate/enhance the shared watermark from the aligned crops.
            wm = WaterMarkUtil.estimate_watermark_from_images(wms)
            return wm, [list(map(int, i[1:5])) for i in res.itertuples()]

    @staticmethod
    def poisson_reconstruct(gradx, grady, kernel_size=3, num_iters=100, h=0.1,
                            boundary_image=None, boundary_zero=True):
        """Iteratively solve the Poisson equation from gradient fields.

        Builds the Laplacian from Sobel derivatives of ``gradx``/``grady``
        and runs ``num_iters`` Jacobi relaxation sweeps over the interior.

        Parameters
        ----------
        gradx, grady : np.ndarray
            x- and y-gradient images, shape (m, n, p).
        kernel_size : int
            Sobel aperture used to differentiate the gradients again.
        num_iters : int
            Fixed number of relaxation iterations (no early stopping).
        h : float
            Convergence rate / grid step; ``h*h`` scales the Laplacian term.
        boundary_image : np.ndarray | None
            Supplies boundary values when ``boundary_zero`` is False.
        boundary_zero : bool
            If True, use zero boundary conditions.

        Returns
        -------
        np.ndarray
            The reconstructed image estimate.
        """
        fxx = cv2.Sobel(gradx, cv2.CV_64F, 1, 0, ksize=kernel_size)
        fyy = cv2.Sobel(grady, cv2.CV_64F, 0, 1, ksize=kernel_size)
        laplacian = fxx + fyy
        m, n, p = laplacian.shape

        if boundary_zero is True:
            est = np.zeros(laplacian.shape)
        else:
            assert (boundary_image is not None)
            assert (boundary_image.shape == laplacian.shape)
            est = boundary_image.copy()

        # NOTE(review): random interior init makes the result nondeterministic
        # between calls — seed numpy externally if reproducibility matters.
        est[1:-1, 1:-1, :] = np.random.random((m - 2, n - 2, p))

        for _ in range(num_iters):
            # Jacobi sweep: the RHS is evaluated in full before assignment, so
            # each interior pixel is the average of its four neighbours minus
            # the scaled Laplacian.
            est[1:-1, 1:-1, :] = 0.25 * (
                    est[0:-2, 1:-1, :] + est[1:-1, 0:-2, :] +
                    est[2:, 1:-1, :] + est[1:-1, 2:, :] -
                    h * h * laplacian[1:-1, 1:-1, :]
            )
            # The original also copied ``est`` and accumulated per-step squared
            # error into an unused list — an O(m*n) cost per iteration with no
            # consumer — so that dead work is removed.
        return est

    @staticmethod
    def estimate_watermark_from_images(imgs: list, enhance: int = 50):
        """Estimate an enhanced watermark image from multiple aligned crops.

        The per-pixel median of each crop's Sobel gradients suppresses the
        varying backgrounds, leaving the shared watermark gradients; the
        image is rebuilt via Poisson reconstruction and the watermark region
        (found by per-channel Otsu thresholding) is brightened by ``enhance``.
        """
        # Per-crop gradients, then the per-pixel median across all crops.
        grads_x = [cv2.Sobel(im, cv2.CV_64F, 1, 0, ksize=3) for im in imgs]
        grads_y = [cv2.Sobel(im, cv2.CV_64F, 0, 1, ksize=3) for im in imgs]
        Wm_x = np.median(np.array(grads_x), axis=0)
        Wm_y = np.median(np.array(grads_y), axis=0)

        est = WaterMarkUtil.poisson_reconstruct(Wm_x, Wm_y)
        # Rescale into the full 0..255 range and quantize to uint8.
        est: np.ndarray = 255 * (est - np.min(est)) / (np.max(est) - np.min(est))
        est = est.astype(np.uint8)

        # Locate the region to boost: blur + Otsu binarize each channel...
        channels = []
        for ch in range(est.shape[-1]):
            blur = cv2.GaussianBlur(est[:, :, ch], (5, 5), 0)
            _, th = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            channels.append(th)
        # ...then OR the per-channel masks together and broadcast to 3 channels.
        mask = np.zeros_like(channels[0]).astype(bool)
        for th in channels:
            mask = mask | th.astype(bool)
        mask = mask[:, :, np.newaxis].repeat(3, axis=2)
        # Brighten the masked watermark pixels and renormalize.
        est = est + enhance * mask
        est: np.ndarray = 255 * (est - np.min(est)) / (np.max(est) - np.min(est))
        est = est.astype(np.uint8)
        return est

    # @staticmethod
    # def find_text_pos(path, find_text):
    #     reader = easyocr.Reader(['ch_sim', 'en'], gpu=False, model_storage_directory='./EasyOCRModel')
    #     img = cv2.imread(path)
    #     result_text = reader.readtext(img)
    #     for detection in result_text:
    #         bbox = detection[0]
    #         bbox = [(int(point[0]), int(point[1])) for point in bbox]
    #         text = detection[1]
    #         precision = detection[2]
    #         if find_text == text or text in find_text:
    #             cv2.polylines(img, [np.array(bbox)], isClosed=True, color=(0, 0, 255), thickness=2)
    #             # 以下是在方框上方加上识别的文字功能
    #             # Put the detected text on the image
    #             # cv2.putText(img,utf8_text.decode('utf-8'), (bbox[0][0], bbox[0][1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    #     cv2.imshow('1', img)
    #     cv2.waitKey(0)
    #     cv2.destroyAllWindows()
