# image_preprocess.py

import cv2
import imutils
import numpy as np
import torch
from torchvision import transforms


# ------------------- New: adaptive color enhancement -------------------
def adaptive_enhance(img):
    """Boost color and brightness via a percentile contrast stretch.

    Converts BGR -> HSV, stretches the 2nd..98th percentile range of the
    saturation and value channels onto [0, 255] (hue is left untouched),
    then converts back to BGR.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float32)
    hue, sat, val = cv2.split(hsv)

    def _percentile_stretch(channel):
        # Clip to the 2%-98% range so outliers don't dominate the scaling.
        lo, hi = np.percentile(channel, (2, 98))
        scaled = (channel - lo) * 255.0 / (hi - lo + 1e-5)  # epsilon avoids /0
        return np.clip(scaled, 0, 255).astype(np.uint8)

    stretched = cv2.merge([
        hue.astype(np.uint8),
        _percentile_stretch(sat),
        _percentile_stretch(val),
    ])
    return cv2.cvtColor(stretched, cv2.COLOR_HSV2BGR)


# ------------------- New: CLAHE local histogram equalization -------------------
def apply_CLAHE(img):
    """Apply contrast-limited adaptive histogram equalization to the luma channel.

    Works in YUV space so only brightness (Y) is equalized and the chroma
    channels — hence the colors — are preserved.
    """
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    luma, chroma_u, chroma_v = cv2.split(yuv)
    equalizer = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    merged = cv2.merge([equalizer.apply(luma), chroma_u, chroma_v])
    return cv2.cvtColor(merged, cv2.COLOR_YUV2BGR)


# ------------------- New: image denoising -------------------
def denoise_image(img):
    """Reduce noise with non-local-means denoising for color images.

    Strength 10 for both luminance and color; standard 7-pixel template
    and 21-pixel search windows.
    """
    return cv2.fastNlMeansDenoisingColored(
        img, None, h=10, hColor=10, templateWindowSize=7, searchWindowSize=21
    )

# ------------------- New: skewed-text correction -------------------
def correct_skew(image):
    """Estimate the dominant skew of foreground content and rotate to undo it.

    Classic deskew recipe: invert + Otsu-binarize a grayscale copy, take the
    minimum-area rectangle around all foreground pixels, and rotate the image
    by the negated rectangle angle using imutils.rotate_bound (canvas grows,
    so no corners are clipped).

    FIX: the original angle adjustment assumed the OpenCV < 4.5 minAreaRect
    convention (angles in [-90, 0)). OpenCV >= 4.5 returns angles in (0, 90],
    so the old `angle < -45` branch never fired and near-horizontal content
    could be rotated by ~90 degrees. The angle is now normalized into
    (-45, 45] under either convention.

    Returns the input unchanged if the binarized image has no foreground.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.bitwise_not(gray)  # make text/content white on black
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]

    # NOTE(review): np.where yields (row, col) i.e. (y, x) pairs; this follows
    # the widely used deskew snippet, which tolerates the axis swap because
    # only the rectangle's angle (not its position) is used.
    coords = np.column_stack(np.where(thresh > 0))
    if coords.shape[0] == 0:
        return image  # blank page: nothing to deskew

    angle = cv2.minAreaRect(coords)[-1]
    # Normalize into (-45, 45], covering both OpenCV angle conventions.
    if angle < -45:
        angle += 90
    elif angle > 45:
        angle -= 90
    return imutils.rotate_bound(image, -angle)

# ------------------- New: super-resolution enhancement -------------------
def super_resolve(img, scale=2):
    """Upscale an image with the ESPCN DNN super-resolution model.

    Args:
        img: BGR image to upscale.
        scale: integer upscaling factor; the matching pre-trained model file
            ``ESPCN_x{scale}.pb`` must exist in the working directory
            (download from https://github.com/Saafke/ESPCN-super-resolution).

    FIX: the model path was hard-coded to "ESPCN_x2.pb", so any `scale`
    other than 2 loaded a network that did not match the requested factor.
    The path is now derived from `scale`.
    """
    sr = cv2.dnn_superres.DnnSuperResImpl_create()
    model_path = f"ESPCN_x{scale}.pb"
    sr.readModel(model_path)
    sr.setModel("espcn", scale)
    return sr.upsample(img)

# ------------------- New: automatic border cropping -------------------
def auto_crop(image):
    """Crop away near-black borders around the image content.

    Thresholds the grayscale image at 10 to build a content mask, then crops
    to the bounding rectangle of all external contours. Returns the image
    unchanged if no content is found (fully dark image).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, mask = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return image
    all_points = np.vstack(contours)
    x, y, w, h = cv2.boundingRect(all_points)
    return image[y:y + h, x:x + w]


# --------- DeblurGAN class implementation ---------

class DeblurGAN:
    """Wraps a pre-trained DeblurGAN generator for single-image deblurring."""

    def __init__(self, model_path):
        # Full serialized model (architecture + weights), loaded onto CPU.
        self.model = torch.load(model_path, map_location='cpu')
        self.model.eval()
        # Map uint8 RGB -> float tensor in [-1, 1], as the generator expects.
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])

    def deblur(self, img):
        """Deblur a BGR uint8 image; returns a BGR uint8 image of equal size."""
        # Model works in RGB; the pipeline works in BGR.
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        batch = self.transform(rgb).unsqueeze(0)  # shape (1, 3, H, W)
        with torch.no_grad():
            restored = self.model(batch)
        chw = restored.squeeze(0).cpu()
        hwc = chw.permute(1, 2, 0).numpy()
        # Undo the [-1, 1] normalization and convert back to uint8.
        pixels = np.clip((hwc * 0.5 + 0.5) * 255, 0, 255).astype(np.uint8)
        return cv2.cvtColor(pixels, cv2.COLOR_RGB2BGR)

# --------- Perspective correction ---------

def perspective_correction(img, src_points=None):
    """Warp the quadrilateral `src_points` onto the full image rectangle.

    Args:
        img: input BGR image.
        src_points: four (x, y) corner points in the order top-left,
            top-right, bottom-right, bottom-left. Accepts any array-like
            (list, tuple, ndarray). When None, the source defaults to the
            image corners, which makes the warp an identity transform.

    Returns:
        The perspective-corrected image, same size as the input.

    FIX: cv2.getPerspectiveTransform requires a float32 NumPy array; the
    original crashed when callers passed a plain Python list of points.
    The input is now coerced with np.asarray(..., dtype=np.float32).
    """
    h, w = img.shape[:2]
    dst_points = np.array([
        [0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]
    ], dtype=np.float32)

    if src_points is None:
        # Default: identity warp (source corners == destination corners).
        src_points = dst_points.copy()
    else:
        src_points = np.asarray(src_points, dtype=np.float32)

    matrix = cv2.getPerspectiveTransform(src_points, dst_points)
    return cv2.warpPerspective(img, matrix, (w, h))



# ------------------- Lighting and glare robustness enhancement (MSRCR) -------------------
def msrcr(img, sigma_list=(15, 80, 250), G=5, b=25, alpha=125, beta=46):
    """Multi-Scale Retinex with Color Restoration (MSRCR).

    Improves robustness to uneven lighting and glare by subtracting the
    log of Gaussian-blurred illumination estimates at several scales, then
    re-weighting channels by their share of total intensity.

    Args:
        img: BGR uint8 image.
        sigma_list: Gaussian sigmas for the multi-scale retinex. FIX: the
            default was a mutable list (shared across calls); it is now an
            immutable tuple — backward-compatible for all callers.
        G, b: final gain and offset.
        alpha, beta: color-restoration parameters.

    Returns:
        Enhanced BGR uint8 image.
    """
    img = img.astype(np.float32) + 1.0  # +1 keeps log10 defined at zero pixels
    retinex = np.zeros_like(img)
    for sigma in sigma_list:
        blur = cv2.GaussianBlur(img, (0, 0), sigma)
        retinex += np.log10(img) - np.log10(blur + 1e-6)
    retinex /= len(sigma_list)

    # Color restoration: weight each channel by its fraction of total intensity.
    img_sum = np.sum(img, axis=2, keepdims=True)
    color_restoration = beta * (np.log10(alpha * img) - np.log10(img_sum + 1e-6))

    msrcr_result = G * (retinex * color_restoration + b)
    return np.clip(msrcr_result, 0, 255).astype(np.uint8)


# ------------------- Occluded / deformed text recognition enhancement -------------------

# ------------------- Morphological repair of text in the image: fixes broken or incomplete character strokes so the text is more coherent. -------------------
def text_morphology_repair(img):
    """Close small gaps in text strokes via morphological closing.

    Otsu-binarizes a grayscale copy, applies two iterations of closing with
    a 3x3 rectangular kernel, and returns the result as a 3-channel BGR
    image so it slots back into the color pipeline.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, rect_kernel, iterations=2)
    return cv2.cvtColor(closed, cv2.COLOR_GRAY2BGR)
# ------------------- Inpaint occluded or damaged regions (identified by `mask`) so they look natural -------------------

def inpaint_text_regions(img, mask):
    """Fill mask-marked (occluded/damaged) regions using Telea inpainting.

    `mask` is a single-channel image where non-zero pixels mark the areas
    to reconstruct; the inpainting radius is 3 pixels.
    """
    return cv2.inpaint(img, mask, 3, cv2.INPAINT_TELEA)


# ------------------- Top-level entry point -------------------
# Module-level DeblurGAN instance (avoids reloading the model on every call)
_deblurgan_instance = None


def enhance_image(img, use_deblur=False, use_superres=False, apply_perspective=True,
                  enhance_light=True, repair_text=False, inpaint_mask=None):
    """Run the full preprocessing pipeline on a BGR image.

    Order of operations: optional DeblurGAN deblurring, optional perspective
    correction, optional MSRCR lighting enhancement, then the always-on chain
    (adaptive enhance -> CLAHE -> denoise -> deskew -> auto-crop), optional
    text repair / inpainting, and finally optional super-resolution.

    Raises:
        ValueError: if `img` is None.
    """
    global _deblurgan_instance

    if img is None:
        raise ValueError("输入图像为空")

    if use_deblur:
        # Lazily build the generator once and reuse it for all later calls.
        if _deblurgan_instance is None:
            _deblurgan_instance = DeblurGAN("DeblurGAN_model.pth")  # replace with your model path
        img = _deblurgan_instance.deblur(img)

    if apply_perspective:
        img = perspective_correction(img)

    if enhance_light:
        img = msrcr(img)

    # Unconditional enhancement chain, applied in this fixed order.
    for step in (adaptive_enhance, apply_CLAHE, denoise_image, correct_skew, auto_crop):
        img = step(img)

    if repair_text:
        img = text_morphology_repair(img)
        if inpaint_mask is not None:
            img = inpaint_text_regions(img, inpaint_mask)

    if use_superres:
        img = super_resolve(img)

    return img