import os
import cv2
import numpy as np
import torch
import math
from DHsystem.algorithms.DehazeNet.net import DehazeNet
import torchvision.transforms as transforms

def process_image(input_path, output_dir):
    """Dehaze the image at ``input_path`` and write the result into ``output_dir``.

    Pipeline: DehazeNet predicts a transmission map, which is refined with a
    guided filter; the scene radiance is then recovered via the atmospheric
    scattering model and post-processed for contrast.

    Args:
        input_path: Path to the hazy input image.
        output_dir: Directory the processed image is written to.

    Returns:
        The basename of the written output file (``processed_<input name>``).

    Raises:
        ValueError: If the input image cannot be read.
    """
    # Load the model; fall back to CPU when CUDA is unavailable instead of
    # crashing on an unconditional .cuda() call.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = DehazeNet()
    model_path = os.path.join(os.path.dirname(__file__), 'weights', 'best_indoor.pth')

    # The checkpoint is a wrapper dict; the weights live under 'state_dict'.
    # map_location lets the same checkpoint load on CPU-only machines.
    checkpoint = torch.load(model_path, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])

    model.eval()
    model = model.to(device)

    # Read the input image (BGR, uint8).
    img = cv2.imread(input_path)
    if img is None:
        raise ValueError(f"无法读取图像: {input_path}")

    h_orig, w_orig = img.shape[:2]
    # NOTE(review): padding assumes the network shrinks its output by ~15 px
    # and wants dimensions rounded up to a multiple of 16 — confirm against
    # the DehazeNet architecture.
    required_reduction = 15
    pad_h = (h_orig + required_reduction + 15) // 16 * 16 - h_orig
    pad_w = (w_orig + required_reduction + 15) // 16 * 16 - w_orig
    padded = cv2.copyMakeBorder(img, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT)

    # Preprocess: [0,1] tensor, then normalize each channel to [-1,1].
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
    tensor_in = transform(padded).unsqueeze(0).to(device)

    # Predict the transmission map.
    with torch.no_grad():
        te = model(tensor_in).cpu().numpy().squeeze()

    # Crop the transmission map back to the original image size.
    t = te[:h_orig, :w_orig]

    # Edge-aware refinement of the transmission estimate.
    t_refined = TransmissionRefine(img, t)

    # Estimate atmospheric light from the dark channel.
    dark = DarkChannel(img, sz=15)
    A = AtmLight(img, dark)

    # Recover the scene radiance, then enhance contrast.
    result = Recover(img, t_refined, A)
    result = enhance_pipeline(result)

    # Save the result. Bug fix: the name was previously the literal string
    # "processed_(unknown)" and the computed basename was never used.
    filename = os.path.basename(input_path)
    output_filename = f"processed_{filename}"
    output_path = os.path.join(output_dir, output_filename)
    cv2.imwrite(output_path, result)

    return output_filename


# The following helper functions were extracted from pre_trans.py
def DarkChannel(im, sz):
    """Dark channel prior: per-pixel minimum over the colour channels,
    followed by a square min-filter (erosion) of side ``sz``."""
    channels = cv2.split(im)
    per_pixel_min = cv2.min(cv2.min(channels[2], channels[1]), channels[0])
    window = cv2.getStructuringElement(cv2.MORPH_RECT, (sz, sz))
    return cv2.erode(per_pixel_min, window)


def TransmissionRefine(im, et):
    """Refine the transmission estimate ``et`` with a guided filter,
    using the grayscale image (scaled to [0, 1]) as guidance."""
    guide = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    guide = np.float64(guide) / 255
    return Guidedfilter(guide, et, 50, 0.001)


def Guidedfilter(im, p, r, eps):
    """Guided filter (He et al.): smooth ``p`` while preserving the edges
    of guidance image ``im``; ``r`` is the box-window size, ``eps`` the
    regularization term."""
    def _box(x):
        # Normalized box filter in float64, window (r, r).
        return cv2.boxFilter(x, cv2.CV_64F, (r, r))

    mu_i = _box(im)
    mu_p = _box(p)
    # Local covariance of (guide, input) and variance of the guide.
    cov_ip = _box(im * p) - mu_i * mu_p
    var_i = _box(im * im) - mu_i * mu_i

    # Local linear model coefficients q = a*I + b.
    a = cov_ip / (var_i + eps)
    b = mu_p - a * mu_i

    # Average the coefficients before applying the model.
    return _box(a) * im + _box(b)


def Recover(im, t, A):
    """Invert the atmospheric scattering model: J = (I - A) / t + A.

    The transmission ``t`` is clamped to [0.1, 1.0] to avoid division
    blow-up in dense haze; the result is clipped to valid 8-bit range.
    """
    safe_t = np.maximum(np.minimum(t, 1.0), 0.1)
    radiance = (im - A) / safe_t[..., np.newaxis] + A
    return np.clip(radiance, 0, 255).astype(np.uint8)


def enhance_pipeline(img):
    """Post-process: edge-preserving smoothing, CLAHE on the L channel,
    then a brightness-adaptive contrast stretch."""
    smoothed = cv2.bilateralFilter(img, d=15, sigmaColor=50, sigmaSpace=50)

    # Equalize only luminance so colours are untouched.
    lab = cv2.cvtColor(smoothed, cv2.COLOR_BGR2LAB)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    lab[:, :, 0] = clahe.apply(lab[:, :, 0])
    enhanced = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    # Darker images get a stronger contrast gain.
    brightness = np.mean(cv2.cvtColor(enhanced, cv2.COLOR_BGR2GRAY))
    if brightness < 85:
        gain = 1.8
    elif brightness < 170:
        gain = 1.4
    else:
        gain = 1.1
    return cv2.convertScaleAbs(enhanced, alpha=gain, beta=0)


def AtmLight(im, dark):
    """Estimate the atmospheric light A.

    Averages the image colours over the brightest 0.1% of dark-channel
    pixels (at least one pixel) and returns a (1, 3) array.
    """
    rows, cols = im.shape[:2]
    total = rows * cols
    top_k = max(total // 1000, 1)

    flat_dark = dark.reshape(total)
    flat_im = im.reshape(total, 3)

    # Indices of the top_k brightest dark-channel pixels (order irrelevant).
    brightest = np.argpartition(flat_dark, -top_k)[-top_k:]
    return flat_im[brightest].mean(axis=0).reshape(1, 3)


