import argparse
import glob
import os
import sys
from collections import deque
from pprint import pprint
from typing import Deque, List

import cv2
import numpy as np
import torch


def verbose_log(verbose: bool, message: str):
    """Print *message* with a ``[VERBOSE]`` prefix, but only in verbose mode."""
    if not verbose:
        return
    print(f"[VERBOSE] {message}")
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

def list_images_sorted(input_dir: str) -> List[str]:
    """Collect png/jpg/jpeg/bmp/tiff files in *input_dir*, sorted by path."""
    patterns = ("*.png", "*.jpg", "*.jpeg", "*.bmp", "*.tiff")
    matches = [
        path
        for pattern in patterns
        for path in glob.glob(os.path.join(input_dir, pattern))
    ]
    return sorted(matches)


def to_yuv_float(img_bgr: np.ndarray):
    """Convert a uint8 BGR image into separate float32 Y, U, V planes in [0, 1]."""
    normalized = img_bgr.astype(np.float32) / 255.0
    yuv = cv2.cvtColor(normalized, cv2.COLOR_BGR2YUV)
    y, u, v = cv2.split(yuv)
    return y, u, v


def from_yuv_float(y: np.ndarray, u: np.ndarray, v: np.ndarray) -> np.ndarray:
    """Merge float32 YUV planes back into a clipped uint8 BGR image."""
    merged = cv2.merge([y, u, v])
    bgr = np.clip(cv2.cvtColor(merged, cv2.COLOR_YUV2BGR), 0.0, 1.0)
    # +0.5 rounds to nearest before the truncating uint8 cast.
    return (bgr * 255.0 + 0.5).astype(np.uint8)


def temporal_filter(frames: List[np.ndarray], weights: List[float]) -> np.ndarray:
    """Blend a window of same-shape float32 frames with the given weights.

    Weights need not be normalized: the accumulated sum is divided by the
    total weight (plus a tiny epsilon to guard against division by zero).
    """
    blended = np.zeros_like(frames[0], dtype=np.float32)
    for frame, weight in zip(frames, weights):
        blended += frame * weight
    return blended / (np.sum(weights) + 1e-8)


def bilateral_filter(channel: np.ndarray, spatial_sigma: float, color_sigma: float, diameter: int = 0) -> np.ndarray:
    """Edge-preserving bilateral smoothing of one float32 plane in [0, 1].

    The plane is scaled to 0-255 so sigmaColor keeps its conventional meaning,
    filtered, then scaled back and clipped. diameter=0 lets OpenCV derive the
    neighborhood size from sigmaSpace.
    """
    scaled = (channel * 255.0).astype(np.float32)
    filtered = cv2.bilateralFilter(scaled, d=diameter, sigmaColor=color_sigma, sigmaSpace=spatial_sigma, borderType=cv2.BORDER_REFLECT)
    return np.clip(filtered / 255.0, 0.0, 1.0)


def build_temporal_weights(radius: int, strength: float) -> List[float]:
    """Gaussian temporal weights over offsets [-radius, radius].

    Higher *strength* widens sigma, pushing the window toward a uniform
    average (more smoothing); lower strength concentrates weight on the
    center frame. A non-positive radius degenerates to a single weight.
    """
    if radius <= 0:
        return [1.0]
    sigma = max(0.5, radius * (0.5 + strength))
    offsets = np.arange(-radius, radius + 1, dtype=np.float32)
    gauss = np.exp(-0.5 * np.square(offsets / sigma))
    return gauss.tolist()


def clamp_strength(x: float, lo=0.0, hi=1.0) -> float:
    """Clamp *x* into the inclusive range [lo, hi] and return it as float."""
    if x < lo:
        return float(lo)
    if x > hi:
        return float(hi)
    return float(x)


class TemporalDenoiser:
    """Sliding-window temporal averager for Y/U/V planes.

    Keeps a bounded deque of recent frames per channel and blends them with
    Gaussian weights whose spread grows with *strength*. The first pushed
    frame pre-fills each buffer ("warm start") so early outputs are not
    biased toward zero-padding.
    """

    def __init__(self, radius_y: int, radius_uv: int, strength: float):
        # radius_* = frames on each side of the window center; window = 2r + 1.
        self.radius_y = radius_y
        self.radius_uv = radius_uv
        self.strength = clamp_strength(strength)
        y_len = 2 * radius_y + 1 if radius_y > 0 else 1
        uv_len = 2 * radius_uv + 1 if radius_uv > 0 else 1
        self.buf_y: Deque[np.ndarray] = deque(maxlen=y_len)
        self.buf_u: Deque[np.ndarray] = deque(maxlen=uv_len)
        self.buf_v: Deque[np.ndarray] = deque(maxlen=uv_len)
        self.weights_y = build_temporal_weights(radius_y, self.strength)
        self.weights_uv = build_temporal_weights(radius_uv, self.strength)

    @staticmethod
    def _push_one(buf: "Deque[np.ndarray]", frame: np.ndarray) -> None:
        """Append *frame* to *buf*; on the very first push, pre-fill the whole
        buffer with it to avoid startup bias (warm start)."""
        if len(buf) == 0:
            for _ in range(buf.maxlen):
                buf.append(frame)
        else:
            buf.append(frame)

    def push(self, y: np.ndarray, u: np.ndarray, v: np.ndarray) -> None:
        """Insert the current frame's Y/U/V planes into their buffers.

        All three buffers are maintained in lockstep; frames are assumed to
        arrive in sequence order.
        """
        self._push_one(self.buf_y, y)
        self._push_one(self.buf_u, u)
        self._push_one(self.buf_v, v)

    def get(self):
        """Return the temporally filtered (y, u, v) planes for the window.

        If a buffer is somehow shorter than its weight vector (should not
        happen after the warm start), weights are rebuilt for the smaller
        window so the blend stays well-defined.
        """
        y_frames = list(self.buf_y)
        u_frames = list(self.buf_u)
        v_frames = list(self.buf_v)

        wy = self.weights_y
        wuv = self.weights_uv

        if len(y_frames) != len(wy):
            wy = build_temporal_weights(radius=(len(y_frames) - 1) // 2, strength=self.strength)
        if len(u_frames) != len(wuv):
            wuv = build_temporal_weights(radius=(len(u_frames) - 1) // 2, strength=self.strength)

        y_out = temporal_filter(y_frames, wy)
        u_out = temporal_filter(u_frames, wuv)
        v_out = temporal_filter(v_frames, wuv)
        return y_out, u_out, v_out


def denoise_frame(y: np.ndarray, u: np.ndarray, v: np.ndarray,
                  denoiser: TemporalDenoiser,
                  uv_spatial_strength: float):
    """Temporally filter all three planes, then spatially smooth the chroma.

    The current frame is pushed into the denoiser's buffers first; U/V then
    receive an additional bilateral pass whose sigmas scale with
    *uv_spatial_strength* in [0, 1].
    """
    denoiser.push(y, u, v)
    y_t, u_t, v_t = denoiser.get()

    # Map uv_spatial_strength [0..1] onto bilateral sigmas:
    # sigmaSpace spans 3..10, sigmaColor spans 20..100 (moderate, tunable).
    sigma_space = 3 + uv_spatial_strength * 7
    sigma_color = 20 + uv_spatial_strength * 80

    u_smoothed = bilateral_filter(u_t, spatial_sigma=sigma_space, color_sigma=sigma_color)
    v_smoothed = bilateral_filter(v_t, spatial_sigma=sigma_space, color_sigma=sigma_color)

    return y_t, u_smoothed, v_smoothed



def _select_model(model_name: str):
    """Map a Real-ESRGAN model name to its network and native upscale factor.

    Returns:
        (model, netscale): the instantiated network and its trained scale.

    Raises:
        ValueError: if *model_name* is not one of the supported models.
    """
    if model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        return RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4), 4
    if model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        return RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4), 4
    if model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        return RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4), 4
    if model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        return RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2), 2
    if model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        return SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'), 4
    if model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        return SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu'), 4
    raise ValueError(f'Unsupported model_name: {model_name}')


def main():
    """CLI entry point: denoise a sorted image sequence (temporal Y/UV
    averaging plus bilateral chroma smoothing in YUV space), then optionally
    run each frame through Real-ESRGAN for quality enhancement.

    Fixes vs. earlier revision: a failed Real-ESRGAN enhance no longer drops
    the frame (the denoised image is written instead), and a missing
    model_path skips enhancement instead of raising ValueError.
    """
    parser = argparse.ArgumentParser(description="Multi-algorithm video/image-sequence denoise + quality enhancement (YUV temporal/spatial + Real-ESRGAN).")
    parser.add_argument("-i","--input_dir", type=str, required=True, help="Input directory of sequential images.")
    parser.add_argument("-o","--output_dir", type=str, required=True, help="Output directory.")
    parser.add_argument("-s","--strength", type=float, default=0.5, help="Denoise strength in [0,1]. Higher = stronger denoise.")
    parser.add_argument("-n","--no_real_esrgan", default=False, action="store_true", help="Disable real esrgan denoise.")
    parser.add_argument("--radius_y", type=int, default=2, help="Temporal radius for Y channel (frames on each side).")
    parser.add_argument("--radius_uv", type=int, default=1, help="Temporal radius for UV channels.")
    parser.add_argument("--model_path", type=str, default="D:/source/SR/Real-ESRGAN/weights/RealESRGAN_x4plus.pth", help="Path to Real-ESRGAN model.")
    parser.add_argument("--device", type=str, default="cuda", help="Device for Real-ESRGAN: 'cuda' or 'cpu'.")
    parser.add_argument("--fp32", action="store_true", help="Use fp32 (disable half precision) for Real-ESRGAN.")
    parser.add_argument("--tile", type=int, default=256, help="Tile size for Real-ESRGAN (0 means no tiling).")
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('-v', '--verbose', action='store_true', help='Enable verbose logging for detailed execution information.')
    args = parser.parse_args()

    if args.verbose:
        pprint(vars(args))
    verbose_log(args.verbose, "=== Real-ESRGAN 图像降噪程序启动 ===")
    # sys.version (not os.sys.version) is the supported way to read this.
    verbose_log(args.verbose, f"Python版本: {sys.version}")
    verbose_log(args.verbose, f"PyTorch版本: {torch.__version__}")
    verbose_log(args.verbose, f"CUDA可用: {torch.cuda.is_available()}")
    if torch.cuda.is_available():
        verbose_log(args.verbose, f"CUDA版本: {torch.version.cuda}")
        verbose_log(args.verbose, f"GPU设备: {torch.cuda.get_device_name(0)}")

    input_dir = args.input_dir
    output_dir = args.output_dir
    strength = clamp_strength(args.strength)
    radius_y = max(0, args.radius_y)
    radius_uv = max(0, args.radius_uv)
    verbose_log(args.verbose, f"输入目录: {input_dir}")
    verbose_log(args.verbose, f"输出目录: {output_dir}")
    verbose_log(args.verbose, f"降噪强度: {strength}")
    verbose_log(args.verbose, f"Y通道时域半径: {radius_y}")
    verbose_log(args.verbose, f"UV通道时域半径: {radius_uv}")
    verbose_log(args.verbose, f"启用Real-ESRGAN: {not args.no_real_esrgan}")

    os.makedirs(output_dir, exist_ok=True)
    verbose_log(args.verbose, f"创建输出目录: {output_dir}")

    files = list_images_sorted(input_dir)
    verbose_log(args.verbose, f"找到 {len(files)} 个图像文件")
    if not files:
        print("No images found in input_dir.")
        return

    denoiser = TemporalDenoiser(radius_y=radius_y, radius_uv=radius_uv, strength=strength)

    # Real-ESRGAN setup (optional). Skip gracefully when no model path is
    # given instead of crashing on an unknown (None) model name.
    upsampler = None
    if args.model_path is not None:
        model_name = os.path.splitext(os.path.basename(args.model_path))[0]
        verbose_log(args.verbose, f"使用模型: {model_name}")
        verbose_log(args.verbose, f"模型路径: {args.model_path}")
        verbose_log(args.verbose, f"设备: {args.device}")
        verbose_log(args.verbose, f"使用FP32: {args.fp32}")
        verbose_log(args.verbose, f"瓦片大小: {args.tile}")
        verbose_log(args.verbose, f"瓦片填充: {args.tile_pad}")
        verbose_log(args.verbose, f"预填充: {args.pre_pad}")
        model, netscale = _select_model(model_name)
        try:
            upsampler = RealESRGANer(
                scale=netscale,
                model_path=args.model_path,
                model=model,
                tile=args.tile,
                half=not args.fp32,
                tile_pad=args.tile_pad,
                pre_pad=args.pre_pad,
                device=args.device
            )
        except Exception as e:
            # Best-effort: fall back to plain denoising if the model fails to load.
            print(f"Failed to initialize Real-ESRGAN: {e}")
            print("Proceeding without enhancement.")
            upsampler = None

    with torch.no_grad():
        for idx, f in enumerate(files):
            verbose_log(args.verbose, f"处理文件 {idx+1}/{len(files)}: {os.path.basename(f)}")
            img_bgr = cv2.imread(f, cv2.IMREAD_COLOR)
            if img_bgr is None:
                print(f"Warning: failed to read {f}, skip.")
                continue

            verbose_log(args.verbose, f"图像尺寸: {img_bgr.shape}")

            y, u, v = to_yuv_float(img_bgr)
            verbose_log(args.verbose, f"转换为YUV色彩空间完成")

            # Temporal + spatial denoise in YUV space.
            verbose_log(args.verbose, "开始时域+空域降噪处理")
            y_out, u_out, v_out = denoise_frame(
                y, u, v,
                denoiser=denoiser,
                uv_spatial_strength=strength
            )
            verbose_log(args.verbose, "降噪处理完成")

            # Merge the filtered planes back to a uint8 BGR frame.
            bgr_dn = from_yuv_float(y_out, u_out, v_out)
            out_name = os.path.splitext(os.path.basename(f))[0]
            out_path = os.path.join(output_dir, f"{out_name}.png")
            verbose_log(args.verbose, f"输出文件: {out_path}")

            # Optional Real-ESRGAN enhancement pass.
            if upsampler is not None and not args.no_real_esrgan:
                verbose_log(args.verbose, "应用Real-ESRGAN增强")
                try:
                    # bgr_dn is uint8 HxWx3 (BGR); outscale=1 keeps resolution.
                    bgr_out, _ = upsampler.enhance(bgr_dn, outscale=1)
                    cv2.imwrite(out_path, bgr_out)
                    # Release and synchronize explicitly to curb VRAM build-up.
                    del bgr_out
                    if torch.cuda.is_available():
                        torch.cuda.synchronize()
                        torch.cuda.empty_cache()
                        verbose_log(args.verbose, "CUDA显存已清理")
                except Exception as e:
                    print(f"Real-ESRGAN failed on {os.path.basename(f)}: {e}")
                    verbose_log(args.verbose, f"Real-ESRGAN增强失败，使用原始降噪结果")
                    # BUGFIX: previously nothing was written when enhancement
                    # failed; fall back to the plain denoised frame.
                    cv2.imwrite(out_path, bgr_dn)
            else:
                verbose_log(args.verbose, "跳过Real-ESRGAN增强")
                cv2.imwrite(out_path, bgr_dn)
            print(f"[{idx+1}/{len(files)}] -> {out_path}")
            verbose_log(args.verbose, f"文件处理完成: {os.path.basename(f)}")

    verbose_log(args.verbose, f"=== 处理完成，共处理 {len(files)} 个文件 ===")
    print("Done.")


# Script entry point: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
