import os
import random
import zipfile

import cv2
import numpy as np
from tqdm import tqdm

class DisplayDegradationSimulator:
    """Degradation generator simulating LED-display characteristics.

    Every ``simulate_*`` method takes an 8-bit BGR image of shape
    (H, W, 3) and returns a degraded copy with the same shape and dtype.
    """

    def __init__(self):
        # Degradation types that apply_comprehensive_degradation samples from.
        self.degradation_types = [
            'motion_blur', 'color_shift', 'low_bit_depth',
            'compression_artifacts', 'nonuniform_brightness'
        ]

    def simulate_color_shift(self, image, temperature_shift=1000):
        """Simulate a color-temperature shift (low-gray calibration experience).

        Only the *sign* of ``temperature_shift`` is used: positive shifts
        warm (toward yellow/red), negative shifts cool (toward blue).
        """
        # Work in LAB so only the chroma (b) channel is altered.
        lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)

        # BUGFIX: the b channel is uint8; adding a Python int to the uint8
        # slice wraps around (e.g. 250 + 10 -> 4) before np.clip can act.
        # Widen to int16 first so large values saturate at 255 instead.
        b_channel = lab[:, :, 2].astype(np.int16)
        shift = random.randint(5, 15)
        if temperature_shift > 0:
            # Warm shift: push the b channel toward yellow.
            b_channel += shift
        else:
            # Cool shift: push the b channel toward blue.
            b_channel -= shift
        lab[:, :, 2] = np.clip(b_channel, 0, 255).astype(np.uint8)

        return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

    def simulate_low_bit_depth(self, image, target_bits=6):
        """Simulate reduced bit depth (gray-level count experience).

        Quantizes each channel to ``2**target_bits`` levels and rescales
        back to the 8-bit range, producing visible banding.
        """
        max_val = 2 ** target_bits - 1
        scale = 255.0 / max_val

        # Round to the nearest level (the old astype-truncation biased
        # every pixel downward), then rescale and clamp back to 8 bits.
        quantized = np.round(image / scale)
        restored = np.clip(np.round(quantized * scale), 0, 255).astype(np.uint8)

        return restored

    def simulate_nonuniform_brightness(self, image, nonuniformity_level=0.2):
        """Simulate brightness non-uniformity (uniformity-calibration experience)."""
        h, w = image.shape[:2]

        # Normalized coordinate grid over [-1, 1] in both axes.
        x = np.linspace(-1, 1, w)
        y = np.linspace(-1, 1, h)
        xx, yy = np.meshgrid(x, y)

        # Radial falloff mimicking LED-panel brightness decay toward the
        # edges; clipped so the darkest area keeps >= 70% brightness.
        radius = np.sqrt(xx ** 2 + yy ** 2)
        brightness_map = np.clip(1 - nonuniformity_level * radius, 0.7, 1.0)

        # Apply the per-pixel gain in float to avoid intermediate overflow.
        result = image.astype(np.float32) * brightness_map[:, :, np.newaxis]
        return np.clip(result, 0, 255).astype(np.uint8)

    def simulate_compression_artifacts(self, image, quality=20):
        """Simulate JPEG compression artifacts (transmission-optimization experience)."""
        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
        ok, encimg = cv2.imencode('.jpg', image, encode_param)
        if not ok:
            # Encoding failed; return the input unchanged rather than
            # decoding an invalid buffer.
            return image
        return cv2.imdecode(encimg, 1)

    def simulate_motion_blur(self, image, kernel_size=15):
        """Simulate horizontal motion blur (high-refresh-rate experience)."""
        # Horizontal line kernel: averages kernel_size pixels along a row.
        kernel = np.zeros((kernel_size, kernel_size), dtype=np.float32)
        kernel[(kernel_size - 1) // 2, :] = 1.0 / kernel_size

        return cv2.filter2D(image, -1, kernel)

    def apply_comprehensive_degradation(self, hr_image):
        """Apply a random combination of degradations plus 4x down/up-sampling.

        Returns an image the same size as ``hr_image`` that emulates a
        low-quality LED-display capture.
        """
        degraded = hr_image.copy()

        # Randomly pick 2-4 degradation types, applied in sampled order.
        degradation_sequence = random.sample(self.degradation_types,
                                             random.randint(2, 4))

        for deg_type in degradation_sequence:
            if deg_type == 'motion_blur' and random.random() > 0.3:
                # Motion blur is skipped 30% of the time even when sampled.
                degraded = self.simulate_motion_blur(degraded,
                                                     random.randint(10, 20))
            elif deg_type == 'color_shift':
                temp_shift = random.choice([-800, -500, 500, 800])
                degraded = self.simulate_color_shift(degraded, temp_shift)
            elif deg_type == 'low_bit_depth':
                degraded = self.simulate_low_bit_depth(degraded,
                                                       random.choice([5, 6, 7]))
            elif deg_type == 'compression_artifacts':
                degraded = self.simulate_compression_artifacts(
                    degraded, random.randint(15, 35))
            elif deg_type == 'nonuniform_brightness':
                degraded = self.simulate_nonuniform_brightness(
                    degraded, random.uniform(0.1, 0.3))

        # Final 4x resolution drop and restore (physical resolution limit).
        # max(1, ...) guards against a zero-sized resize for tiny inputs.
        h, w = degraded.shape[:2]
        lr = cv2.resize(degraded, (max(1, w // 4), max(1, h // 4)),
                        interpolation=cv2.INTER_CUBIC)
        return cv2.resize(lr, (w, h), interpolation=cv2.INTER_CUBIC)

def create_display_characteristic_dataset():
    """Create the display-characteristic dataset (paired HR/LR images).

    Locates a source-image directory (DIV2K validation set, REDS train
    samples, or a freshly extracted DIV2K zip), degrades each image with
    DisplayDegradationSimulator, and writes HR/LR pairs under
    ``datasets/display_dataset/``.
    """
    simulator = DisplayDegradationSimulator()

    # Preferred source: the extracted DIV2K validation set.
    input_dir = "datasets/REDS/DIV2K_valid_HR"

    if not os.path.exists(input_dir):
        # Fallback 1: REDS train samples.
        input_dir = "datasets/REDS/train_samples"
        if not os.path.exists(input_dir):
            # Fallback 2: extract the DIV2K zip if it is present.
            zip_path = "datasets/REDS/DIV2K_valid_HR.zip"
            if os.path.exists(zip_path):
                print(f"解压DIV2K数据集: {zip_path}")
                input_dir = "datasets/REDS/DIV2K_valid_HR"
                os.makedirs(input_dir, exist_ok=True)
                # BUGFIX: actually extract the archive. The old code only
                # created the directory, so the scan below always found
                # zero images and the function returned without output.
                with zipfile.ZipFile(zip_path) as zf:
                    zf.extractall(input_dir)
            else:
                print("未找到有效数据源，请先准备高质量图像数据集")
                return

    output_hr_dir = "datasets/display_dataset/HR"
    output_lr_dir = "datasets/display_dataset/LR"

    os.makedirs(output_hr_dir, exist_ok=True)
    os.makedirs(output_lr_dir, exist_ok=True)

    # Collect all image files, recursing into subfolders.
    image_files = []
    for root, _, files in os.walk(input_dir):
        for file in files:
            if file.lower().endswith(('.png', '.jpg', '.jpeg')):
                image_files.append(os.path.join(root, file))

    if not image_files:
        print(f"在 {input_dir} 中未找到图像文件")
        return

    # Cap the number of processed images to keep runtime bounded.
    max_images = 50
    image_files = image_files[:max_images]

    print(f"找到 {len(image_files)} 个图像文件进行处理")

    for img_path in tqdm(image_files, desc="生成显示特性数据集"):
        # Output files are keyed by basename.
        # NOTE(review): images from different subfolders that share a
        # basename will overwrite each other — confirm names are unique.
        img_file = os.path.basename(img_path)
        hr_image = cv2.imread(img_path)

        if hr_image is None:
            print(f"无法读取图像: {img_path}")
            continue

        # Generate the degraded (LR) counterpart.
        lr_image = simulator.apply_comprehensive_degradation(hr_image)

        # Save the HR/LR pair under matching filenames.
        cv2.imwrite(os.path.join(output_hr_dir, img_file), hr_image)
        cv2.imwrite(os.path.join(output_lr_dir, img_file), lr_image)

    print(f"数据集生成完成！保存在 {output_hr_dir} 和 {output_lr_dir}")

if __name__ == "__main__":
    # Script entry point: build the HR/LR display-characteristic dataset.
    create_display_characteristic_dataset()