#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
去水印模型推理脚本
支持单张图像和批量推理
"""

import os
import torch
import torch.nn as nn
from torchvision import transforms
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import argparse
import time
from tqdm import tqdm
import json

# Import the network model definitions
from watermark_removel import LightweightWatermarkRemover, UltraLightWatermarkRemover


class WatermarkRemovalInference:
    """Inference wrapper for watermark-removal models.

    Handles checkpoint loading, single-image and batch inference,
    simple quality metrics (MSE/PSNR/SSIM), JSON reporting,
    side-by-side visualization and speed benchmarking.
    """

    def __init__(self, model_path, model_type='lightweight', device=None):
        """
        Initialize the inference engine.

        Args:
            model_path: path to the model checkpoint file
            model_type: model architecture ('lightweight' or 'ultra')
            device: 'cuda', 'cpu' or a torch.device; auto-detected if None
        """
        # Accept either a torch.device or a plain string ('cuda'/'cpu').
        # argparse hands us a string, but benchmark_speed reads
        # self.device.type, so always normalize to a torch.device.
        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = torch.device(device)
        print(f"使用设备: {self.device}")

        # Load the model weights.
        self.model = self.load_model(model_path, model_type)

        # Input preprocessing: fixed 256x256, normalized to [-1, 1].
        self.transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
        ])

        # Inverse of the normalization above: x -> (x + 1) / 2.
        self.denormalize = transforms.Normalize(
            mean=[-1.0, -1.0, -1.0],
            std=[2.0, 2.0, 2.0]
        )

        print("推理器初始化完成")

    def load_model(self, model_path, model_type):
        """Load a checkpoint and build the matching model.

        Args:
            model_path: path to the checkpoint (.pth) file
            model_type: 'lightweight' or 'ultra'

        Returns:
            The model in eval mode on self.device.

        Raises:
            FileNotFoundError: if model_path does not exist.
            ValueError: if model_type is not recognized.
        """
        print(f"加载模型: {model_path}")

        if not os.path.exists(model_path):
            raise FileNotFoundError(f"模型文件不存在: {model_path}")

        # weights_only=False because the checkpoint also stores a config
        # dict. NOTE(review): only load checkpoints from trusted sources —
        # torch.load with weights_only=False can execute arbitrary code.
        checkpoint = torch.load(model_path, map_location=self.device, weights_only=False)

        # Read the model width from the stored config if available.
        if 'config' in checkpoint:
            config = checkpoint['config']
            base_channels = config.get('model', {}).get('base_channels', 32)
        else:
            base_channels = 32
            print("警告: 检查点中未找到配置信息，使用默认参数")

        # Build the requested architecture.
        if model_type == 'lightweight':
            model = LightweightWatermarkRemover(base_channels=base_channels)
        elif model_type == 'ultra':
            model = UltraLightWatermarkRemover(base_channels=base_channels)
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")

        # Load weights and switch to inference mode.
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(self.device)
        model.eval()

        print(f"模型加载完成 - 参数量: {sum(p.numel() for p in model.parameters()):,}")
        return model

    def preprocess_image(self, image_path):
        """Load and normalize an image for the network.

        Args:
            image_path: a filesystem path or an already-open PIL Image.

        Returns:
            (tensor, original_size): a 1xCxHxW tensor on self.device and
            the image's original (width, height) for later restoration.
        """
        if isinstance(image_path, str):
            image = Image.open(image_path).convert('RGB')
        else:
            image = image_path  # already a PIL Image

        # Remember the original size so the output can be restored.
        original_size = image.size

        tensor = self.transform(image).unsqueeze(0).to(self.device)

        return tensor, original_size

    def postprocess_image(self, tensor, original_size):
        """Convert a network output tensor back to a PIL image.

        Args:
            tensor: 1xCxHxW output tensor in [-1, 1].
            original_size: (width, height) to resize the result to.

        Returns:
            A PIL Image at the original resolution.
        """
        # Undo the [-1, 1] normalization and clamp numeric overshoot.
        tensor = self.denormalize(tensor.squeeze(0).cpu())
        tensor = torch.clamp(tensor, 0, 1)

        image = transforms.ToPILImage()(tensor)

        # Restore the input's original resolution.
        image = image.resize(original_size, Image.LANCZOS)

        return image

    def inference_single(self, image_path, save_path=None):
        """
        Run watermark removal on a single image.

        Args:
            image_path: input image path (or PIL Image)
            save_path: optional path to save the result

        Returns:
            removed_image: the de-watermarked PIL image
        """
        print(f"处理图像: {image_path}")

        input_tensor, original_size = self.preprocess_image(image_path)

        with torch.no_grad():
            start_time = time.time()
            output_tensor = self.model(input_tensor)
            inference_time = time.time() - start_time

        removed_image = self.postprocess_image(output_tensor, original_size)

        if save_path:
            # Only create a directory when save_path actually has one;
            # os.makedirs('') raises FileNotFoundError.
            out_dir = os.path.dirname(save_path)
            if out_dir:
                os.makedirs(out_dir, exist_ok=True)
            removed_image.save(save_path)
            print(f"结果已保存: {save_path}")

        print(f"推理时间: {inference_time:.3f}秒")

        return removed_image

    def inference_batch(self, input_dir, output_dir, compare_dir=None):
        """
        Run watermark removal over all images in a directory.

        Args:
            input_dir: input directory
            output_dir: output directory
            compare_dir: optional directory of clean originals for metrics
        """
        print(f"批量推理: {input_dir} -> {output_dir}")

        os.makedirs(output_dir, exist_ok=True)

        # Collect image files (case-insensitive extension match),
        # sorted for deterministic processing order.
        valid_exts = ('.jpg', '.jpeg', '.png', '.bmp')
        image_files = sorted(
            f for f in os.listdir(input_dir)
            if f.lower().endswith(valid_exts)
        )

        if not image_files:
            print("未找到图像文件")
            return

        print(f"找到 {len(image_files)} 个图像文件")

        total_time = 0
        results = []

        for filename in tqdm(image_files, desc="批量处理"):
            input_path = os.path.join(input_dir, filename)
            output_path = os.path.join(output_dir, filename)

            try:
                start_time = time.time()
                removed_image = self.inference_single(input_path, output_path)
                processing_time = time.time() - start_time
                total_time += processing_time

                result = {
                    'filename': filename,
                    'processing_time': processing_time,
                    'success': True
                }

                # If a reference image exists, compute quality metrics.
                if compare_dir:
                    compare_path = os.path.join(compare_dir, filename)
                    if os.path.exists(compare_path):
                        quality_metrics = self.calculate_quality_metrics(
                            removed_image, compare_path
                        )
                        result.update(quality_metrics)

                results.append(result)

            except Exception as e:
                # Report which file failed (the original printed a
                # placeholder instead of the filename).
                print(f"处理失败 ({filename}): {e}")
                results.append({
                    'filename': filename,
                    'processing_time': 0,
                    'success': False,
                    'error': str(e)
                })

        self.save_batch_report(results, output_dir, total_time)

        print(f"批量处理完成，总计时间: {total_time:.2f}秒")

    def calculate_quality_metrics(self, removed_image, original_path):
        """Compute MSE, PSNR and a simplified (global) SSIM.

        Args:
            removed_image: de-watermarked PIL image
            original_path: path to the clean reference image

        Returns:
            dict with 'mse', 'psnr', 'ssim' (empty dict on failure).
        """
        try:
            original_image = Image.open(original_path).convert('RGB')

            # Align sizes before comparing.
            if removed_image.size != original_image.size:
                removed_image = removed_image.resize(original_image.size, Image.LANCZOS)

            removed_np = np.array(removed_image).astype(np.float32) / 255.0
            original_np = np.array(original_image).astype(np.float32) / 255.0

            mse = np.mean((removed_np - original_np) ** 2)

            # PSNR with peak value 1.0; identical images give infinity.
            if mse == 0:
                psnr = float('inf')
            else:
                psnr = 20 * np.log10(1.0 / np.sqrt(mse))

            # Simplified SSIM: global statistics per channel, no sliding
            # window — coarser than the windowed reference implementation.
            def ssim(img1, img2):
                mu1 = np.mean(img1)
                mu2 = np.mean(img2)

                sigma1_sq = np.var(img1)
                sigma2_sq = np.var(img2)
                sigma12 = np.mean((img1 - mu1) * (img2 - mu2))

                c1 = (0.01) ** 2
                c2 = (0.03) ** 2

                ssim_score = ((2 * mu1 * mu2 + c1) * (2 * sigma12 + c2)) / \
                           ((mu1 ** 2 + mu2 ** 2 + c1) * (sigma1_sq + sigma2_sq + c2))

                return ssim_score

            # Average SSIM over the three color channels.
            ssim_score = np.mean([ssim(removed_np[:, :, i], original_np[:, :, i])
                                  for i in range(3)])

            return {
                'mse': float(mse),
                'psnr': float(psnr),
                'ssim': float(ssim_score)
            }

        except Exception as e:
            print(f"质量指标计算失败: {e}")
            return {}

    def save_batch_report(self, results, output_dir, total_time):
        """Write a JSON report summarizing a batch run.

        Args:
            results: per-file result dicts from inference_batch
            output_dir: directory to write processing_report.json into
            total_time: accumulated processing time in seconds
        """
        report_path = os.path.join(output_dir, 'processing_report.json')

        successful_count = sum(1 for r in results if r['success'])
        failed_count = len(results) - successful_count
        avg_time = total_time / len(results) if results else 0

        # Aggregate quality metrics over successful results that have them.
        quality_stats = {}
        quality_results = [r for r in results if r['success'] and 'mse' in r]

        if quality_results:
            for metric in ['mse', 'psnr', 'ssim']:
                values = [r[metric] for r in quality_results if metric in r]
                if values:
                    # Cast to plain float: numpy scalars (np.float64) are
                    # not JSON-serializable and would make json.dump raise.
                    quality_stats[metric] = {
                        'mean': float(np.mean(values)),
                        'std': float(np.std(values)),
                        'min': float(np.min(values)),
                        'max': float(np.max(values))
                    }

        report = {
            'summary': {
                'total_images': len(results),
                'successful': successful_count,
                'failed': failed_count,
                'total_time': total_time,
                'average_time': avg_time
            },
            'quality_statistics': quality_stats,
            'detailed_results': results
        }

        # utf-8 + ensure_ascii=False keeps non-ASCII filenames/messages
        # readable in the report.
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        print(f"处理报告已保存: {report_path}")

    def create_comparison_plot(self, watermarked_path, removed_path, original_path=None, save_path=None):
        """Render a side-by-side figure of watermarked / removed / original.

        Args:
            watermarked_path: path to the watermarked input image
            removed_path: path to the de-watermarked output image
            original_path: optional path to the clean reference image
            save_path: optional path to save the figure
        """
        fig_width = 15 if original_path else 10
        cols = 3 if original_path else 2

        fig, axes = plt.subplots(1, cols, figsize=(fig_width, 5))

        watermarked_img = Image.open(watermarked_path).convert('RGB')
        removed_img = Image.open(removed_path).convert('RGB')

        axes[0].imshow(watermarked_img)
        axes[0].set_title('Watermarked Image')
        axes[0].axis('off')

        axes[1].imshow(removed_img)
        axes[1].set_title('Removed Image')
        axes[1].axis('off')

        # With a reference image, add a third panel and quality metrics.
        if original_path and os.path.exists(original_path):
            original_img = Image.open(original_path).convert('RGB')
            axes[2].imshow(original_img)
            axes[2].set_title('Original Image')
            axes[2].axis('off')

            quality_metrics = self.calculate_quality_metrics(removed_img, original_path)
            if quality_metrics:
                title = f"Quality Metrics:\n"
                title += f"PSNR: {quality_metrics.get('psnr', 0):.2f} dB\n"
                title += f"SSIM: {quality_metrics.get('ssim', 0):.4f}"
                fig.suptitle(title, fontsize=12)

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=150, bbox_inches='tight')
            print(f"对比图已保存: {save_path}")

        plt.show()

    def benchmark_speed(self, test_image_path, num_runs=10):
        """Benchmark forward-pass latency on a single image.

        Args:
            test_image_path: image used as the benchmark input
            num_runs: number of timed runs (after 3 warm-up passes)

        Returns:
            dict with avg/std/min/max time in seconds and FPS.
        """
        print(f"性能测试 - 运行 {num_runs} 次")

        input_tensor, _ = self.preprocess_image(test_image_path)
        use_cuda = self.device.type == 'cuda'

        # Warm-up passes (cuDNN autotuning, allocator warm-up, etc.).
        with torch.no_grad():
            for _ in range(3):
                _ = self.model(input_tensor)
        if use_cuda:
            torch.cuda.synchronize()

        # Timed runs. CUDA kernels launch asynchronously, so synchronize
        # BEFORE reading the end timestamp — the original synchronized
        # after recording the time, which under-measured GPU work.
        times = []
        with torch.no_grad():
            for _ in range(num_runs):
                start_time = time.time()
                _ = self.model(input_tensor)
                if use_cuda:
                    torch.cuda.synchronize()
                times.append(time.time() - start_time)

        avg_time = np.mean(times)
        std_time = np.std(times)
        min_time = np.min(times)
        max_time = np.max(times)

        print(f"性能统计:")
        print(f"  平均时间: {avg_time:.4f}s")
        print(f"  标准差: {std_time:.4f}s")
        print(f"  最小时间: {min_time:.4f}s")
        print(f"  最大时间: {max_time:.4f}s")
        print(f"  FPS: {1.0/avg_time:.2f}")

        return {
            'avg_time': avg_time,
            'std_time': std_time,
            'min_time': min_time,
            'max_time': max_time,
            'fps': 1.0/avg_time
        }


def main(input_path):
    """Command-line entry point.

    Builds default output/compare paths from *input_path*, parses CLI
    arguments, then dispatches to single-image or batch inference.

    Args:
        input_path: default input image or directory path.
    """
    parser = argparse.ArgumentParser(description='去水印模型推理')
    parser.add_argument('--model', default='D:/deepfake/SepMark-main/checkpoints/best_removal_model.pth',
                        help='模型文件路径')

    parser.add_argument('--input', default=input_path, help='输入图像/目录路径')

    # Derive default output/compare paths from the input's base name.
    # The original computed `filename` but never used it, writing every
    # result to the same hard-coded file name.
    filename = os.path.splitext(os.path.basename(input_path))[0]
    output_dir = "Inference-outputs"
    compare_dir = "Inference-compare"

    parser.add_argument('--output', default=os.path.join(output_dir, f"{filename}_removed.jpg"),
                        help='输出图像/目录路径')
    parser.add_argument('--compare', default=os.path.join(compare_dir, f"{filename}_compare.jpg"),
                        help='对比原图目录（可选）')

    parser.add_argument('--model_type', default='lightweight',
                        choices=['lightweight', 'ultra'], help='模型类型')
    parser.add_argument('--device', choices=['cuda', 'cpu'], help='设备选择')
    parser.add_argument('--benchmark', action='store_true', help='运行性能测试')
    parser.add_argument('--plot', action='store_true', help='生成对比图')

    args = parser.parse_args()

    # Build the inference engine.
    inferencer = WatermarkRemovalInference(
        model_path=args.model,
        model_type=args.model_type,
        device=args.device
    )

    # Dispatch: single file vs. directory.
    if os.path.isfile(args.input):
        print("单张图像推理模式")
        inferencer.inference_single(args.input, args.output)

        # Honor the --plot flag (the original used `if True:` and
        # always generated the figure regardless of the flag).
        if args.plot:
            plot_path = args.output.replace('.jpg', '_comparison.jpg').replace('.png', '_comparison.png')
            inferencer.create_comparison_plot(
                args.input, args.output, args.compare, plot_path
            )
            print(f"对比图已保存: {plot_path}")

        if args.benchmark:
            inferencer.benchmark_speed(args.input)

    elif os.path.isdir(args.input):
        print("批量推理模式")
        inferencer.inference_batch(args.input, args.output, args.compare)

    else:
        print(f"错误: 输入路径不存在或无效: {args.input}")

if __name__ == '__main__':
    # Raw string: in the original non-raw literal, "\t" in "...\test" was
    # parsed as a TAB character, producing an invalid path.
    main(input_path=r"D:\deepfake\CelebAMask-HQ\CelebAMask-HQ\test")