import sys
sys.path.append(".")


import torch
from torchvision import transforms
from PIL import Image
import numpy as np
import os
import argparse
import time
from tqdm import tqdm
import pandas as pd
import cv2

from UnetCBAM.net import GeneratorWithCBAM as Generator, Discriminator


def test_model(model_path, test_png_dir, output_dir, device=None):
    """Run the generator over every PNG in ``test_png_dir`` and save binary masks.

    Args:
        model_path: Path to a checkpoint file containing a 'generator' state dict.
        test_png_dir: Directory of input grayscale PNG images.
        output_dir: Directory where the binarized 250x250 result masks are written.
        device: Optional torch.device; defaults to CUDA when available. Added as a
            parameter so the function no longer depends on a module-level DEVICE
            that only exists when the file is run as a script.
    """
    if device is None:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Load the generator weights. The discriminator is not needed for inference,
    # so it is not instantiated here.
    checkpoint = torch.load(model_path, map_location=device)
    netG = Generator().to(device)
    netG.load_state_dict(checkpoint['generator'])
    netG.eval()

    # Preprocessing: resize to 256, convert to tensor, normalize to [-1, 1].
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])

    test_png_list = sorted(
        png for png in os.listdir(test_png_dir) if png.endswith(".png")
    )
    if not test_png_list:
        # Guard: avoid ZeroDivisionError in the timing summary below.
        print(f"No .png images found in {test_png_dir}")
        return

    time_start = time.time()
    for index, png in enumerate(test_png_list):
        test_img_path = os.path.join(test_png_dir, png)

        img = Image.open(test_img_path).convert('L')
        tensor = transform(img).unsqueeze(0).to(device)
        # Generate the output mask without tracking gradients.
        with torch.no_grad():
            output = netG(tensor)

        # Post-process: binarize at 0 (midpoint of the normalized output range)
        # into a 0/255 uint8 mask.
        output = output.squeeze().cpu().numpy()
        output = (output > 0).astype(np.uint8) * 255

        # Save, resized back to the dataset's native 250x250 resolution with
        # nearest-neighbor interpolation to keep the mask strictly binary.
        result_img = Image.fromarray(output)
        result_img = result_img.resize((250, 250), Image.NEAREST)
        output_path = os.path.join(output_dir, png)
        result_img.save(output_path)

        img_basename = f"{os.path.basename(test_img_path):10s}"
        progress = f"{index: 5d}/{len(test_png_list):5d}"
        print(f"当前:{img_basename}| 进度:{progress}")
    time_end = time.time()
    print(f"总耗时: {time_end - time_start: 5.2f}s, 单帧耗时: {(time_end - time_start) * 1000 / len(test_png_list): 7.1f}ms")



def evaluate(output_dir, real_dir):
    """Batch-compute segmentation metrics and generate a report.

    Compares each predicted mask in ``output_dir`` against the ground-truth
    mask with the same filename in ``real_dir``, computing IoU and a boundary
    F1 score per image. Writes a per-image CSV report into ``output_dir`` and
    prints an aggregate summary.
    """

    def binarize_mask(img, threshold=127):
        """Threshold a grayscale image into a 0/255 binary mask."""
        _, binary = cv2.threshold(img, threshold, 255, cv2.THRESH_BINARY)
        return binary

    def extract_boundary(mask):
        """Return a mask containing only the 1-px-wide external contours."""
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        boundary_mask = np.zeros_like(mask)
        cv2.drawContours(boundary_mask, contours, -1, 255, 1)
        return boundary_mask

    def calculate_iou(mask_pred, mask_true):
        """Intersection-over-union of two binary masks."""
        intersection = np.logical_and(mask_pred > 0, mask_true > 0).sum()
        union = np.logical_or(mask_pred > 0, mask_true > 0).sum()
        return intersection / max(union, 1e-6)  # avoid division by zero

    def calculate_boundary_f1(mask_pred, mask_true, tolerance=2):
        """Boundary F1: precision/recall of boundary pixels within `tolerance` px.

        Precision matches predicted boundary pixels against the true boundary;
        recall matches true boundary pixels against the predicted boundary.
        (The original implementation reused the precision match set for recall,
        which overstates recall whenever the prediction misses parts of the
        true boundary.)
        """
        pred_boundary = extract_boundary(mask_pred)
        true_boundary = extract_boundary(mask_true)
        # Distance from every pixel to the nearest boundary pixel of each mask.
        dist_to_true = cv2.distanceTransform(255 - true_boundary, cv2.DIST_L2, 3)
        dist_to_pred = cv2.distanceTransform(255 - pred_boundary, cv2.DIST_L2, 3)
        matched_pred = (pred_boundary > 0) & (dist_to_true <= tolerance)
        matched_true = (true_boundary > 0) & (dist_to_pred <= tolerance)

        precision = matched_pred.sum() / max((pred_boundary > 0).sum(), 1e-6)
        recall = matched_true.sum() / max((true_boundary > 0).sum(), 1e-6)
        return 2 * precision * recall / max(precision + recall, 1e-6)

    results = []
    pred_images = [f for f in os.listdir(output_dir) if f.endswith('.png')]

    for img_name in tqdm(pred_images, desc='Evaluating'):
        # Load the prediction / ground-truth pair as grayscale.
        pred = cv2.imread(os.path.join(output_dir, img_name), 0)
        true = cv2.imread(os.path.join(real_dir, img_name), 0)
        # cv2.imread returns None on a missing or unreadable file;
        # skip the pair instead of crashing inside cv2.threshold.
        if pred is None or true is None:
            print(f"[WARN] skipping {img_name}: missing prediction or ground truth")
            continue
        # Binarize both masks before computing metrics.
        pred_mask = binarize_mask(pred)
        true_mask = binarize_mask(true)
        iou = calculate_iou(pred_mask, true_mask)
        boundary_f1 = calculate_boundary_f1(pred_mask, true_mask)

        results.append({
            'Image': img_name,
            'IoU': round(iou, 4),
            'Boundary_F1': round(boundary_f1, 4),
            'Pass': iou >= 0.85 and boundary_f1 >= 0.90
        })

    if not results:
        # Guard: an empty DataFrame would yield NaN means below.
        print("No image pairs were evaluated; no report generated.")
        return

    # Build the per-image report and aggregate summary.
    df = pd.DataFrame(results)
    summary = {
        'Mean_IoU': df['IoU'].mean(),
        'Mean_Boundary_F1': df['Boundary_F1'].mean(),
        'Pass_Rate': df['Pass'].mean()
    }
    df.to_csv(
        os.path.join(output_dir, 'evaluation_results.csv'),
        index=False
    )
    print(f"""
        === 综合评价 ===
        平均IoU: {summary['Mean_IoU']:.4f} (目标≥0.85)
        平均边界F1: {summary['Mean_Boundary_F1']:.4f} (目标≥0.90)
        合格率: {summary['Pass_Rate']:.2%}
        """
    )


if __name__ == "__main__":

    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    parser = argparse.ArgumentParser()
    # "--checkpiont" is kept as a hidden alias so existing scripts using the
    # original (misspelled) flag keep working.
    parser.add_argument(
        "--checkpoint", "--checkpiont", dest="checkpoint", type=str, default=None,
        help="Path to checkpoint (e.g., 'UnetCBAM/weights/2023-01-01/pix2pix_checkpoint_1000.pth')"
    )
    parser.add_argument(
        "--real_dir", type=str, default="dataset/real",
        help="Path to real directory (e.g., 'dataset/real')"
    )
    parser.add_argument(
        "--sample_dir", type=str, default="dataset/sample",
        help="Path to sample directory (e.g., 'dataset/sample')"
    )
    parser.add_argument(
        "--output_dir", type=str, default="dataset/output",
        help="Path to output directory (e.g., 'dataset/output')"
    )
    args = parser.parse_args()

    # Fail fast with a usage message instead of crashing inside torch.load(None).
    if args.checkpoint is None:
        parser.error("--checkpoint is required")

    os.makedirs(args.output_dir, exist_ok=True)

    print("Test model ...")
    test_model(
        model_path=args.checkpoint,
        test_png_dir=args.sample_dir,
        output_dir=args.output_dir
    )
    print("Evaluate ...")
    evaluate(
        output_dir=args.output_dir,
        real_dir=args.real_dir
    )