import os
import time

import matplotlib.pyplot as plt
import numpy as np
import torch
from skimage.filters import threshold_otsu
from sklearn.metrics import f1_score, accuracy_score, precision_recall_curve, auc
from torch import nn
# NOTE(review): the import below binds `F` to a gradual-typing constraint
# class, NOT torch.nn.functional — almost certainly an auto-import mistake.
from torch.fx.experimental.migrate_gradual_types.constraint import F
# Rebind F to the intended functional namespace (must stay after the line above).
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm

# Import the required components from the original training script
from main import (
    config,
    TuSimpleDataset,
    LaneDetectionNet,
    transforms
)


# Focal Loss definition
class FocalLoss(nn.Module):
    """Binary focal loss on raw logits (Lin et al., "Focal Loss for Dense
    Object Detection").

    Down-weights well-classified examples so optimization focuses on hard,
    misclassified pixels — useful for the heavy foreground/background
    imbalance of lane-segmentation masks.

    Args:
        alpha: global scale applied to the loss term (default 0.8).
        gamma: focusing exponent; larger values suppress easy examples more.
    """

    def __init__(self, alpha: float = 0.8, gamma: float = 2.0):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        """Return the scalar mean focal loss.

        Args:
            pred: raw (pre-sigmoid) logits, same shape as ``target``.
            target: binary ground truth in {0, 1}, float tensor.
        """
        # Bug fix: the file imported `F` from
        # torch.fx.experimental.migrate_gradual_types.constraint (a constraint
        # class), so `F.binary_cross_entropy_with_logits` would crash. Call
        # torch.nn.functional explicitly instead of relying on that import.
        bce_loss = nn.functional.binary_cross_entropy_with_logits(
            pred, target, reduction='none')
        pt = torch.exp(-bce_loss)  # model's probability of the true class
        focal_loss = self.alpha * (1 - pt) ** self.gamma * bce_loss
        return focal_loss.mean()


# Enhanced evaluation routine
def enhanced_evaluate():
    """Run an enhanced evaluation of the trained lane-detection model.

    Loads the held-out TuSimple split with augmentation-free preprocessing,
    restores ``best_model.pth``, and reports per-sample accuracy, F1 and
    AUPRC, binarizing each prediction with a per-sample Otsu threshold.
    Also visualizes the first prediction and plots F1 versus a fixed
    binarization threshold.

    Side effects: reads ``best_model.pth`` from the working directory,
    prints a summary to stdout, and opens matplotlib windows.
    """
    test_config = {
        'batch_size': 8,
        'device': config['device'],
        'warmup_runs': 10,   # NOTE(review): unused below — kept for config parity
        'test_runs': 100,    # NOTE(review): unused below — kept for config parity
        'focal_alpha': 0.7,  # positive-sample weight (not used during evaluation)
        'focal_gamma': 2.0
    }

    # Test-set preprocessing: resize + normalize only, no augmentation.
    test_transform = transforms.Compose([
        transforms.Resize(config['input_size']),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    _, test_files = TuSimpleDataset.split_dataset(config['data_root'], test_size=0.2)
    test_set = TuSimpleDataset(config['data_root'], test_files, transform=test_transform)
    test_loader = DataLoader(test_set,
                             batch_size=test_config['batch_size'],
                             num_workers=config['num_workers'],
                             pin_memory=True)

    # Restore the best checkpoint.
    # Fix: map_location makes a GPU-saved checkpoint loadable on CPU-only
    # hosts and places weights directly on the configured device.
    model = LaneDetectionNet().to(test_config['device'])
    model.load_state_dict(
        torch.load('best_model.pth', map_location=test_config['device']))
    model.eval()

    def calculate_enhanced_metrics(y_true, y_pred):
        """Return (accuracy, f1, auprc, threshold) for one sample."""
        y_true_np = y_true.flatten().cpu().numpy().astype(np.uint8)
        y_pred_np = y_pred.flatten().cpu().numpy()

        # Dynamic binarization threshold via Otsu's method.
        # Fix: catch the specific failure (ValueError on constant input)
        # instead of a bare except that would also swallow e.g.
        # KeyboardInterrupt.
        try:
            thresh = threshold_otsu(y_pred_np)
        except ValueError:
            thresh = 0.3  # deliberately low fallback threshold

        y_pred_bin = (y_pred_np > thresh).astype(np.uint8)

        # Base metrics on the thresholded mask.
        accuracy = accuracy_score(y_true_np, y_pred_bin)
        f1 = f1_score(y_true_np, y_pred_bin, zero_division=0)

        # Area under the precision-recall curve (threshold-free metric).
        precision, recall, _ = precision_recall_curve(y_true_np, y_pred_np)
        auprc = auc(recall, precision)

        return accuracy, f1, auprc, thresh

    # Per-sample metric accumulators.
    metrics = {
        'accuracy': [],
        'f1': [],
        'auprc': [],
        'thresholds': []
    }

    def visualize_feature_maps(image, pred):
        """Show the input image, prediction heatmap, and probability histogram."""
        # Ensure tensors are on CPU and converted to numpy (HWC for display).
        image_np = image.detach().cpu().permute(1, 2, 0).numpy()
        pred_np = pred.detach().squeeze().cpu().numpy()

        # Undo ImageNet normalization for display.
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        image_np = np.clip(image_np * std + mean, 0, 1)

        plt.figure(figsize=(18, 6))

        plt.subplot(1, 3, 1)
        plt.imshow(image_np)
        plt.title("Input Image")
        plt.axis('off')

        plt.subplot(1, 3, 2)
        plt.imshow(pred_np, cmap='jet')
        plt.title("Prediction Heatmap")
        plt.colorbar()
        plt.axis('off')

        plt.subplot(1, 3, 3)
        plt.hist(pred_np.flatten(), bins=50, range=(0, 1))
        plt.title("Prediction Distribution")
        plt.xlabel("Probability")
        plt.ylabel("Count")

        plt.tight_layout()
        plt.show()

    with torch.no_grad():
        for images, labels in tqdm(test_loader, desc="Enhanced Evaluation"):
            images = images.to(test_config['device'])
            labels = labels.to(test_config['device'])

            outputs = model(images)
            preds = torch.sigmoid(outputs)

            # Visualize only the very first prediction of the run.
            if len(metrics['accuracy']) == 0:
                visualize_feature_maps(images[0].detach().cpu(),
                                       preds[0].detach().cpu())

            # Per-sample metrics to avoid batch-averaging bias.
            for i in range(images.size(0)):
                acc, f1, auprc, thresh = calculate_enhanced_metrics(labels[i], preds[i])
                metrics['accuracy'].append(acc)
                metrics['f1'].append(f1)
                metrics['auprc'].append(auprc)
                metrics['thresholds'].append(thresh)

    # Summary statistics over all samples.
    print("\n================ 增强评估结果 ================")
    print(f"平均Accuracy: {np.mean(metrics['accuracy']):.4f} ± {np.std(metrics['accuracy']):.4f}")
    print(f"平均F1 Score: {np.mean(metrics['f1']):.4f} ± {np.std(metrics['f1']):.4f}")
    print(f"平均AUPRC:    {np.mean(metrics['auprc']):.4f}")
    print(f"阈值分布: {np.min(metrics['thresholds']):.2f}~{np.max(metrics['thresholds']):.2f}")
    print("=============================================")

    # Threshold-vs-F1 sweep.
    # NOTE(review): `preds` and `labels` here are leftovers from the FINAL
    # loop iteration, so this curve reflects only the last batch, not the
    # whole test set — confirm whether that is intended.
    thresholds = np.linspace(0, 1, 11)
    f1_scores = []
    for th in thresholds:
        y_pred_bin = (preds.cpu().numpy() > th).astype(int)
        f1 = f1_score(labels.cpu().numpy().flatten(), y_pred_bin.flatten(), zero_division=0)
        f1_scores.append(f1)

    plt.plot(thresholds, f1_scores, marker='o')
    plt.xlabel("Threshold")
    plt.ylabel("F1 Score")
    plt.title("Threshold vs F1 Score")
    plt.grid()
    plt.show()

 
# Script entry point: run the enhanced evaluation when executed directly.
if __name__ == "__main__":
    enhanced_evaluate()