# train_unet.py
from sklearn.metrics import confusion_matrix
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import roc_curve
from torch.utils.data import DataLoader
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import os
from data_read import mf_data_seg_A
from u_net_mf import U_Net
from loss_function_mf2 import generate_auxiliary_points
import cv2
from torch import sigmoid
class UNetTrainer:
    """Layer-wise U-Net training, validation, and test-time evaluation.

    Relies on an injected ``recorder`` object exposing ``log_metrics``,
    ``save_model``, ``save_figure`` and ``save_results``, and a torch
    ``device`` that models/tensors are moved to.
    """

    def __init__(self, recorder, device):
        # recorder: experiment tracker (metrics / checkpoints / figures).
        # device: torch.device used for all model and tensor placement.
        self.recorder = recorder
        self.device = device

    def setup_data(self, root_path, batch_size=5):
        """Build train/val/test DataLoaders rooted at ``root_path``.

        Returns ``self`` so the call can be chained.
        """
        # Training-time augmentation: resize + random flips + affine jitter.
        data_trans_train = A.Compose([
            A.Resize(height=512, width=512),
            A.Flip(p=0.5),
            A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=180,
                               p=0.5, border_mode=cv2.BORDER_CONSTANT),
            A.HorizontalFlip(p=0.5),
            ToTensorV2()
        ])

        # Deterministic resize-only pipeline shared by validation and test.
        data_trans_eval = A.Compose([
            A.Resize(height=512, width=512),
            ToTensorV2()
        ])

        # BUG FIX: the training split previously received the validation
        # transform, so the augmentations defined above were never applied.
        train_data = mf_data_seg_A(
            root_path=os.path.join(root_path, 'train'),
            flag='train',
            transforms=data_trans_train
        )
        val_data = mf_data_seg_A(
            root_path=os.path.join(root_path, 'val'),
            flag='val',
            transforms=data_trans_eval
        )
        test_data = mf_data_seg_A(
            root_path=os.path.join(root_path, 'test'),
            flag='test',
            transforms=data_trans_eval
        )

        self.train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
        self.val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=False)
        self.test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

        return self

    @staticmethod
    def _unpack_batch(data):
        """Return ``(img, label)`` from a batch that may carry a third item
        (e.g. a filename), which is ignored. Raises on any other arity."""
        if len(data) == 2:
            img, label = data
        elif len(data) == 3:
            img, label, _ = data
        else:
            raise ValueError(f"训练数据返回了不期望的元素数量: {len(data)}")
        return img, label

    def train_layer(self, layer, epochs, target_point, center_point, max_layers):
        """Train a fresh U-Net for one layer and return the trained model.

        The checkpoint with the lowest validation loss is saved through the
        recorder, per-epoch losses are logged, and a loss-curve figure is
        saved at the end.
        """
        model = U_Net().to(self.device)
        model.apply(self._reset_parameters)  # fresh weights for every layer
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))

        best_val_loss = float('inf')
        train_losses, val_losses = [], []

        for epoch in tqdm(range(epochs), desc=f"层 {layer} 训练"):
            # ---- training phase ----
            model.train()
            epoch_loss = 0.0
            for data in self.train_loader:
                img, label = self._unpack_batch(data)
                img, label = img.to(self.device), label.to(self.device)
                pred = model(img)
                loss = self._calculate_loss(
                    pred, label, layer,
                    target_point, center_point,
                    max_layers, img
                )

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()
            train_losses.append(epoch_loss / len(self.train_loader))

            # ---- validation phase ----
            val_loss = self._validate(model, target_point)
            val_losses.append(val_loss)

            self.recorder.log_metrics({
                f'layer_{layer}_train_loss': train_losses[-1],
                f'layer_{layer}_val_loss': val_losses[-1]
            }, epoch=epoch)

            # Keep only the best-by-validation checkpoint.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                self.recorder.save_model(model, f'layer_{layer}_best')

        fig = self._plot_training_curve(train_losses, val_losses, layer)
        self.recorder.save_figure(fig, f'layer_{layer}_training_curve')

        return model

    def _calculate_loss(self, pred, label, layer, target_point, center_point, max_layers, img):
        """BCE-with-logits loss at the target pixel, plus an auxiliary-point
        loss for layers > 0.

        ``max_layers`` is accepted for interface compatibility but unused here.
        """
        target_loss = nn.functional.binary_cross_entropy_with_logits(
            pred[:, :, target_point[0], target_point[1]],
            label[:, :, target_point[0], target_point[1]].float()
        )

        if layer == 0:
            return target_loss

        # Sample auxiliary supervision points around the center for this layer.
        auxiliary_points = generate_auxiliary_points(
            center_x=center_point[0],
            center_y=center_point[1],
            current_layer=layer,
            image_size=512,
            exclude_point=target_point,
            image=img,
            threshold=0.1
        )

        # NOTE(review): assumes auxiliary_points is an (N, 2) tensor of
        # (row, col) coordinates — confirm against generate_auxiliary_points.
        aux_y = auxiliary_points[:, 0].long()
        aux_x = auxiliary_points[:, 1].long()
        pred_aux = pred[:, :, aux_y, aux_x].contiguous().to(self.device)
        label_aux = label[:, :, aux_y, aux_x].float().contiguous().to(self.device)
        aux_loss = nn.functional.binary_cross_entropy_with_logits(pred_aux, label_aux)
        return target_loss + aux_loss

    def _validate(self, model, target_point):
        """Return the mean target-pixel BCE loss over the validation loader."""
        model.eval()
        total_loss = 0.0
        with torch.no_grad():
            for data in self.val_loader:
                img, label = self._unpack_batch(data)
                img, label = img.to(self.device), label.to(self.device)
                pred = model(img)
                loss = nn.functional.binary_cross_entropy_with_logits(
                    pred[:, :, target_point[0], target_point[1]],
                    label[:, :, target_point[0], target_point[1]].float()
                )
                total_loss += loss.item()
        return total_loss / len(self.val_loader)

    def evaluate(self, models, target_point):
        """Evaluate each model on the test set; return a results DataFrame.

        Also persists the table, logs best-of metrics, and saves a
        performance-by-layer figure through the recorder.
        """
        results = []
        for i, model in enumerate(tqdm(models, desc="模型评估")):
            sensitivity, specificity = self._evaluate_model(model, target_point)
            avg_value = (sensitivity + specificity) / 2
            results.append({
                "Layer": i,
                "Sensitivity": sensitivity,
                "Specificity": specificity,
                "Average": avg_value
            })

        results_df = pd.DataFrame(results)
        self.recorder.save_results(results_df, 'evaluation_results')

        self.recorder.log_metrics({
            'final_metrics': {
                'best_sensitivity': max(results_df['Sensitivity']),
                'best_specificity': max(results_df['Specificity']),
                'best_average': max(results_df['Average'])
            }
        })

        fig = self._plot_results(results_df)
        self.recorder.save_figure(fig, 'performance_by_layer')

        return results_df

    def _evaluate_model(self, model, target_point):
        """Return ``(sensitivity, specificity)`` on the test set at the ROC
        threshold where sensitivity and specificity are closest to each other.
        """
        all_labels = []
        all_predictions = []
        model.eval()
        with torch.no_grad():
            for data in self.test_loader:
                img, label = self._unpack_batch(data)
                img, label = img.to(self.device), label.to(self.device)
                pred = sigmoid(model(img))
                pred_target = pred[:, :, target_point[0], target_point[1]].cpu().numpy()
                label_target = label[:, :, target_point[0], target_point[1]].cpu().numpy()
                # BUG FIX: flatten predictions as well as labels so the two
                # stay aligned one-to-one even with multi-channel outputs.
                all_predictions.extend(pred_target.flatten())
                all_labels.extend((label_target > 0.5).flatten().astype(int))

        # BUG FIX: convert to arrays once; the old code compared a Python
        # list to a scalar (always False) and applied `>=` to a list (TypeError).
        labels = np.asarray(all_labels)
        predictions = np.asarray(all_predictions)

        # Degenerate case: roc_curve is undefined with a single class present.
        if len(np.unique(labels)) == 1:
            # All positives -> (1, 1); all negatives -> (0, 0), matching the
            # original intent (tpr, 1 - fpr).
            return (1.0, 1.0) if labels[0] == 1 else (0.0, 0.0)

        # Single ROC computation (the original computed it twice).
        fpr, tpr, thresholds = roc_curve(labels, predictions)
        sensitivities = tpr
        specificities = 1 - fpr

        # Pick the threshold where sensitivity and specificity are most balanced.
        best_index = np.abs(sensitivities - specificities).argmin()
        best_threshold = thresholds[best_index]

        predicted_labels = (predictions >= best_threshold).astype(int)
        tn, fp, fn, tp = confusion_matrix(labels, predicted_labels).ravel()
        print(f"Best Threshold: {best_threshold}")
        print(f"TPR (Sensitivity) at best threshold: {sensitivities[best_index]}")
        print(f"Specificity at best threshold: {specificities[best_index]}")
        print(f"Confusion Matrix at best threshold:")
        print(f"TP: {tp}, TN: {tn}, FP: {fp}, FN: {fn}")
        return sensitivities[best_index], specificities[best_index]

    def _plot_training_curve(self, train_losses, val_losses, layer):
        """Return a figure plotting train/validation loss per epoch."""
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(train_losses, label='Train Loss')
        ax.plot(val_losses, label='Validation Loss')
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Loss')
        ax.set_title(f'Layer {layer} Training Curve')
        ax.legend()
        ax.grid()
        return fig

    def _plot_results(self, results_df):
        """Return a figure plotting average performance per layer."""
        fig, ax = plt.subplots(figsize=(10, 6))
        ax.plot(results_df['Average'], marker='o')
        ax.set_xlabel('Layer')
        ax.set_ylabel('Performance')
        ax.set_title('Model Performance by Layer')
        ax.grid()
        return fig

    def _reset_parameters(self, m):
        """Re-initialize any module that supports ``reset_parameters``."""
        if hasattr(m, 'reset_parameters'):
            m.reset_parameters()

    def set_loaders(self, train_loader, val_loader, test_loader):
        """Inject pre-built DataLoaders instead of calling ``setup_data``."""
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader