import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import time
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import os
import sys
sys.path.append('')
from unet_3d import UNet3D
# from unet_3d_aspp import UNet3D_ASPP
from dataloader_segment import create_data_loaders, create_test_data_loaders
from data_utils.rib_dataset import cut_ribfrac_patches
import scipy

os.environ['CUDA_VISIBLE_DEVICES'] = '1'

class DiceLoss(nn.Module):
    """Soft Dice loss on sigmoid probabilities for binary segmentation.

    Args:
        smooth: additive smoothing term that keeps the ratio finite when
            both the prediction and the target are all zeros.
    """
    def __init__(self, smooth=1e-5):
        super(DiceLoss, self).__init__()
        self.smooth = smooth

    def forward(self, inputs, targets):
        """Return ``1 - Dice`` between sigmoid(inputs) and targets.

        Args:
            inputs: raw logits, any shape.
            targets: ground-truth mask of the same shape (values in [0, 1]).
        """
        inputs = inputs.sigmoid()  # map logits into the 0-1 range

        # Fix: reshape(-1) instead of view(-1). view() raises a RuntimeError
        # on non-contiguous tensors (e.g. after permute/transpose/slicing);
        # reshape() handles both cases and is a no-copy view when possible.
        inputs = inputs.reshape(-1)
        targets = targets.reshape(-1)

        intersection = (inputs * targets).sum()
        dice = (2. * intersection + self.smooth) / (
            inputs.sum() + targets.sum() + self.smooth)
        return 1 - dice

class CombinedLoss(nn.Module):
    """Weighted sum of soft Dice loss and BCE-with-logits loss.

    Both terms consume raw logits; DiceLoss applies its own sigmoid and
    nn.BCEWithLogitsLoss fuses the sigmoid internally.

    Args:
        dice_weight: multiplier for the Dice term.
        bce_weight: multiplier for the BCE term.
        smooth: smoothing constant forwarded to DiceLoss.
    """
    def __init__(self, dice_weight=0.5, bce_weight=0.5, smooth=1e-5):
        super().__init__()
        self.dice_weight = dice_weight
        self.bce_weight = bce_weight
        self.dice_loss = DiceLoss(smooth)
        self.bce_loss = nn.BCEWithLogitsLoss()

    def forward(self, inputs, targets):
        """Return the weighted combination of the two loss terms."""
        return (self.dice_weight * self.dice_loss(inputs, targets)
                + self.bce_weight * self.bce_loss(inputs, targets))

# class BoundaryLoss3D(nn.Module):
#     def __init__(self, theta=10, reduction='mean'):
#         super(BoundaryLoss3D, self).__init__()
#         self.theta = theta
#         self.reduction = reduction

#     def compute_3d_distance_map(self, mask):
#         eroded = scipy.ndimage.binary_erosion(mask, iterations=1)
#         boundary = mask.astype(np.uint8) - eroded.astype(np.uint8)
#         distance_map = scipy.ndimage.distance_transform_edt(1 - boundary)
#         return distance_map

#     def forward(self, inputs, targets):
#         # 将logits转换为概率
#         inputs_sigmoid = inputs.sigmoid()
#         device = inputs.device
#         targets = targets.squeeze(1)

#         targets_np = targets.cpu().numpy().astype(np.uint8)
#         gt_dist_maps = []
#         for i in range(len(targets)):
#             gt_dist_map = self.compute_3d_distance_map(targets_np[i])
#             gt_dist_maps.append(torch.from_numpy(gt_dist_map).float())
        
#         gt_dist_tensor = torch.stack(gt_dist_maps).to(device)
        
#         pred_boundaries = torch.abs(
#             inputs_sigmoid.squeeze(1) - 
#             torch.nn.functional.avg_pool3d(inputs_sigmoid, kernel_size=3, padding=1, stride=1).squeeze(1)
#         )
#         boundary_loss = torch.abs(gt_dist_tensor - pred_boundaries)
#         boundary_loss = torch.exp(-boundary_loss / self.theta)
        
#         boundary_mask = (gt_dist_tensor < 2.0)
#         boundary_loss = boundary_loss[boundary_mask]
        
#         if boundary_loss.numel() == 0:
#             return torch.tensor(0.0).to(device)
        
#         if self.reduction == 'mean':
#             return boundary_loss.mean()
#         elif self.reduction == 'sum':
#             return boundary_loss.sum()
#         else:
#             return boundary_loss.mean()

# class CombinedLoss(nn.Module):
#     def __init__(self, dice_weight=0.7, boundary_weight=0.3, boundary_theta=10):
#         super(CombinedLoss, self).__init__()
#         self.dice_loss = DiceLoss()
#         self.boundary_loss = BoundaryLoss3D(theta=boundary_theta)
#         self.dice_weight = dice_weight
#         self.boundary_weight = boundary_weight

#     def forward(self, inputs, targets):
#         dice_loss = self.dice_loss(inputs, targets)
#         boundary_loss = self.boundary_loss(inputs, targets)
        
#         total_loss = self.dice_weight * dice_loss + self.boundary_weight * boundary_loss
#         return total_loss

# class DiceLoss(nn.Module):
#     def __init__(self, smooth=1e-5):
#         super(DiceLoss, self).__init__()
#         self.smooth = smooth
        
#     def forward(self, inputs, targets):
#         inputs = inputs.sigmoid()
#         inputs = inputs.view(-1)
#         targets = targets.view(-1)
        
#         intersection = (inputs * targets).sum()
#         dice = (2. * intersection + self.smooth) / (
#             inputs.sum() + targets.sum() + self.smooth)
#         return 1 - dice

def dice_score(inputs, targets, threshold=0.5, smooth=1e-5):
    """Compute the hard (thresholded) Dice coefficient as a Python float.

    Args:
        inputs: predicted values; binarized with ``inputs > threshold``.
        targets: ground-truth values; binarized with ``targets > 0.5``.
        threshold: cut-off applied to ``inputs``.
        smooth: smoothing term to avoid division by zero on empty masks.
    """
    pred = (inputs > threshold).float()
    gt = (targets > 0.5).float()

    overlap = (pred * gt).sum()
    denom = pred.sum() + gt.sum()

    return ((2. * overlap + smooth) / (denom + smooth)).item()

def iou_score(inputs, targets, threshold=0.5, smooth=1e-5):
    """Compute the hard (thresholded) intersection-over-union as a float.

    Args:
        inputs: predicted values; binarized with ``inputs > threshold``.
        targets: ground-truth values; binarized with ``targets > 0.5``.
        threshold: cut-off applied to ``inputs``.
        smooth: smoothing term to avoid division by zero on empty masks.
    """
    pred = (inputs > threshold).float()
    gt = (targets > 0.5).float()

    overlap = (pred * gt).sum()
    union = pred.sum() + gt.sum() - overlap

    return ((overlap + smooth) / (union + smooth)).item()

def train_model(model, train_loader, val_loader, device, 
                epochs=100, lr=1e-4, save_path=None):
    """Train a binary-segmentation model with a combined Dice + BCE loss.

    Runs ``epochs`` passes over ``train_loader``, validates after every epoch,
    steps a ReduceLROnPlateau scheduler on the validation Dice, and saves the
    model ``state_dict`` to ``save_path`` each time validation Dice improves.
    Scalars go to TensorBoard (default run dir) and a one-line summary per
    epoch is appended to './train_unet/train_log.txt'.

    Args:
        model: nn.Module producing raw logits shaped like the masks.
        train_loader: yields (ct_blocks, mask_blocks) batches.
        val_loader: yields (ct_blocks, mask_blocks) batches.
        device: torch.device to run on.
        epochs: number of training epochs.
        lr: initial learning rate for AdamW.
        save_path: checkpoint path for the best model.
            NOTE(review): the default of None makes torch.save fail on the
            first improvement — callers must always pass a real path.

    Returns:
        ``model`` after the final epoch (weights are from the LAST epoch,
        not necessarily the best checkpoint on disk).
    """

    optimizer = optim.AdamW(model.parameters(), lr=lr)
    # Halve the LR after 5 epochs without improvement in validation Dice.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=5, verbose=True)
    criterion = CombinedLoss()  # equal Dice/BCE weighting (defaults 0.5/0.5)
    
    writer = SummaryWriter()
    best_val_dice = 0.0
    
    for epoch in range(epochs):
        start_time = time.time()
        model.train()
        epoch_train_loss = 0.0
        epoch_train_dice = 0.0
        epoch_train_iou = 0.0
        for i, (ct_blocks, mask_blocks) in enumerate(tqdm(train_loader, desc=f"Epoch {epoch+1}/{epochs}")):
            ct_blocks = ct_blocks.to(device)
            mask_blocks = mask_blocks.to(device)
            
            outputs = model(ct_blocks)
            loss = criterion(outputs, mask_blocks)
            
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            
            epoch_train_loss += loss.item()
            # NOTE(review): metrics threshold the RAW logits at 0.5 (no
            # sigmoid), which corresponds to a probability cut of ~0.62 —
            # confirm this is intentional (test_model applies sigmoid).
            dice = dice_score(outputs.detach(), mask_blocks)
            iou = iou_score(outputs.detach(), mask_blocks)
            epoch_train_dice += dice
            epoch_train_iou += iou
            if i % 100 == 0:
                # Per-batch logging every 100 steps, keyed by global step.
                writer.add_scalar('Loss/train_batch', loss.item(), epoch * len(train_loader) + i)
                writer.add_scalar('Dice/train_batch', dice, epoch * len(train_loader) + i)
                writer.add_scalar('IoU/train_batch', iou, epoch * len(train_loader) + i)
        avg_train_loss = epoch_train_loss / len(train_loader)
        avg_train_dice = epoch_train_dice / len(train_loader)
        avg_train_iou = epoch_train_iou / len(train_loader)
        
        # ---- validation pass (no gradients) ----
        model.eval()
        val_loss = 0.0
        val_dice = 0.0
        val_iou = 0.0
        
        with torch.no_grad():
            for ct_blocks, mask_blocks in val_loader:
                ct_blocks = ct_blocks.to(device)
                mask_blocks = mask_blocks.to(device)
                
                outputs = model(ct_blocks)
                loss = criterion(outputs, mask_blocks)
                val_loss += loss.item()
                val_dice += dice_score(outputs, mask_blocks)
                val_iou += iou_score(outputs, mask_blocks)
        
        avg_val_loss = val_loss / len(val_loader)
        avg_val_dice = val_dice / len(val_loader)
        avg_val_iou = val_iou / len(val_loader) 
        
        # Scheduler watches validation Dice (mode='max' above).
        scheduler.step(avg_val_dice)
        if avg_val_dice > best_val_dice:
            best_val_dice = avg_val_dice
            torch.save(model.state_dict(), save_path)
            print(f"✅ 保存新的最佳模型，Dice: {best_val_dice:.4f}, IoU: {avg_val_iou:.4f}")
        
        writer.add_scalars('Loss', {
            'train': avg_train_loss,
            'val': avg_val_loss
        }, epoch)
        
        writer.add_scalars('Dice', {
            'train': avg_train_dice,
            'val': avg_val_dice
        }, epoch)
        
        writer.add_scalars('IoU', {
            'train': avg_train_iou,
            'val': avg_val_iou
        }, epoch)
        
        epoch_time = time.time() - start_time
        # NOTE(review): this rebinding shadows the ``lr`` parameter; harmless
        # here (only used for printing the current LR) but easy to misread.
        lr = optimizer.param_groups[0]['lr']
        
        print(f"Epoch {epoch+1}/{epochs} | Time: {epoch_time:.2f}s")
        print(f"LR: {lr:.1e} | Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f}")
        print(f"Train Dice: {avg_train_dice:.4f} | Val Dice: {avg_val_dice:.4f} | Train IoU: {avg_train_iou:.4f} | Val IoU: {avg_val_iou:.4f}")
        print("-" * 60)
        # Append a plain-text epoch summary; path is hard-coded relative to CWD.
        with open('./train_unet/train_log.txt', 'a') as f:
            f.write(f"Epoch {epoch+1}/{epochs} | Time: {epoch_time:.2f}s | LR: {lr:.1e} | Train Loss: {avg_train_loss:.4f} | Val Loss: {avg_val_loss:.4f} | Train Dice: {avg_train_dice:.4f} | Val Dice: {avg_val_dice:.4f} | Train IoU: {avg_train_iou:.4f} | Val IoU: {avg_val_iou:.4f}\n")

    writer.close()
    print(f"训练完成！最佳验证集Dice系数: {best_val_dice:.4f}")
    
    return model

def test_model(model, test_loader, device, save_dir=None):
    """Evaluate ``model`` on the test set and save binarized predictions.

    Computes average combined loss, Dice, and IoU over ``test_loader`` and
    writes one ``{ct_id}_binary.npy`` volume per sample under
    ``save_dir/pred/`` (sigmoid probabilities thresholded at 0.5).

    Args:
        model: trained nn.Module producing raw logits.
        test_loader: yields (ct_blocks, mask_blocks, ct_ids) batches —
            note the extra id field compared with the train/val loaders.
        device: torch.device to run on.
        save_dir: output directory root.
            NOTE(review): the default of None makes os.makedirs fail —
            callers must always pass a real directory.

    Returns:
        The average test Dice score (float).
    """
    os.makedirs(save_dir, exist_ok=True)  # create the output directory
    # os.makedirs(os.path.join(save_dir, 'ct'), exist_ok=True)
    # os.makedirs(os.path.join(save_dir, 'mask'), exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'pred'), exist_ok=True)
    
    model.eval()
    test_dice = 0.0
    test_iou = 0.0
    criterion = CombinedLoss()  # equal Dice/BCE weighting (defaults 0.5/0.5)
    test_loss = 0.0
    
    with torch.no_grad():
        for ct_blocks, mask_blocks, ct_ids in tqdm(test_loader, desc="测试"):
            ct_blocks = ct_blocks.to(device)
            mask_blocks = mask_blocks.to(device)
            
            outputs = model(ct_blocks)
            loss = criterion(outputs, mask_blocks)
            test_loss += loss.item()
            # NOTE(review): unlike the saved predictions below, these metrics
            # threshold the raw logits (no sigmoid) — confirm intentional.
            test_dice += dice_score(outputs, mask_blocks)
            test_iou += iou_score(outputs, mask_blocks)

            pred_probs = torch.sigmoid(outputs).cpu().numpy()  # probabilities in [0, 1]
            pred_binary = (pred_probs > 0.5).astype(np.float32)  # binarized prediction
            ct_numpy = ct_blocks.cpu().numpy()
            mask_numpy = mask_blocks.cpu().numpy()
            
            for i in range(len(ct_ids)):
                ct_id = ct_ids[i]
                
                # ct_path = os.path.join(save_dir, 'ct', f'{ct_id}_ct.npy')
                # np.save(ct_path, ct_numpy[i, 0]) 
                
                # mask_path = os.path.join(save_dir, 'mask', f'{ct_id}_mask.npy')
                # np.save(mask_path, mask_numpy[i, 0])
                
                # pred_prob_path = os.path.join(save_dir, 'pred', f'{ct_id}_prob.npy')
                # np.save(pred_prob_path, pred_probs[i, 0])
                
                # Save only the binary mask; channel 0 drops the channel dim.
                pred_binary_path = os.path.join(save_dir, 'pred', f'{ct_id}_binary.npy')
                np.save(pred_binary_path, pred_binary[i, 0])
    
    avg_test_loss = test_loss / len(test_loader)
    avg_test_dice = test_dice / len(test_loader)
    avg_test_iou = test_iou / len(test_loader)

    print(f"\n测试结果 - Loss: {avg_test_loss:.4f} | Dice: {avg_test_dice:.4f} | IoU: {avg_test_iou:.4f}")
    print(f"预测结果已保存至: {save_dir}")
    return avg_test_dice

if __name__ == "__main__":
    # Command-line entry point: train or test the 3D UNet.
    import argparse
    parser = argparse.ArgumentParser(description="3D UNet 训练脚本")
    parser.add_argument("--mode", type=str, default="test", help="训练模式: train, test")
    parser.add_argument("--original_data_dir", type=str, default="./ribfrac-dataset/test", help="原始数据目录")
    parser.add_argument("--data_dir", type=str, default="./ribfrac-dataset/test", help="处理后数据目录")  # for the formal test, switch to ribfrac-dataset/exam_data
    parser.add_argument("--model_dir", type=str, default="./train_unet/best_ribfrac_model.pth", help="模型文件路径")
    parser.add_argument("--batch_size", type=int, default=8, help="批次大小")
    parser.add_argument("--num_workers", type=int, default=2, help="数据加载线程数")
    parser.add_argument("--epochs", type=int, default=50, help="训练轮数")
    parser.add_argument("--lr", type=float, default=1e-4, help="学习率")
    args = parser.parse_args()
    
    original_data_dir = args.original_data_dir
    data_dir = args.data_dir
    model_dir = args.model_dir
    batch_size = args.batch_size
    num_workers = args.num_workers
    epochs = args.epochs
    lr = args.lr

    train_dir = os.path.join(data_dir, "train")
    val_dir = os.path.join(data_dir, "val")
    test_dir = os.path.join(data_dir)
    # cut_ribfrac_patches(original_data_dir, data_dir)  # skip when patches already cut

    # Build only the loaders the chosen mode needs.
    if args.mode == "train":
        train_loader, val_loader = create_data_loaders(
            train_dir, val_dir, 
            batch_size=batch_size, num_workers=num_workers
        )
    if args.mode == "test":
        test_loader = create_test_data_loaders(
            test_dir, 
            batch_size=batch_size, num_workers=num_workers
        )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Fix: only query the CUDA device name when a GPU is actually in use;
    # the unconditional torch.cuda.get_device_name(0) crashed on CPU-only hosts.
    if device.type == "cuda":
        print(f"使用设备: {device} - {torch.cuda.get_device_name(0)}")
    else:
        print(f"使用设备: {device}")
    model = UNet3D(n_channels=1, n_classes=1).to(device)
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"模型参数数量: {num_params/1e6:.2f}M") 

    if args.mode == "train":
        best_model = train_model(
            model, 
            train_loader, 
            val_loader, 
            device, 
            epochs=epochs, 
            lr=lr,
            save_path='./train_unet/best_ribfrac_model.pth'
        )
    # batch 16 reached dice 0.8170; the 0.8170 vs 0.8150 runs differ in how the loss was computed
    if args.mode == "test":
        # Fix: map_location lets a CUDA-trained checkpoint load on CPU too.
        model.load_state_dict(torch.load(model_dir, map_location=device))
        print("\n在测试集上评估最佳模型...")
        test_model(model, test_loader, device, save_dir='./segment_test_results/3dunet')
    
    # Example invocation:
    # python train_unet/train_3dunet.py --mode test --original_data_dir "./ribfrac-patches/exam_data" --data_dir "./ribfrac-dataset/test" --model_dir "./model_trained_seg/3dunet_ribfrac_model_final.pth"