from __future__ import print_function

import argparse
import os
import time, platform
from collections import OrderedDict
import numpy as np

# Fix matplotlib backend to avoid Qt GUI errors
import matplotlib
matplotlib.use('Agg')  # Use non-GUI backend

import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.cuda.amp import autocast, GradScaler

from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
                   visualize_result,count_parameters)

# Per-scale weights for the 7 edge maps DexiNed outputs (6 side outputs +
# fused output); zipped against the prediction list when summing the loss.
LOSS_WEIGHTS = [0.7, 0.7, 1.1, 1.1, 0.3, 0.3, 1.3]

# Dataset directory layouts differ between Linux and other platforms.
IS_LINUX = platform.system() == "Linux"


def _load_state_dict_fail_fast(model: nn.Module, state_dict: dict) -> None:
    """以fail-fast方式加载权重，自动处理DataParallel前缀差异。"""
    assert isinstance(state_dict, dict), "state_dict必须是字典"
    assert len(state_dict) > 0, "state_dict为空"
    candidate_model_keys = ('state_dict', 'model', 'weights')
    for candidate_key in candidate_model_keys:
        if candidate_key in state_dict:
            nested_state = state_dict[candidate_key]
            assert isinstance(nested_state, dict), f"{candidate_key}键对应对象不是权重字典"
            state_dict = nested_state
            break
    incoming_keys = list(state_dict.keys())
    assert len(incoming_keys) > 0, "权重字典不包含任何键"
    model_keys = list(model.state_dict().keys())
    assert len(model_keys) > 0, "模型state_dict为空"
    needs_module_prefix = all(key.startswith('module.') for key in model_keys)
    has_module_prefix = all(key.startswith('module.') for key in incoming_keys)
    if needs_module_prefix and not has_module_prefix:
        state_dict = OrderedDict((f'module.{key}', value) for key, value in state_dict.items())
    elif not needs_module_prefix and has_module_prefix:
        state_dict = OrderedDict((key[len('module.'):] if key.startswith('module.') else key, value)
                                 for key, value in state_dict.items())
    model_key_set = set(model.state_dict().keys())
    state_key_set = set(state_dict.keys())
    missing = sorted(model_key_set - state_key_set)
    unexpected = sorted(state_key_set - model_key_set)
    assert not missing, f"缺失权重键: {missing[:5]}"  # 仅显示前5个以保持输出可读
    assert not unexpected, f"存在多余权重键: {unexpected[:5]}"
    model.load_state_dict(state_dict)
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
                    log_interval_vis, tb_writer, args=None, loss_weights=None,
                    scaler=None, use_amp=False):
    """Train `model` for one epoch and return the mean per-batch loss.

    Args:
        epoch: Current epoch index (0-based); used only for logging.
        dataloader: Yields dicts with 'images' (BxCxHxW) and 'labels' (BxHxW).
        model: Network returning a list of per-scale edge-map predictions.
        criterion: Loss with signature (preds, labels, weight); summed over scales.
        optimizer: Optimizer stepped once per batch.
        device: torch.device the batches are moved to.
        log_interval_vis: Dump a visualization every this many batches.
        tb_writer: Optional TensorBoard SummaryWriter.
        args: Must provide `output_dir`; also forwarded to visualize_result.
        loss_weights: Per-scale loss weights; defaults to LOSS_WEIGHTS.
        scaler: Optional GradScaler for mixed-precision training.
        use_amp: AMP is active only when this is True AND `scaler` is given.

    Returns:
        float: mean of the per-batch loss values over the epoch.
    """
    imgs_res_folder = os.path.join(args.output_dir, 'current_res')
    os.makedirs(imgs_res_folder,exist_ok=True)

    # Put model in training mode
    model.train()
    # l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1]  # for bdcn ori loss
     # before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
    weights = loss_weights if loss_weights is not None else LOSS_WEIGHTS
    # l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
    #             [0.1, 1.], [0.1, 1.], [0.1, 1.],
    #             [0.01, 4.]]  # for cats loss
    loss_avg =[]
    # AMP only when explicitly requested AND a scaler was supplied.
    scaler_enabled = bool(use_amp and scaler is not None)
    for batch_id, sample_batched in enumerate(dataloader):
        images = sample_batched['images'].to(device)  # BxCxHxW
        labels = sample_batched['labels'].to(device)  # BxHxW
        optimizer.zero_grad()
        with autocast(enabled=scaler_enabled):
            preds_list = model(images)
        assert len(preds_list) == len(weights), "网络输出数量与损失权重不一致"
        # Cast autocast (possibly fp16) outputs back to fp32 for the loss.
        preds_for_loss = [pred.float() for pred in preds_list]
        # loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)])  # cats_loss
        loss = sum([criterion(preds, labels,l_w) for preds, l_w in zip(preds_for_loss,weights)]) # bdcn_loss
        # loss = sum([criterion(preds, labels) for preds in preds_list])  #HED loss, rcf_loss
        if scaler_enabled:
            # Scaled backward + step; scaler.update() adjusts the loss scale.
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()
        loss_avg.append(loss.item())

        # Clear memory after optimizer step
        if device.type == 'cuda' and batch_id % 10 == 0:
            torch.cuda.empty_cache()

        # NOTE(review): with `==` this logs to TensorBoard exactly once in the
        # whole run (epoch 0, batch 100) — confirm if per-epoch logging was meant.
        if epoch==0 and (batch_id==100 and tb_writer is not None):
            tmp_loss = np.array(loss_avg).mean()
            tb_writer.add_scalar('loss', tmp_loss,epoch)

        if batch_id % 5 == 0:
            print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
                  .format(epoch, batch_id, len(dataloader), loss.item()))
        if batch_id % log_interval_vis == 0:
            # Assemble [input, ground truth, per-scale predictions] for one
            # sample and write a composite preview image to disk.
            res_data = []

            # Use first sample or middle sample (safe for any batch size)
            sample_idx = min(images.size(0) - 1, images.size(0) // 2)

            img = images.cpu().numpy()
            res_data.append(img[sample_idx])

            ed_gt = labels.cpu().numpy()
            res_data.append(ed_gt[sample_idx])

            # tmp_pred = tmp_preds[sample_idx,...]
            for i in range(len(preds_list)):
                tmp = preds_list[i]
                tmp = tmp[sample_idx]
                # print(tmp.shape)
                # Sigmoid maps logits to [0, 1]; unsqueeze restores a channel dim.
                tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
                tmp = tmp.cpu().detach().numpy()
                res_data.append(tmp)

            vis_imgs = visualize_result(res_data, arg=args)
            del tmp, res_data

            # Shrink the composite by 20% before annotating it.
            vis_imgs = cv2.resize(vis_imgs,
                                  (int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
            img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
                .format(epoch, batch_id, len(dataloader), loss.item())

            # NOTE(review): named BLACK, but (0, 0, 255) is red in OpenCV BGR order.
            BLACK = (0, 0, 255)
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_size = 1.1
            font_color = BLACK
            font_thickness = 2
            x, y = 30, 30
            vis_imgs = cv2.putText(vis_imgs,
                                   img_test,
                                   (x, y),
                                   font, font_size, font_color, font_thickness, cv2.LINE_AA)
            # Overwrites the same file each time: only the latest preview is kept.
            cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
    loss_avg = np.array(loss_avg).mean()
    return loss_avg

def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None,
                       criterion=None, loss_weights=None, tb_writer=None,
                       global_step=None, stage='val', log_to_writer=False,
                       use_amp=False):
    """Evaluate the model over `dataloader` and return a metrics dict.

    Every batch's final prediction map is saved to `output_dir` via
    save_image_batch_to_disk. Pixel-level precision/recall/F1 are computed
    by thresholding sigmoid(last prediction) and the labels at 0.5. When a
    `criterion` is supplied, the batch-averaged loss is reported under
    'loss'. Fails fast (AssertionError) on empty loaders or degenerate
    metric denominators. Optionally logs metrics to TensorBoard as
    '<stage>/<key>' at `global_step`.

    Returns:
        dict: {'precision', 'recall', 'f1'} plus 'loss' when criterion is set.
    """
    # XXX This is not really validation, but testing

    # Put model in eval mode
    model.eval()

    total_loss = 0.0
    total_batches = 0
    # Pixel-wise confusion-matrix accumulators over the whole set.
    true_positives = 0
    false_positives = 0
    false_negatives = 0
    weights = loss_weights if loss_weights is not None else LOSS_WEIGHTS
    # Autocast is only meaningful on CUDA devices.
    amp_enabled = bool(use_amp and device.type == 'cuda')

    with torch.no_grad():
        for _, sample_batched in enumerate(dataloader):
            images = sample_batched['images'].to(device)
            labels = sample_batched['labels'].to(device)
            file_names = sample_batched['file_names']
            image_shape = sample_batched['image_shape']

            # Clear cache before forward pass to ensure maximum memory availability
            if device.type == 'cuda':
                torch.cuda.empty_cache()

            with autocast(enabled=amp_enabled):
                preds_list = model(images)
            assert len(preds_list) == len(weights), "验证阶段网络输出数量与损失权重不一致"
            # Cast autocast outputs back to fp32 for loss/metric computation.
            preds_for_loss = [pred.float() for pred in preds_list]

            batch_loss = None
            if criterion is not None:
                batch_loss = sum([criterion(preds, labels, l_w) for preds, l_w in zip(preds_for_loss, weights)])
            # Metrics and saved images use only the last (fused) prediction.
            final_pred = preds_for_loss[-1]

            if batch_loss is not None:
                total_loss += batch_loss.item()
                del batch_loss  # Free memory

            prob = torch.sigmoid(final_pred)
            pred_mask = (prob >= 0.5).long()
            label_mask = (labels >= 0.5).long()
            assert torch.all(label_mask >= 0), "标签存在非法取值"
            true_positives += torch.sum((pred_mask == 1) & (label_mask == 1)).item()
            false_positives += torch.sum((pred_mask == 1) & (label_mask == 0)).item()
            false_negatives += torch.sum((pred_mask == 0) & (label_mask == 1)).item()
            total_batches += 1

            save_image_batch_to_disk(final_pred,
                                     output_dir,
                                     file_names,img_shape=image_shape,
                                     arg=arg)

            # Free memory after each batch
            del images, labels, preds_list, final_pred, prob, pred_mask, label_mask
            if device.type == 'cuda':
                torch.cuda.empty_cache()

    assert total_batches > 0, "验证集中无数据"

    metrics = {}
    if criterion is not None:
        metrics['loss'] = total_loss / total_batches

    precision_den = true_positives + false_positives
    recall_den = true_positives + false_negatives
    # Fail fast rather than silently reporting 0 metrics on degenerate sets.
    assert precision_den > 0, "验证集中无正样本用于计算Precision"
    assert recall_den > 0, "验证集中无正样本用于计算Recall"
    precision = true_positives / precision_den
    recall = true_positives / recall_den
    assert precision + recall > 0, "Precision与Recall之和为零，无法计算F1"
    f1_score = 2 * precision * recall / (precision + recall)
    metrics['precision'] = precision
    metrics['recall'] = recall
    metrics['f1'] = f1_score

    if tb_writer is not None and log_to_writer:
        assert global_step is not None, "记录TensorBoard日志需要提供global_step"
        for key, value in metrics.items():
            tb_writer.add_scalar(f'{stage}/{key}', value, global_step)

    return metrics

def test(checkpoint_path, dataloader, model, device, output_dir, args, use_amp=False):
    """Run inference over the test dataloader, save predictions, report FPS.

    Args:
        checkpoint_path: Path to the .pth checkpoint to restore.
        dataloader: Yields dicts with 'images', 'file_names', 'image_shape'
            (and 'labels' unless args.test_data == "CLASSIC").
        model: Network to evaluate.
        device: torch.device to run on.
        output_dir: Directory where predictions are written.
        args: Needs at least `test_data`; forwarded to save_image_batch_to_disk.
        use_amp: Run the forward pass under autocast (CUDA only).

    Raises:
        FileNotFoundError: if `checkpoint_path` does not exist.
    """
    if not os.path.isfile(checkpoint_path):
        raise FileNotFoundError(
            f"Checkpoint file not found: {checkpoint_path}")
    print(f"Restoring weights from: {checkpoint_path}")
    weights = torch.load(checkpoint_path, map_location=device)
    _load_state_dict_fail_fast(model, weights)

    # Put model in evaluation mode
    model.eval()
    amp_enabled = bool(use_amp and device.type == 'cuda')

    with torch.no_grad():
        total_duration = []
        for batch_id, sample_batched in enumerate(dataloader):
            images = sample_batched['images'].to(device)
            if not args.test_data == "CLASSIC":
                labels = sample_batched['labels'].to(device)
            file_names = sample_batched['file_names']
            image_shape = sample_batched['image_shape']
            print(f"input tensor shape: {images.shape}")
            # images = images[:, [2, 1, 0], :, :]

            # Drain pending GPU work BEFORE starting the timer so the measured
            # interval covers only this forward pass (the original started the
            # clock first, folding prior async work into the measurement).
            if device.type == 'cuda':
                torch.cuda.synchronize()
            end = time.perf_counter()
            with autocast(enabled=amp_enabled):
                preds = model(images)
            # Cast autocast (possibly fp16) outputs back to fp32 before saving.
            preds = [p.float() for p in preds] if isinstance(preds, (list, tuple)) else preds.float()
            if device.type == 'cuda':
                torch.cuda.synchronize()
            tmp_duration = time.perf_counter() - end
            total_duration.append(tmp_duration)

            save_image_batch_to_disk(preds,
                                     output_dir,
                                     file_names,
                                     image_shape,
                                     arg=args)
            # Guard the cache flush so CPU-only runs never touch CUDA.
            if device.type == 'cuda':
                torch.cuda.empty_cache()

    total_duration = np.sum(np.array(total_duration))
    print("******** Testing finished in", args.test_data, "dataset. *****")
    # "%.4f" fixes the broken "%f.4" spec, which printed full precision
    # followed by a literal ".4".
    print("FPS: %.4f" % (len(dataloader) / total_duration))

def testPich(checkpoint_path, dataloader, model, device, output_dir, args, use_amp=False):
    """Test the model on each image AND its channel-swapped copy.

    Both prediction sets are handed to save_image_batch_to_disk with
    is_inchannel=True so they can be fused on disk.

    Args:
        checkpoint_path: Path to the .pth checkpoint to restore.
        dataloader: Yields dicts with 'images', 'file_names', 'image_shape'
            (and 'labels' unless args.test_data == "CLASSIC").
        model: Network to evaluate.
        device: torch.device to run on.
        output_dir: Directory where predictions are written.
        args: Needs at least `test_data`; forwarded to save_image_batch_to_disk.
        use_amp: Run the forward passes under autocast (CUDA only).

    Raises:
        FileNotFoundError: if `checkpoint_path` does not exist.
    """
    if not os.path.isfile(checkpoint_path):
        raise FileNotFoundError(
            f"Checkpoint file not found: {checkpoint_path}")
    print(f"Restoring weights from: {checkpoint_path}")
    weights = torch.load(checkpoint_path, map_location=device)
    _load_state_dict_fail_fast(model, weights)

    # Put model in evaluation mode
    model.eval()
    amp_enabled = bool(use_amp and device.type == 'cuda')

    with torch.no_grad():
        total_duration = []
        for batch_id, sample_batched in enumerate(dataloader):
            images = sample_batched['images'].to(device)
            if not args.test_data == "CLASSIC":
                labels = sample_batched['labels'].to(device)
            file_names = sample_batched['file_names']
            image_shape = sample_batched['image_shape']
            print(f"input tensor shape: {images.shape}")
            start_time = time.time()
            # images2 = images[:, [1, 0, 2], :, :]  #GBR
            images2 = images[:, [2, 1, 0], :, :] # RGB
            with autocast(enabled=amp_enabled):
                preds = model(images)
                preds2 = model(images2)
            # Cast autocast (possibly fp16) outputs back to fp32 before saving.
            preds = [p.float() for p in preds] if isinstance(preds, (list, tuple)) else preds.float()
            preds2 = [p.float() for p in preds2] if isinstance(preds2, (list, tuple)) else preds2.float()
            tmp_duration = time.time() - start_time
            total_duration.append(tmp_duration)
            save_image_batch_to_disk([preds, preds2],
                                     output_dir,
                                     file_names,
                                     image_shape,
                                     arg=args, is_inchannel=True)
            # Guard the cache flush so CPU-only runs never touch CUDA.
            if device.type == 'cuda':
                torch.cuda.empty_cache()

    total_duration = np.array(total_duration)
    print("******** Testing finished in", args.test_data, "dataset. *****")
    # "%.4f" fixes the broken "%f.4" spec in both summary lines.
    print("Average time per image: %.4f" % total_duration.mean(), "seconds")
    print("Time spent in the Dataset: %.4f" % total_duration.sum(), "seconds")

def parse_args():
    """Parse command line arguments.

    Dataset-dependent defaults are resolved in two phases: a first
    `parse_known_args` reads only `--choose_test_data` so the chosen test
    dataset's paths and image sizes can seed the defaults of the remaining
    options, which are then parsed in full.

    Returns:
        argparse.Namespace: the fully parsed arguments.
    """
    parser = argparse.ArgumentParser(description='DexiNed trainer.')
    parser.add_argument('--choose_test_data',
                        type=int,
                        default=-1,
                        help='Already set the dataset for testing choice: 0 - 8')
    # ----------- test -------0--

    # Parse known args first so this early parse does not fail with
    # "unrecognized arguments" for options registered further below.
    _partial = parser.parse_known_args()[0]
    TEST_DATA = DATASET_NAMES[_partial.choose_test_data]  # max 8
    test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
    # Training is the default mode; set to True here or override on the CLI to test.
    is_testing = False  # default: training mode

    # Training settings
    # Use the BSDS configuration (pair-list based), compatible with exported data.
    TRAIN_DATA = 'BSDS'
    train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
    train_dir = train_inf['data_dir']

    # Data parameters
    parser.add_argument('--input_dir',
                        type=str,
                        default=train_dir,
                        help='the path to the directory with the input data.')
    parser.add_argument('--input_val_dir',
                        type=str,
                        default=test_inf['data_dir'],
                        help='the path to the directory with the input data for validation.')
    parser.add_argument('--output_dir',
                        type=str,
                        default='checkpoints',
                        help='the path to output the results.')
    parser.add_argument('--train_data',
                        type=str,
                        choices=DATASET_NAMES,
                        default=TRAIN_DATA,
                        help='Name of the dataset.')
    parser.add_argument('--test_data',
                        type=str,
                        choices=DATASET_NAMES,
                        default=TEST_DATA,
                        help='Name of the dataset.')
    parser.add_argument('--test_list',
                        type=str,
                        default=test_inf['test_list'],
                        help='Dataset sample indices list.')
    parser.add_argument('--train_list',
                        type=str,
                        default=train_inf['train_list'],
                        help='Dataset sample indices list.')
    parser.add_argument('--is_testing', type=str2bool,
                        default=is_testing,
                        help='Script in testing mode.')
    parser.add_argument('--double_img',
                        type=str2bool,
                        default=False,
                        help='True: use same 2 imgs changing channels')  # Just for test
    parser.add_argument('--resume',
                        type=str2bool,
                        default=False,
                        help='是否从指定 checkpoint 继续训练（需配合 --checkpoint_data）')
    parser.add_argument('--resume-latest',
                        action='store_true',
                        help='自动从输出目录下最新的 checkpoint 继续训练（忽略 --checkpoint_data）')
    parser.add_argument('--checkpoint_data',
                        type=str,
                        default='../data/10_model.pth',  # 4 6 7 9 14
                        help='Checkpoint path from which to restore model weights from.')
    parser.add_argument('--test_img_width',
                        type=int,
                        default=test_inf['img_width'],
                        help='Image width for testing.')
    parser.add_argument('--test_img_height',
                        type=int,
                        default=test_inf['img_height'],
                        help='Image height for testing.')
    parser.add_argument('--disable_val_size_alignment',
                        action='store_true',
                        help='关闭验证集与训练集尺寸自动对齐。')
    parser.add_argument('--res_dir',
                        type=str,
                        default='result',
                        help='Result directory')
    parser.add_argument('--log_interval_vis',
                        type=int,
                        default=10,
                        help='The number of batches to wait before printing test predictions.')

    parser.add_argument('--epochs',
                        type=int,
                        default=17,
                        metavar='N',
                        help='Number of training epochs (default: 17).')
    parser.add_argument('--save_interval',
                        type=int,
                        default=1,
                        help='Save checkpoint every N epochs. Set <=0 to disable periodic saves; final epoch is always saved.')
    parser.add_argument('--lr',
                        default=1e-3,
                        type=float,
                        help='Initial learning rate.')
    parser.add_argument('--wd',
                        type=float,
                        default=1e-8,
                        metavar='WD',
                        help='weight decay (Good 1e-8) in TF1=0')  # 1e-8 -> BIRND/MDBD, 0.0 -> BIPED
    parser.add_argument('--adjust_lr',
                        default=[110, 1500, 1900],
                        type=int,
                        nargs='+',
                        help='Learning rate step size.')  # [5,10]BIRND [10,15]BIPED/BRIND
    parser.add_argument('--reset_lr_on_resume',
                        action='store_true',
                        help='重新计算学习率以匹配新的 adjust_lr 调度（忽略 checkpoint 中的优化器学习率）')
    parser.add_argument('--batch_size',
                        type=int,
                        default=8,
                        metavar='B',
                        help='the mini-batch size (default: 8)')
    parser.add_argument('--workers',
                        default=0,
                        type=int,
                        help='The number of workers for the dataloaders.')
    parser.add_argument('--tensorboard', type=str2bool,
                        default=True,
                        help='Use Tensorboard for logging.')
    parser.add_argument('--amp', dest='amp',
                        action='store_true',
                        help='Enable automatic mixed precision training (CUDA only).')
    parser.add_argument('--no-amp', dest='amp',
                        action='store_false',
                        help='Disable automatic mixed precision training.')
    parser.set_defaults(amp=True)
    parser.add_argument('--img_width',
                        type=int,
                        default=train_inf['img_width'],
                        help='Image width for training.')  # BIPED 400 BSDS 768/320 MDBD 480
    parser.add_argument('--img_height',
                        type=int,
                        default=train_inf['img_height'],
                        help='Image height for training.')  # BIPED 480 BSDS 256/320
    parser.add_argument('--channel_swap',
                        default=[2, 1, 0],
                        type=int)
    parser.add_argument('--crop_img',
                        default=0.0,
                        type=float,
                        help='Probability of cropping training images (0.0-1.0). If 0, always resize.')
    parser.add_argument('--grayscale',
                        action='store_true',
                        help='训练灰度图模式（自动调整模型输入通道为1）')
    parser.add_argument('--gray_method',
                        type=str,
                        default='gradient_fusion',
                        choices=['luminance', 'max_gradient', 'gradient_fusion', 'max_channel', 'desaturation'],
                        help='灰度转换方法: luminance(标准), max_gradient(最大梯度), gradient_fusion(梯度融合,默认), max_channel(最大通道), desaturation(平均)')
    parser.add_argument('--mean_pixel_values',
                        default=[103.939, 116.779, 123.68],
                        type=float)  # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
    parser.add_argument('--hy_data_dir',
                        type=str,
                        default='/home/dd/working/data/dexined_hy',
                        help='额外数据集根目录，用于重复扩充训练集。')
    parser.add_argument('--hy_train_list',
                        type=str,
                        default='train_pair.lst',
                        help='额外数据集的样本列表文件名。')
    parser.add_argument('--hy_val_list',
                        type=str,
                        default='val_pair.lst',
                        help='额外数据集的验证样本列表文件名。')
    parser.add_argument('--hy_repeat',
                        type=int,
                        default=100,
                        help='额外数据集重复次数。')
    parser.add_argument('--hy_output_name',
                        type=str,
                        default='train_pair_hybrid.lst',
                        help='混合训练列表输出文件名。')
    parser.add_argument('--hy_val_output_name',
                        type=str,
                        default='val_pair_hybrid.lst',
                        help='混合验证列表输出文件名。')
    parser.add_argument('--base_data_dir',
                        type=str,
                        default='',
                        help='基础数据集根目录，留空则使用默认配置。')
    parser.add_argument('--base_train_list',
                        type=str,
                        default='',
                        help='基础数据集训练列表文件，留空沿用默认值。')
    parser.add_argument('--base_val_list',
                        type=str,
                        default='',
                        help='基础数据集验证列表文件，留空沿用默认值。')

    # Data augmentation options
    parser.add_argument('--use_augmentation',
                        action='store_true',
                        help='启用数据增强功能')
    parser.add_argument('--aug_flip',
                        action='store_true',
                        default=True,
                        help='启用镜像翻转增强')
    parser.add_argument('--aug_rotation',
                        action='store_true',
                        default=True,
                        help='启用旋转增强')
    parser.add_argument('--aug_brightness',
                        action='store_true',
                        default=True,
                        help='启用亮度增强')
    parser.add_argument('--aug_contrast',
                        action='store_true',
                        default=True,
                        help='启用对比度增强')
    parser.add_argument('--aug_noise',
                        action='store_true',
                        default=True,
                        help='启用噪声增强')

    # Data augmentation probability options
    parser.add_argument('--aug_flip_prob',
                        type=float,
                        default=0.5,
                        help='镜像翻转应用概率 (0.0-1.0)')
    parser.add_argument('--aug_rotation_prob',
                        type=float,
                        default=0.3,
                        help='旋转应用概率 (0.0-1.0)')
    parser.add_argument('--aug_brightness_prob',
                        type=float,
                        default=0.3,
                        help='亮度调整应用概率 (0.0-1.0)')
    parser.add_argument('--aug_contrast_prob',
                        type=float,
                        default=0.3,
                        help='对比度调整应用概率 (0.0-1.0)')
    parser.add_argument('--aug_noise_prob',
                        type=float,
                        default=0.2,
                        help='噪声添加应用概率 (0.0-1.0)')

    # Data augmentation strength options
    parser.add_argument('--aug_rotation_range',
                        type=float,
                        default=15.0,
                        help='旋转角度范围 (度)')
    parser.add_argument('--aug_brightness_range',
                        type=float,
                        default=0.2,
                        help='亮度调整范围')
    parser.add_argument('--aug_contrast_range',
                        type=float,
                        default=0.2,
                        help='对比度调整范围')
    parser.add_argument('--aug_noise_std',
                        type=float,
                        default=10.0,
                        help='噪声标准差')

    # Peripheral background replacement augmentation options
    parser.add_argument('--aug_peripheral_bg',
                        action='store_true',
                        default=False,
                        help='启用外围背景替换增强（仅替换最外层轮廓以外区域）')
    parser.add_argument('--aug_peripheral_bg_dir',
                        type=str,
                        default='',
                        help='外围背景图像目录路径')
    parser.add_argument('--aug_peripheral_bg_prob',
                        type=float,
                        default=0.3,
                        help='外围背景替换应用概率 (0.0-1.0)')
    parser.add_argument('--aug_peripheral_blur_kernel',
                        type=int,
                        default=15,
                        help='高斯模糊核大小（像素），用于平滑边界过渡（奇数，建议11-21）')
    parser.add_argument('--aug_shadow_preservation',
                        action='store_true',
                        help='启用影子保留（正片叠底模式）在背景增强中')
    parser.add_argument('--aug_invert_mask',
                        action='store_true',
                        help='反转Mask的前景/背景定义（默认：255=前景/保护，0=背景/替换；启用后：0=前景，255=背景）')

    # Mask option: restricts where background augmentation may be applied.
    parser.add_argument('--use_mask',
                        action='store_true',
                        help='是否使用mask限制背景增强区域（仅增强mask=0的区域，保护mask=1的前景物体）')

    args = parser.parse_args()
    return args


def prepare_hybrid_training_dataset(args):
    """Build a hybrid training list from the base dataset plus an extra dataset.

    When ``args.hy_data_dir`` is non-empty, the extra dataset's pair list is
    resolved to absolute paths, repeated ``args.hy_repeat`` times, appended to
    the base training list, and written under
    ``results/hybrid_train_lists/``. A matching hybrid validation list is
    produced when the extra dataset ships one. ``args`` is mutated in place
    (train_list, input_dir, input_val_dir, test_list, test_data, and — unless
    disabled — test image dimensions).
    """
    hy_root = args.hy_data_dir.strip() if hasattr(args, 'hy_data_dir') else ''
    if hy_root == '':
        # No extra dataset configured: leave args untouched.
        return

    hy_root_abs = os.path.abspath(hy_root)
    assert os.path.isdir(hy_root_abs), f"额外数据集目录不存在: {hy_root_abs}"

    base_root_cfg = args.base_data_dir.strip() if hasattr(args, 'base_data_dir') else ''
    base_root_abs = os.path.abspath(base_root_cfg) if base_root_cfg else os.path.abspath(args.input_dir)

    # Determine validation root: if base_data_dir is specified, use it; otherwise use input_val_dir
    val_root_abs = base_root_abs if base_root_cfg else os.path.abspath(args.input_val_dir)

    # Explicit base list names override the dataset defaults carried by args.
    base_train_cfg = args.base_train_list.strip() if hasattr(args, 'base_train_list') else ''
    base_list_name = base_train_cfg if base_train_cfg else args.train_list
    base_list_path = base_list_name if os.path.isabs(base_list_name) else os.path.join(base_root_abs, base_list_name)

    base_val_cfg = args.base_val_list.strip() if hasattr(args, 'base_val_list') else ''
    base_val_list_name = base_val_cfg if base_val_cfg else args.test_list
    base_val_list_path = base_val_list_name if os.path.isabs(base_val_list_name) else os.path.join(val_root_abs, base_val_list_name)

    hy_list_path = os.path.join(hy_root_abs, args.hy_train_list)
    hy_val_list_path = os.path.join(hy_root_abs, args.hy_val_list)

    assert os.path.isfile(base_list_path), f"基础数据集列表缺失: {base_list_path}"
    assert os.path.isfile(base_val_list_path), f"基础验证列表缺失: {base_val_list_path}"
    assert os.path.isfile(hy_list_path), f"额外数据集列表缺失: {hy_list_path}"
    assert args.hy_repeat > 0, "额外数据集重复次数必须大于 0"

    with open(base_list_path, 'r') as f:
        base_lines = [line.strip() for line in f if line.strip()]
    with open(hy_list_path, 'r') as f:
        raw_hy_lines = [line.strip() for line in f if line.strip()]

    assert len(base_lines) > 0, f"基础数据集列表为空: {base_list_path}"
    assert len(raw_hy_lines) > 0, f"额外数据集列表为空: {hy_list_path}"

    def _resolve_pair_paths(pair_line: str, root_dir: str) -> str:
        # Turn one "img gt [mask]" list line into absolute, verified paths.
        parts = pair_line.split()
        if len(parts) < 2 or len(parts) > 3:
             raise AssertionError(f"数据列表格式错误 (应为2或3列): {pair_line}")

        def _repair_path(path_str: str) -> str:
            # Try increasingly loose strategies to locate the file on disk.
            if os.path.isabs(path_str) and os.path.isfile(path_str):
                return path_str

            # Try direct join first
            direct = os.path.join(root_dir, path_str)
            if os.path.isfile(direct):
                return direct

            # Re-anchor the path at a known split directory if present.
            markers = ['/train/', '/val/', '/test/']
            candidate = ''
            for marker in markers:
                idx = path_str.find(marker)
                if idx != -1:
                    relative = path_str[idx + 1:]
                    candidate = os.path.join(root_dir, relative)
                    if os.path.isfile(candidate):
                        return candidate

            # Fall back to matching just the file name in root or known subdirs.
            basename_candidate = os.path.join(root_dir, os.path.basename(path_str))
            if os.path.isfile(basename_candidate):
                return basename_candidate

            alt_dirs = ['train/images', 'train/gt', 'val/images', 'val/gt', 'test/images', 'test/gt']
            for alt in alt_dirs:
                alt_candidate = os.path.join(root_dir, alt, os.path.basename(path_str))
                if os.path.isfile(alt_candidate):
                    return alt_candidate

            # If we can't find it, return the absolute path we tried first, so it fails later or we can see what happened
            return direct

        img_path = _repair_path(parts[0])
        gt_path = _repair_path(parts[1])

        # Verify existence to be safe, or let it fail later?
        # The original code raised AssertionError.
        if not os.path.isfile(img_path):
             raise AssertionError(f"图像文件不存在: {img_path} (origin: {parts[0]})")
        if not os.path.isfile(gt_path):
             raise AssertionError(f"GT文件不存在: {gt_path} (origin: {parts[1]})")

        if len(parts) == 3:
            mask_path = _repair_path(parts[2])
            # Mask is optional for existence check? Let's enforce it if provided.
            if not os.path.isfile(mask_path):
                 raise AssertionError(f"Mask文件不存在: {mask_path} (origin: {parts[2]})")
            return f"{img_path} {gt_path} {mask_path}"

        return f"{img_path} {gt_path}"

    hy_lines = [_resolve_pair_paths(line, hy_root_abs) for line in raw_hy_lines]

    # Oversample the extra dataset by repeating its lines after the base set.
    repeat_count = int(args.hy_repeat)
    combined_lines = list(base_lines)
    for _ in range(repeat_count):
        combined_lines.extend(hy_lines)

    repo_root = os.path.dirname(os.path.abspath(__file__))
    hybrid_dir = os.path.join(repo_root, 'results', 'hybrid_train_lists')
    os.makedirs(hybrid_dir, exist_ok=True)

    hybrid_list_path = os.path.join(hybrid_dir, args.hy_output_name)
    with open(hybrid_list_path, 'w') as f:
        f.write('\n'.join(combined_lines))
        f.write('\n')

    # Point training at the hybrid list; paths inside it are absolute for the
    # extra dataset and relative to base_root_abs for the base dataset.
    args.train_list = hybrid_list_path
    args.input_dir = base_root_abs
    args.input_val_dir = base_root_abs

    # Handle Validation Data
    if os.path.isfile(hy_val_list_path):
        print(f"发现额外验证集列表: {hy_val_list_path}")
        with open(base_val_list_path, 'r') as f:
            base_val_lines = [line.strip() for line in f if line.strip()]
        with open(hy_val_list_path, 'r') as f:
            raw_hy_val_lines = [line.strip() for line in f if line.strip()]

        # Resolve base val lines to absolute paths to ensure consistency
        # We assume base val lines are relative to val_root_abs
        def _resolve_base_val(line):
            parts = line.split()
            if len(parts) >= 2:
                p1 = os.path.join(val_root_abs, parts[0])
                p2 = os.path.join(val_root_abs, parts[1])
                if len(parts) == 3:
                    p3 = os.path.join(val_root_abs, parts[2])
                    return f"{p1} {p2} {p3}"
                return f"{p1} {p2}"
            return line # Should not happen for BSDS style

        base_val_lines_abs = [_resolve_base_val(line) for line in base_val_lines]

        # Resolve hy val lines
        hy_val_lines_abs = [_resolve_pair_paths(line, hy_root_abs) for line in raw_hy_val_lines]

        combined_val_lines = base_val_lines_abs + hy_val_lines_abs

        hybrid_val_list_path = os.path.join(hybrid_dir, args.hy_val_output_name)
        with open(hybrid_val_list_path, 'w') as f:
            f.write('\n'.join(combined_val_lines))
            f.write('\n')

        args.test_list = hybrid_val_list_path
        print(f"混合验证列表已生成: {hybrid_val_list_path}, 基础: {len(base_val_lines)}, 额外: {len(hy_val_lines_abs)}, 总计: {len(combined_val_lines)}")

    else:
        # No extra validation list: keep using the base one, relativized to
        # base_root_abs when it lives inside that tree.
        if os.path.isabs(base_val_list_path) and base_val_list_path.startswith(base_root_abs):
            rel_val_list = os.path.relpath(base_val_list_path, base_root_abs)
            args.test_list = rel_val_list
        else:
            args.test_list = base_val_list_path

    args.test_data = 'BSDS'
    args.choose_test_data = DATASET_NAMES.index('BSDS')

    # Only override test image dimensions if user hasn't explicitly set them via --disable_val_size_alignment
    # If disable_val_size_alignment is set, preserve user-provided test dimensions
    if not args.disable_val_size_alignment:
        # Auto-align will be handled later in main(), so set to BSDS defaults for now
        bsds_info = dataset_info('BSDS', is_linux=IS_LINUX)
        args.test_img_width = bsds_info['img_width']
        args.test_img_height = bsds_info['img_height']
    else:
        # User wants custom test dimensions, preserve them
        print(f"保持用户指定的测试图像尺寸: {args.test_img_width}x{args.test_img_height}")

    total_base = len(base_lines)
    total_hy = len(hy_lines) * repeat_count
    print(f"基础样本数: {total_base}, 额外样本 (重复后): {total_hy}, 总计: {len(combined_lines)}")
    print(f"混合训练列表已生成: {hybrid_list_path}")

def _print_augmentation_summary(args):
    """Print the data-augmentation configuration carried by ``args``."""
    if not args.use_augmentation:
        print("\n数据增强: 未启用\n")
        return
    print("\n=== 数据增强配置 ===")
    print(f"启用状态: {'✓' if args.use_augmentation else '✗'}")
    if args.aug_flip:
        print(f"镜像翻转: 概率={args.aug_flip_prob:.2f}")
    if args.aug_rotation:
        print(f"旋转: 概率={args.aug_rotation_prob:.2f}, 角度范围=±{args.aug_rotation_range}°")
    if args.aug_brightness:
        print(f"亮度调整: 概率={args.aug_brightness_prob:.2f}, 范围=±{args.aug_brightness_range}")
    if args.aug_contrast:
        print(f"对比度调整: 概率={args.aug_contrast_prob:.2f}, 范围=±{args.aug_contrast_range}")
    if args.aug_noise:
        print(f"高斯噪声: 概率={args.aug_noise_prob:.2f}, 标准差≤{args.aug_noise_std}")
    print("================\n")


def _restore_checkpoint(model, checkpoint_path, device):
    """Load a checkpoint file into ``model``.

    Supports three on-disk layouts:
      * legacy ``{'epoch': .., 'state_dict': ..}``
      * current ``{'epoch': .., 'model': .., 'optimizer': ..}``
      * a bare state_dict (epoch inferred from the parent directory name,
        which the save path encodes as ``<epoch>/<epoch>_model.pth``)

    Returns:
        (start_epoch, optimizer_state): the epoch to resume from (0 when it
        cannot be determined) and the saved optimizer state dict, or ``None``
        when the checkpoint does not carry one.
    """
    start_epoch = 0
    optimizer_state = None
    obj = torch.load(checkpoint_path, map_location=device)
    if isinstance(obj, dict) and 'state_dict' in obj and 'model' not in obj:
        # Legacy {'epoch': .., 'state_dict': ..} format.
        _load_state_dict_fail_fast(model, obj.get('state_dict'))
        if 'epoch' in obj:
            start_epoch = int(obj['epoch']) + 1
    elif isinstance(obj, dict) and 'model' in obj:
        _load_state_dict_fail_fast(model, obj['model'])
        if 'epoch' in obj:
            start_epoch = int(obj['epoch']) + 1
        if 'optimizer' in obj:
            optimizer_state = obj['optimizer']
    else:
        # Bare state_dict: fall back to parsing the epoch from the directory.
        _load_state_dict_fail_fast(model, obj)
        parent = os.path.basename(os.path.dirname(checkpoint_path))
        if parent.isdigit():
            start_epoch = int(parent) + 1
    return start_epoch, optimizer_state


def main(args):
    """Train or evaluate DexiNed according to the parsed CLI arguments.

    Workflow: merge the hybrid dataset lists into ``args``, build the model
    and dataloaders, then either run inference (``args.is_testing``) or the
    full train / validate / checkpoint loop.
    """

    # Build hybrid train/val lists; mutates args (test_list, test_data, ...).
    prepare_hybrid_training_dataset(args)

    # Unless explicitly disabled, force validation images to the training
    # resolution (only square training sizes are supported for alignment).
    if not args.disable_val_size_alignment:
        assert args.img_width == args.img_height, "当前仅支持方形训练图像尺寸自动对齐验证集尺寸"
        args.test_img_width = args.img_width
        args.test_img_height = args.img_height
        print(f"验证集图像尺寸已对齐训练尺寸: {args.test_img_width}x{args.test_img_height}")

    print(f"Number of GPU's available: {torch.cuda.device_count()}")
    print(f"Pytorch version: {torch.__version__}")

    # Report the data-augmentation configuration.
    _print_augmentation_summary(args)

    # Tensorboard summary writer and output directory.
    tb_writer = None
    training_dir = os.path.join(args.output_dir, args.train_data)
    os.makedirs(training_dir, exist_ok=True)

    # Resolve which checkpoint (if any) to resume training from.
    def _resolve_resume_path() -> str:
        if args.resume_latest:
            # Pick the numerically largest epoch sub-directory of training_dir.
            subs = [d for d in os.listdir(training_dir) if os.path.isdir(os.path.join(training_dir, d))]
            # Keep purely numeric directory names only.
            epochs = [int(s) for s in subs if s.isdigit()]
            assert len(epochs) > 0, f"未在 {training_dir} 下找到可用的 checkpoint 目录"
            ep = max(epochs)
            ck = os.path.join(training_dir, str(ep), f"{ep}_model.pth")
            assert os.path.isfile(ck), f"缺少 checkpoint 文件：{ck}"
            return ck
        if args.resume:
            ck = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
            assert os.path.isfile(ck), f"指定的 checkpoint 不存在：{ck}"
            return ck
        return ''

    checkpoint_path = args.checkpoint_data if args.is_testing else _resolve_resume_path()
    if args.tensorboard and not args.is_testing:
        from torch.utils.tensorboard import SummaryWriter  # for torch 1.4 or greater
        tb_writer = SummaryWriter(log_dir=training_dir)
        # Persist the training settings next to the event files.
        training_notes = ['DexiNed, Xavier Normal Init, LR= ' + str(args.lr) + ' WD= '
                          + str(args.wd) + ' image size = ' + str(args.img_width)
                          + ' adjust LR='+ str(args.adjust_lr) + ' Loss Function= BDCNloss2. '
                          +'Trained on> '+args.train_data+' Tested on> '
                          +args.test_data+' Batch size= '+str(args.batch_size)+' '+str(time.asctime())]
        with open(os.path.join(training_dir, 'training_settings.txt'), 'w') as info_txt:
            info_txt.write(str(training_notes))

    # Get computing device.
    device = torch.device('cpu' if torch.cuda.device_count() == 0
                          else 'cuda')
    use_amp_requested = bool(getattr(args, 'amp', False))
    if use_amp_requested and device.type != 'cuda':
        print("Automatic mixed precision requested but CUDA device not available; falling back to float32 training.")
    use_amp = use_amp_requested and device.type == 'cuda'
    scaler = GradScaler(enabled=use_amp) if not args.is_testing else None

    # Instantiate the model and move it to the computing device.
    in_channels = 1 if args.grayscale else 3
    model = DexiNed(in_channels=in_channels).to(device)
    if args.grayscale:
        print("灰度图训练模式：模型输入通道 = 1")
    # Automatically enable DataParallel on multi-GPU hosts.
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        print(f"Using DataParallel on {torch.cuda.device_count()} GPUs")
        model = nn.DataParallel(model)

    ini_epoch = 0
    loaded_opt_state = None
    if not args.is_testing and (args.resume or args.resume_latest):
        ini_epoch, loaded_opt_state = _restore_checkpoint(model, checkpoint_path, device)
        print('Training resumed from> ', checkpoint_path, ' start_epoch=', ini_epoch)

    # The dataset expects BGR means only; a 4-element mean includes an extra
    # value that is stripped here.
    mean_bgr = (args.mean_pixel_values[0:3]
                if len(args.mean_pixel_values) == 4
                else args.mean_pixel_values)

    # Training dataset/loader (built regardless of resume state; the original
    # code constructed it on both branches identically).
    dataset_train = BipedDataset(args.input_dir,
                                 img_width=args.img_width,
                                 img_height=args.img_height,
                                 mean_bgr=mean_bgr,
                                 train_mode='train',
                                 crop_img=args.crop_img,
                                 arg=args
                                 )
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=args.batch_size,
                                  shuffle=True,
                                  num_workers=args.workers)

    dataset_val = TestDataset(args.input_val_dir,
                              test_data=args.test_data,
                              img_width=args.test_img_width,
                              img_height=args.test_img_height,
                              mean_bgr=mean_bgr,
                              test_list=args.test_list, arg=args
                              )
    dataloader_val = DataLoader(dataset_val,
                                batch_size=10,
                                shuffle=False,
                                num_workers=args.workers)

    # Testing-only path: run inference, report parameter count, and exit.
    if args.is_testing:
        output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
        print(f"output_dir: {output_dir}")
        if args.double_img:
            # Predict twice per image with swapped channels, then mix results.
            testPich(checkpoint_path, dataloader_val, model, device, output_dir, args, use_amp=use_amp)
        else:
            test(checkpoint_path, dataloader_val, model, device, output_dir, args, use_amp=use_amp)

        num_param = count_parameters(model)
        print('-------------------------------------------------------')
        print('DexiNed # of Parameters:')
        print(num_param)
        print('-------------------------------------------------------')
        return

    criterion = bdcn_loss2  # hed_loss2 is the alternative loss

    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.wd)
    if loaded_opt_state is not None:
        optimizer.load_state_dict(loaded_opt_state)

    # Main training loop state.
    seed = 1021
    adjust_lr = args.adjust_lr
    lr2 = args.lr

    # When resuming with --reset_lr_on_resume, replay the LR schedule so the
    # optimizer starts at the rate it would have reached at ini_epoch.
    if (args.resume or args.resume_latest) and args.reset_lr_on_resume and adjust_lr is not None:
        # Count how many decay milestones have already passed.
        decay_count = sum(1 for milestone in adjust_lr if milestone < ini_epoch)
        lr2 = args.lr * (0.1 ** decay_count)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr2
        print(f"恢复训练时重置学习率: epoch={ini_epoch}, lr={lr2}, adjust_lr={adjust_lr}, decay_count={decay_count}")

    for epoch in range(ini_epoch, args.epochs):
        # Clear GPU cache at the start of each epoch.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        # Re-seed every 7 epochs (matches the original training recipe).
        if epoch % 7 == 0:
            seed = seed + 1000
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            print("------ Random seed applied-------------")

        # Step-decay the learning rate at the configured milestones.
        if adjust_lr is not None:
            if epoch in adjust_lr:
                lr2 = lr2 * 0.1
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr2

        # Per-epoch output directories.
        output_dir_epoch = os.path.join(args.output_dir, args.train_data, str(epoch))
        img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
        os.makedirs(output_dir_epoch, exist_ok=True)
        os.makedirs(img_test_dir, exist_ok=True)

        # Preview validation BEFORE training this epoch (mirrors original flow).
        validate_one_epoch(epoch,
                           dataloader_val,
                           model,
                           device,
                           img_test_dir,
                           arg=args,
                           criterion=criterion,
                           loss_weights=LOSS_WEIGHTS,
                           tb_writer=tb_writer,
                           global_step=epoch,
                           stage='val_preview',
                           log_to_writer=tb_writer is not None,
                           use_amp=use_amp)

        avg_loss = train_one_epoch(epoch,
                                   dataloader_train,
                                   model,
                                   criterion,
                                   optimizer,
                                   device,
                                   args.log_interval_vis,
                                   tb_writer,
                                   args=args,
                                   loss_weights=LOSS_WEIGHTS,
                                   scaler=scaler,
                                   use_amp=use_amp)
        val_metrics = validate_one_epoch(epoch,
                                         dataloader_val,
                                         model,
                                         device,
                                         img_test_dir,
                                         arg=args,
                                         criterion=criterion,
                                         loss_weights=LOSS_WEIGHTS,
                                         tb_writer=tb_writer,
                                         global_step=epoch,
                                         stage='val',
                                         log_to_writer=tb_writer is not None,
                                         use_amp=use_amp)

        # Save a checkpoint (epoch + model + optimizer state) every
        # save_interval epochs, and always at the final epoch.
        save_interval = getattr(args, 'save_interval', 1)
        should_save = save_interval > 0 and (epoch % save_interval) == 0
        if epoch == (args.epochs - 1):
            should_save = True

        if should_save:
            state_obj = {
                'epoch': int(epoch),
                # Unwrap DataParallel so the saved keys have no 'module.' prefix.
                'model': model.module.state_dict() if hasattr(model, 'module') else model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            torch.save(state_obj,
                       os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))

        if tb_writer is not None:
            tb_writer.add_scalar('loss',
                                 avg_loss,
                                 epoch+1)
            tb_writer.add_scalar('val/loss_epoch',
                                 val_metrics['loss'],
                                 epoch)
        print('Current learning rate> ', optimizer.param_groups[0]['lr'])

    num_param = count_parameters(model)
    print('-------------------------------------------------------')
    print('DexiNed, # of Parameters:')
    print(num_param)
    print('-------------------------------------------------------')

def str2bool(v):
    """Parse a CLI flag into a bool.

    Accepts real booleans as-is and the usual textual spellings
    (case-insensitive); anything else raises argparse.ArgumentTypeError
    so argparse reports a clean usage error.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')

if __name__ == '__main__':
    # Script entry point: parse CLI arguments (parse_args is defined earlier
    # in this file, outside this view) and run training or testing.
    args = parse_args()
    main(args)
