import datetime
import os
import time

import torch
import torch.utils.data
import torchvision
import matplotlib.pyplot as plt
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

from data.apple_dataset import AppleDataset
from utility.engine import train_one_epoch, evaluate
import segmentation_eval  # segmentation evaluation module
import detection_eval  # detection evaluation module

import utility.utils as utils
import utility.transforms as T

######################################################
# Train either a Faster-RCNN or Mask-RCNN predictor
# using the MinneApple dataset
######################################################


def get_transform(train):
    """Build the image preprocessing pipeline.

    Args:
        train: whether the pipeline is for training (adds augmentation).

    Returns:
        A composed transform: tensor conversion, plus a 50% random
        horizontal flip when training.
    """
    steps = [T.ToTensor()]
    if train:
        # Augment only during training.
        steps.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(steps)


def get_maskrcnn_model_instance(num_classes):
    """Build a Mask R-CNN model with a ResNet-50 FPN backbone.

    Loads COCO-pretrained weights, then replaces both the box
    classification head and the mask prediction head so the model
    outputs `num_classes` classes.

    Args:
        num_classes: number of output classes (including background).

    Returns:
        A torchvision Mask R-CNN model ready for fine-tuning.
    """
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

    # Replace the box classification head for our class count.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # Replace the mask predictor for our class count.
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)

    return model


def get_frcnn_model_instance(num_classes):
    """Build a Faster R-CNN model with a ResNet-50 FPN backbone.

    Loads COCO-pretrained weights and swaps in a new box classification
    head sized for `num_classes` classes.

    Args:
        num_classes: number of output classes (including background).

    Returns:
        A torchvision Faster R-CNN model ready for fine-tuning.
    """
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
    # Swap the pretrained classification head for one matching our classes.
    head_in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(head_in_features, num_classes)
    return model


def main(args):
    """Train a Mask R-CNN or Faster R-CNN model on the MinneApple dataset.

    Runs the full training loop: data loading, model construction,
    per-epoch training, segmentation/detection evaluation, per-epoch
    checkpointing, best-model tracking (by segmentation mIoU), and a
    final model dump.

    Args:
        args: parsed command-line arguments (see the argparse setup in
            the `__main__` block).
    """
    print(args)
    device = args.device
    # BUG FIX: the previous code hard-coded args.data_path here, silently
    # overriding the --data_path CLI option. The argparse default already
    # points at the same directory, so respecting the CLI value is
    # backward-compatible.

    # Load datasets (two classes: background + apple).
    print("Loading data")
    num_classes = 2
    dataset = AppleDataset(os.path.join(args.data_path, 'train'), get_transform(train=True))
    dataset_test = AppleDataset(os.path.join(args.data_path, 'test'), get_transform(train=False))

    # Build data loaders; evaluation uses batch_size=1 with no workers.
    print("Creating data loaders")
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
                                              num_workers=args.workers, collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
                                                   shuffle=False, num_workers=0,
                                                   collate_fn=utils.collate_fn)

    # Build the requested model; anything other than 'maskrcnn' falls back
    # to Faster R-CNN.
    print("Creating model")
    if args.model == 'maskrcnn':
        model = get_maskrcnn_model_instance(num_classes)
    else:
        model = get_frcnn_model_instance(num_classes)
    model.to(device)

    # SGD over trainable parameters only, with multi-step LR decay.
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps,
                                                        gamma=args.lr_gamma)

    # Optionally resume model/optimizer/scheduler state from a checkpoint.
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])

    # Timestamped results directory.
    # BUG FIX: `import datetime` binds the module, so the class must be
    # accessed as datetime.datetime — the previous `datetime.now()` raised
    # AttributeError before training even started.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    result_dir = os.path.join(args.output_dir, f"results_{timestamp}")
    os.makedirs(result_dir, exist_ok=True)

    # Per-epoch metric history (detection lists are currently unfilled;
    # the detection evaluation still runs for its printed output).
    eval_metrics = {
        'segmentation': {'miou': [], 'fwiou': [], 'macc': [], 'pacc': []},
        'detection': {'AP': [], 'AP_50': [], 'AP_75': []}
    }
    # Track the best segmentation mIoU seen so far and where to save it.
    best_metric = -float('inf')
    best_model_path = os.path.join(result_dir, 'best_model.pth')

    print("Start training")
    start_time = time.time()
    for epoch in range(args.epochs):
        # Train one epoch, then step the LR schedule.
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()

        # Save a full checkpoint for this epoch.
        if args.output_dir:
            torch.save({
                'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
            }, os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

        # Evaluate segmentation quality on the test split.
        print("Running segmentation evaluation...")
        seg_results = segmentation_eval.evaluate(
            model=model,
            data_loader=data_loader_test,
            device=device,
            print_freq=args.print_freq
        )

        # Evaluate detection quality (run for its side effects/printout).
        print("Running detection evaluation...")
        det_results = detection_eval.evaluate(
            model=model,
            data_loader=data_loader_test,
            device=device,
            print_freq=args.print_freq
        )

        # Record segmentation metrics.
        # NOTE(review): these read attribute-style results (seg_results.miou)
        # while the file dump below reads seg_results.stats[i] — confirm the
        # evaluator exposes both.
        eval_metrics['segmentation']['miou'].append(seg_results.miou)
        eval_metrics['segmentation']['fwiou'].append(seg_results.fwiou)
        eval_metrics['segmentation']['macc'].append(seg_results.macc)
        eval_metrics['segmentation']['pacc'].append(seg_results.pacc)

        # Persist this epoch's segmentation numbers.
        with open(os.path.join(result_dir, f'epoch_{epoch}_results.txt'), 'w') as f:
            f.write(f"Epoch {epoch} Results:\n")
            f.write(f"mIoU: {seg_results.stats[0]}\n")
            f.write(f"fwIoU: {seg_results.stats[1]}\n")
            f.write(f"mAcc: {seg_results.stats[2]}\n")
            f.write(f"pAcc: {seg_results.stats[3]}\n")

        # Keep the checkpoint with the best mIoU.
        current_metric = seg_results.stats[0]
        if current_metric > best_metric:
            best_metric = current_metric
            torch.save({
                'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
            }, best_model_path)

        # Release cached GPU memory between epochs (was duplicated before).
        torch.cuda.empty_cache()

    # Report total wall-clock training time.
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))

    # Final model dump (state dict and full pickled model) in the CWD.
    torch.save(model.state_dict(), 'myapple_maskrcnn_final_dict.pth')
    torch.save(model, 'myapple_maskrcnn_final.pth')


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='PyTorch Detection Training')
    parser.add_argument('--data_path', default="D:/Jupyter/pytorch/d2l-zh/pytorch/appppppp/detection2", help='dataset')
    parser.add_argument('--dataset', default='AppleDataset', help='dataset')
    parser.add_argument('--model', default='maskrcnn', help='model')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=2, type=int)
    parser.add_argument('--epochs', default=10, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 16)')
    parser.add_argument('--lr', default=0.02, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[8, 11], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='./modeloutput', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--result_dir', default='./modeloutput', help='path where to save')

    args = parser.parse_args()
    print(args.model)
    # NOTE(review): any --model value other than 'maskrcnn' silently falls
    # back to Faster R-CNN in main(); consider adding
    # choices=['maskrcnn', 'frcnn'] to the --model argument above.
    # (A previous commented-out assert checked the stale names
    # 'mrcnn'/'frcnn', which never matched the actual defaults.)

    # Ensure the output directory exists before training writes to it.
    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
