import torch
import torch.nn as nn
import torch.nn.functional as F
import datetime
import os
import time
import random
import numpy as np
import torch
import torch.utils.data
import torchvision
import matplotlib.pyplot as plt
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import detection_eval
import segmentation_eval
from data.apple_dataset import AppleDataset
from utility.engine import train_one_epoch, evaluate
import pandas as pd
from collections import OrderedDict
from datetime import datetime
import utility.utils as utils
import utility.transforms as T
# Additional imports for the Swin Transformer + FPN backbone
from torchvision.models.detection import MaskRCNN
from torchvision.ops import FeaturePyramidNetwork
from timm import create_model

# Swin-backbone variant of the model builder (get_maskrcnn_with_cbam is kept below as an alternative)
def get_maskrcnn_with_swin(num_classes):
    """Build a Mask R-CNN model with a Swin Transformer + FPN backbone.

    Args:
        num_classes: number of output classes, including background.

    Returns:
        A torchvision ``MaskRCNN`` model ready for training.
    """
    # Swin Transformer feature extractor: features_only=True yields one
    # feature map per stage; dynamic_img_size lets it accept non-224 inputs.
    swin = create_model('swin_base_patch4_window7_224', pretrained=False,
                        features_only=True, dynamic_img_size=True)

    # Derive the per-stage channel counts from the model itself instead of
    # hard-coding them, so switching to another Swin variant keeps working.
    # (For swin_base this is [128, 256, 512, 1024].)
    in_channels_list = swin.feature_info.channels()
    out_channels = 256  # FPN output channels; MaskRCNN reads backbone.out_channels

    fpn = FeaturePyramidNetwork(
        in_channels_list=in_channels_list,
        out_channels=out_channels
    )

    model = MaskRCNN(
        backbone=SwinFPN(swin, fpn, out_channels),
        num_classes=num_classes,
        min_size=800, max_size=1333  # image resize bounds; tune per dataset
    )

    # NOTE: MaskRCNN(num_classes=...) already constructs box and mask
    # predictors for this class count, so the previous manual replacement of
    # box_predictor / mask_predictor was redundant and has been removed.
    return model

# Custom backbone wrapper that combines the Swin feature extractor with an FPN
class SwinFPN(nn.Module):
    """Backbone wrapper: timm feature extractor -> torchvision FPN.

    Exposes ``out_channels`` as required by torchvision's ``MaskRCNN`` and
    returns an ``OrderedDict`` keyed ``'0'..'N-1'`` so the default RPN and
    ``MultiScaleRoIAlign`` (featmap_names=['0','1','2','3']) can consume it.
    """

    def __init__(self, backbone, fpn, out_channels):
        """
        Args:
            backbone: feature extractor; timm ``features_only=True`` models
                return a *list* of per-stage feature maps from forward().
            fpn: a ``FeaturePyramidNetwork`` matching the backbone channels.
            out_channels: channel count of every FPN output map.
        """
        super().__init__()
        self.backbone = backbone
        self.fpn = fpn
        self.out_channels = out_channels
        # Kept for backward compatibility: only used if the backbone returns
        # a dict keyed this way instead of a list.
        self.stage_indices = [f'features.{i}' for i in [1, 3, 5, 7]]
        # Per-stage channel counts, used to detect channels-last outputs
        # (timm Swin models emit NHWC feature maps -- TODO confirm for the
        # exact timm version in use).
        info = getattr(backbone, 'feature_info', None)
        self._stage_channels = info.channels() if info is not None else None

    def forward(self, x):
        feats = self.backbone(x)
        # BUG FIX: timm features_only models return a list, so the original
        # string-key indexing (feats['features.1']) raised TypeError. Support
        # dict-style backbones as well for safety.
        if isinstance(feats, dict):
            feats = [feats[k] for k in self.stage_indices]

        fpn_input = OrderedDict()
        for i, feat in enumerate(feats):
            # Convert NHWC -> NCHW when the last dim matches the expected
            # channel count; torchvision's FPN requires NCHW.
            if self._stage_channels is not None:
                ch = self._stage_channels[i]
                if feat.shape[-1] == ch and feat.shape[1] != ch:
                    feat = feat.permute(0, 3, 1, 2).contiguous()
            fpn_input[str(i)] = feat

        # BUG FIX: return the OrderedDict itself (keys '0'..'N-1'), not a
        # list -- the RPN calls .values() on it and the RoI pooler looks up
        # maps by these string keys.
        return self.fpn(fpn_input)

class ChannelAttention(nn.Module):
    def __init__(self, in_channels, reduction_ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(in_channels // reduction_ratio, in_channels)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = self.fc(self.avg_pool(x).view(x.size(0), -1))
        max_out = self.fc(self.max_pool(x).view(x.size(0), -1))
        out = avg_out + max_out
        return self.sigmoid(out).view(x.size(0), x.size(1), 1, 1)

class SpatialAttention(nn.Module):
    """Spatial attention (CBAM): collapse the channel axis with mean and max,
    convolve the stacked 2-channel map, and emit a sigmoid gate (B, 1, H, W)."""

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        # Same-padding conv so the spatial size is preserved.
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        channel_mean = x.mean(dim=1, keepdim=True)
        channel_max = x.max(dim=1, keepdim=True).values
        stacked = torch.cat((channel_mean, channel_max), dim=1)
        return self.sigmoid(self.conv(stacked))

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel attention followed by
    spatial attention, each applied multiplicatively to the feature map."""

    def __init__(self, in_channels, reduction_ratio=16, kernel_size=7):
        super(CBAM, self).__init__()
        self.channel_attention = ChannelAttention(in_channels, reduction_ratio)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x):
        refined = x * self.channel_attention(x)
        return refined * self.spatial_attention(refined)


def get_maskrcnn_with_cbam(num_classes):
    """Build a COCO-pretrained Mask R-CNN (ResNet-50 FPN) with CBAM attention
    applied to the output of every ResNet bottleneck block.

    Args:
        num_classes: number of output classes, including background.

    Returns:
        The modified torchvision ``MaskRCNN`` model.
    """
    # NOTE(review): `pretrained=True` is deprecated in newer torchvision in
    # favour of `weights=...`; kept as-is for compatibility with the version
    # this project runs on.
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

    # BUG FIX: previously the CBAM modules were only assigned as attributes
    # and never invoked, so they had no effect on the forward pass. A forward
    # hook whose return value replaces the block output makes them active.
    # (This applies CBAM after the block's residual add + ReLU.)
    def _apply_cbam(module, inputs, output):
        # Returning a value from a forward hook overrides the module output.
        return module.cbam(output)

    for layer in [model.backbone.body.layer1, model.backbone.body.layer2,
                  model.backbone.body.layer3, model.backbone.body.layer4]:
        for block in layer:
            # Registering as an attribute makes the CBAM a submodule, so it
            # is moved by model.to(device) and saved with the model.
            block.cbam = CBAM(block.conv3.out_channels)
            block.register_forward_hook(_apply_cbam)

    # Replace the box classification head for the new class count.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # Replace the mask prediction head for the new class count.
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)

    return model



def get_transform(train):
    """Build the preprocessing pipeline.

    Args:
        train: whether the pipeline is for training.

    Returns:
        A composed transform: ToTensor always, plus a 0.5-probability
        horizontal flip during training.
    """
    augmentations = [T.RandomHorizontalFlip(0.5)] if train else []
    return T.Compose([T.ToTensor()] + augmentations)


def main(args):
    """Run the full training loop.

    Trains the selected model on the AppleDataset, evaluates with COCO
    metrics after every epoch, appends all metrics to a CSV, tracks the best
    mask AP, and saves per-epoch and final checkpoints.

    Args:
        args: parsed command-line arguments (see the argparse setup below).
    """
    # Local import: the top-level `from datetime import datetime` shadows the
    # `datetime` module, so `datetime.timedelta` would raise AttributeError.
    from datetime import timedelta

    print(args)
    device = args.device

    # ---- Data loading ----
    # NOTE(review): a hard-coded override of args.data_path was removed here
    # so the --data_path CLI argument is honoured; the argparse default keeps
    # the previous path.
    print("Loading data")
    num_classes = 2  # background + apple
    dataset = AppleDataset(os.path.join(args.data_path, 'train'), get_transform(train=True))
    dataset_test = AppleDataset(os.path.join(args.data_path, 'test'), get_transform(train=False))

    print("Creating data loaders")
    data_loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=True,
                                              num_workers=args.workers, collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
                                                   shuffle=False, num_workers=0,
                                                   collate_fn=utils.collate_fn)

    # ---- Model ----
    print("Creating model")
    if args.model == 'maskrcnn':
        model = get_maskrcnn_with_swin(num_classes)
    else:
        # BUG FIX: fail fast instead of hitting a NameError on an unbound
        # `model` a few lines below.
        raise ValueError(f"Unsupported model: {args.model!r}")
    model.to(device)

    # ---- Optimizer and LR schedule ----
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Alternative: torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps,
                                                        gamma=args.lr_gamma)

    # Optionally resume model/optimizer/scheduler state from a checkpoint.
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])

    # ---- Result bookkeeping ----
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    result_dir = os.path.join(args.output_dir, f"results_{timestamp}")
    os.makedirs(result_dir, exist_ok=True)
    best_model_path = os.path.join(result_dir, 'best_model.pth')

    print("Start training")
    start_time = time.time()
    metrics_list = []
    best_ap = 0.0
    # BUG FIX: initialized so the final print cannot raise NameError when no
    # epoch improves on best_ap (e.g. all-zero AP or --epochs 0).
    best_epoch = -1

    # COCOeval stats layout, shared by the bbox and segm result vectors.
    stat_names = ['AP', 'AP50', 'AP75', 'AP_small', 'AP_medium', 'AP_large',
                  'AR_max1', 'AR_max10', 'AR_max100',
                  'AR_small', 'AR_medium', 'AR_large']

    for epoch in range(args.epochs):
        train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq)
        lr_scheduler.step()

        print("Running detection evaluation...")
        coco_evaluator = evaluate(model, data_loader_test, device=device)
        bbox_stats = coco_evaluator.coco_eval['bbox'].stats
        segm_stats = coco_evaluator.coco_eval['segm'].stats

        # Flatten both 12-entry stat vectors into one CSV row per epoch.
        metrics = OrderedDict({'epoch': epoch})
        for name, value in zip(stat_names, bbox_stats):
            metrics[f'bbox_{name}'] = value
        for name, value in zip(stat_names, segm_stats):
            metrics[f'segm_{name}'] = value
        metrics_list.append(metrics)
        # Rewrite the CSV every epoch so interrupted runs still keep results.
        pd.DataFrame(metrics_list).to_csv(
            os.path.join(result_dir, 'evaluation_metrics.csv'), index=False)

        # Track the best mask AP@[0.5:0.95].
        current_ap = segm_stats[0]
        if current_ap > best_ap:
            best_ap = current_ap
            best_epoch = epoch
            # NOTE(review): saving the whole model object (not a state_dict)
            # matches the original behaviour but ties the file to this code.
            torch.save(model, best_model_path)
            print(f"New best AP {best_ap:.4f} saved at epoch {epoch}")

        # Regular per-epoch checkpoint.
        torch.save(model, os.path.join(args.output_dir, f'model_{epoch}.pth'))

    total_time = time.time() - start_time
    # BUG FIX: the original called `datetime.timedelta`, which fails because
    # `datetime` is rebound to the class by the top-level import.
    print('Training time {}'.format(str(timedelta(seconds=int(total_time)))))
    print(f'Best AP@[0.5:0.95] = {best_ap:.4f} at epoch {best_epoch}')

    # Final model, both as state_dict and as a whole pickled object.
    torch.save(model.state_dict(), 'myapple_maskrcnn_final_dict.pth')
    torch.save(model, 'myapple_maskrcnn_final.pth')


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description='PyTorch Detection Training')
    parser.add_argument('--data_path', default="D:/Jupyter/pytorch/d2l-zh/pytorch/appppppp/detection2", help='dataset')
    parser.add_argument('--dataset', default='AppleDataset', help='dataset')
    parser.add_argument('--model', default='maskrcnn', help='model')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('-b', '--batch-size', default=1, type=int)
    parser.add_argument('--epochs', default=10, type=int, metavar='N', help='number of total epochs to run')
    # BUG FIX: the help text previously claimed "(default: 16)".
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--lr', default=0.02, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W',
                        help='weight decay (default: 1e-4)', dest='weight_decay')
    parser.add_argument('--lr-step-size', default=8, type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-steps', default=[8, 11], nargs='+', type=int, help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
    parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
    parser.add_argument('--output-dir', default='./modeloutput', help='path where to save')
    parser.add_argument('--resume', default='', help='resume from checkpoint')

    parser.add_argument('--result_dir', default='./modeloutput', help='path where to save')

    args = parser.parse_args()
    print(args.model)
    # assert(args.model in ['mrcnn', 'frcnn'])

    # Seed every RNG source used by the pipeline for reproducibility (the
    # original only seeded `random`, leaving torch/numpy non-deterministic).
    random.seed(42)
    np.random.seed(42)
    torch.manual_seed(42)

    # Release any cached GPU memory held over from a previous run.
    torch.cuda.empty_cache()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
