import os
import pickle
import tarfile
import zipfile
from collections import defaultdict

import albumentations as A
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch



def calculate_iou(box1, box2):
    """Compute the Intersection over Union (IoU) of two bounding boxes.

    Args:
        box1: (x1, y1, x2, y2) corner coordinates of the first box.
        box2: (x1, y1, x2, y2) corner coordinates of the second box.

    Returns:
        IoU in [0, 1]; 0.0 when the union area is zero (degenerate boxes),
        which previously raised ZeroDivisionError.
    """
    x1, y1, x2, y2 = box1
    x1g, y1g, x2g, y2g = box2

    # Intersection rectangle; width/height clamp at 0 for disjoint boxes.
    xi1 = max(x1, x1g)
    yi1 = max(y1, y1g)
    xi2 = min(x2, x2g)
    yi2 = min(y2, y2g)
    inter_area = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)

    box1_area = (x2 - x1) * (y2 - y1)
    box2_area = (x2g - x1g) * (y2g - y1g)
    union_area = box1_area + box2_area - inter_area

    # Guard against division by zero for zero-area boxes.
    if union_area <= 0:
        return 0.0
    return inter_area / union_area


def calculate_map(predictions, ground_truths, iou_threshold=0.5):
    """Compute mean Average Precision (mAP) over all ground-truth classes.

    Args:
        predictions: iterable of dicts with parallel 'scores', 'labels',
            'boxes' sequences (boxes as (x1, y1, x2, y2)).
        ground_truths: iterable of dicts with parallel 'labels' and 'boxes'.
        iou_threshold: minimum IoU (strict '>') for a prediction to count
            as a true positive.

    Returns:
        mAP as a float; 0.0 when there are no ground-truth boxes at all
        (previously np.mean([]) returned NaN with a runtime warning).
    """

    def _iou(box_a, box_b):
        """IoU of two corner-format boxes; 0.0 for zero-area union."""
        xi1 = max(box_a[0], box_b[0])
        yi1 = max(box_a[1], box_b[1])
        xi2 = min(box_a[2], box_b[2])
        yi2 = min(box_a[3], box_b[3])
        inter = max(xi2 - xi1, 0) * max(yi2 - yi1, 0)
        area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
        area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
        union = area_a + area_b - inter
        return inter / union if union > 0 else 0.0

    # Group ground truths and predictions by class label.
    gt_per_class = defaultdict(list)
    pred_per_class = defaultdict(list)
    for gt in ground_truths:
        for label, box in zip(gt['labels'], gt['boxes']):
            gt_per_class[label].append(box)
    for pred in predictions:
        for score, label, box in zip(pred['scores'], pred['labels'], pred['boxes']):
            pred_per_class[label].append((box, score))

    aps = []
    for class_id, gt_boxes in gt_per_class.items():
        # Evaluate predictions in descending confidence order.
        preds_sorted = sorted(pred_per_class[class_id], key=lambda x: x[1], reverse=True)
        tp = np.zeros(len(preds_sorted))
        fp = np.zeros(len(preds_sorted))
        # Each ground-truth box may be matched by at most one prediction.
        # Previously every prediction overlapping any gt box counted as a
        # true positive, so duplicate detections inflated the score.
        matched = [False] * len(gt_boxes)
        for i, (box, _) in enumerate(preds_sorted):
            best_iou, best_j = 0.0, -1
            for j, gt_box in enumerate(gt_boxes):
                if matched[j]:
                    continue
                iou = _iou(box, gt_box)
                if iou > best_iou:
                    best_iou, best_j = iou, j
            if best_j >= 0 and best_iou > iou_threshold:
                tp[i] = 1
                matched[best_j] = True
            else:
                fp[i] = 1

        cum_tp = np.cumsum(tp)
        cum_fp = np.cumsum(fp)
        recalls = cum_tp / len(gt_boxes)
        precisions = np.divide(cum_tp, cum_tp + cum_fp)

        # AP as the area under the (un-interpolated) precision-recall curve.
        aps.append(np.trapz(precisions, recalls))

    return float(np.mean(aps)) if aps else 0.0




# Import the PyTorch tensor-conversion transform for albumentations
from albumentations.pytorch import ToTensorV2
def save_checkpoint(state, filename="checkpoint.pth.tar"):
    """Persist the given model/training state dict to *filename* via torch.save."""
    torch.save(state, filename)

def load_checkpoint(checkpoint_path, model, optimizer):
    """Restore model and optimizer state from a checkpoint file.

    Returns:
        (start_epoch, history): the epoch stored in the checkpoint and its
        loss-history dict; (0, empty history) when the file does not exist.
    """
    empty_history = {'train_loss': [], 'test_loss': []}

    # Guard clause: nothing to restore when the file is missing.
    if not os.path.isfile(checkpoint_path):
        print("=> No checkpoint found at '{}'".format(checkpoint_path))
        return 0, empty_history

    print("=> Loading checkpoint")
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    # Older checkpoints may predate history tracking; fall back to empty lists.
    history = checkpoint.get('history', empty_history)
    return checkpoint['epoch'], history


def load_dataset_splits(split_datasets_file_idx):
    """Load the pickled dataset split and return its train/valid image and mask lists."""
    with open(split_datasets_file_idx, 'rb') as handle:
        splits = pickle.load(handle)
    return (splits['train_imgs'], splits['valid_imgs'],
            splits['train_masks'], splits['valid_masks'])


def load_test_set(split_datasets_file_idx):
    """Load the pickled dataset split and return its test images and masks."""
    with open(split_datasets_file_idx, 'rb') as handle:
        splits = pickle.load(handle)
    return splits['test_imgs'], splits['test_masks']


def is_directory_empty(directory):
    """Return True when *directory* has no entries or does not exist."""
    if not os.path.exists(directory):
        # A missing directory is treated the same as an empty one; the
        # original FileNotFoundError was deliberately disabled upstream.
        return True
    return not os.listdir(directory)

def onehot_to_single_channel(onehot_array):
    """Collapse a one-hot encoded array to a single channel of class indices.

    argmax along the leading axis picks the active class per position
    (assumes channel-first layout, e.g. (C, H, W) — TODO confirm with callers).
    """
    return np.argmax(onehot_array, axis=0)



def extract_zip(file_path, extract_to):
    """Extract a ZIP archive to *extract_to*, skipping if already extracted.

    The archive is considered already extracted when the folder named after
    the file (path minus its ".zip" extension) exists and is non-empty.
    """
    expected_file_folder = file_path[:-4]  # strip the ".zip" extension
    # Missing or empty folder means "not yet extracted".
    already_extracted = (os.path.exists(expected_file_folder)
                         and os.listdir(expected_file_folder))
    if already_extracted:
        print(f"Extraction skipped for {file_path}, {file_path} already exists.")
        return

    try:
        # BadZipFile is raised when *opening* the archive, so the open call
        # must be inside the try block (previously it sat outside and the
        # handler was unreachable).
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            zip_ref.extractall(extract_to)
    except zipfile.BadZipFile:
        print("Error: The file is not a zip file or it is corrupted.")
    else:
        # Only report success when extraction actually succeeded; the old
        # code printed this message even after a caught failure.
        print(f"File extracted successfully to {extract_to}")




def extract_tar_file(tar_path, extract_to):
    """Extract a tar archive into *extract_to*, skipping if already extracted.

    The archive counts as extracted when a non-empty folder named after it
    (basename minus the ".tar" extension) exists inside *extract_to*.
    """
    # Resolve the expected folder inside the destination. The previous code
    # used only the basename, which resolved against the current working
    # directory instead of *extract_to*, so the skip check never fired when
    # running from another directory.
    expected_file_folder = os.path.join(extract_to, os.path.basename(tar_path)[:-4])

    already_extracted = (os.path.exists(expected_file_folder)
                         and os.listdir(expected_file_folder))
    if already_extracted:
        print(f"Extraction skipped for {tar_path}, {tar_path} already exists.")
        return

    # NOTE(review): extractall trusts member paths inside the archive; for
    # untrusted archives prefer tarfile's filter="data" (Python 3.12+).
    with tarfile.open(tar_path) as tar:
        tar.extractall(path=extract_to)
        print(f"Extracted {tar_path} to {extract_to}")


# Import required libraries (NOTE: these duplicate the imports at the top of the file)
import albumentations as A
import cv2
import numpy as np
import torch
import matplotlib.pyplot as plt

# Import the PyTorch tensor-conversion transform for albumentations
from albumentations.pytorch import ToTensorV2
# Configuration items were previously imported from config here

# Apply the 'ggplot' style to all matplotlib figures.
plt.style.use('ggplot')

# NOTE(review): matplotlib.pyplot is already imported above; this re-import
# is redundant but harmless.
import matplotlib.pyplot as plt
# Use the non-interactive 'Agg' backend so figures can be rendered and saved
# without a display (e.g. on a headless training server).
plt.switch_backend('Agg')

# This class tracks training and validation loss values and yields the per-epoch average
class Averager:
    """Running average of values (e.g. per-batch losses) across an epoch."""

    def __init__(self):
        self.current_total = 0.0  # sum of all values received so far
        self.iterations = 0.0     # number of values received

    def send(self, value):
        """Accumulate one value into the running total."""
        self.current_total += value
        self.iterations += 1

    @property
    def value(self):
        """Average of everything sent since the last reset; 0 when empty."""
        if not self.iterations:
            return 0
        return self.current_total / self.iterations

    def reset(self):
        """Clear the accumulated total and count."""
        self.current_total = 0.0
        self.iterations = 0.0


# class SaveBestModel:
#     """
#     保存最佳模型的类。如果当前周期的验证损失小于先前的最小损失，
#     则保存模型状态。
#     """
#
#     def __init__(
#             self, best_valid_loss=float('inf')
#     ):
#         self.best_valid_loss = best_valid_loss
#
#     def __call__(
#             self, current_valid_loss,
#             epoch, model, optimizer
#     ):
#         if current_valid_loss < self.best_valid_loss:
#             self.best_valid_loss = current_valid_loss
#             print(f"\nBest validation loss: {self.best_valid_loss}")
#             print(f"\nSaving best model for epoch: {epoch + 1}\n")
#             torch.save({
#                 'epoch': epoch + 1,
#                 'model_state_dict': model.state_dict(),
#                 'optimizer_state_dict': optimizer.state_dict(),
#             }, OUT_DIR+'/best_model.pth')


def collate_fn(batch):
    """Collate a detection batch by transposing it into per-field tuples.

    Images in a batch can hold different numbers of objects, so the default
    stacking collate cannot be used; keep samples grouped field-by-field.
    """
    transposed = zip(*batch)
    return tuple(transposed)


# Define the training transforms
def get_train_transform():
    """Training-time augmentations plus tensor conversion.

    Bounding boxes are in pascal_voc (x_min, y_min, x_max, y_max) format and
    are transformed together with the image; class ids travel in 'labels'.
    """
    return A.Compose([
        # The first positional argument of Flip/RandomRotate90 is
        # `always_apply`, not `p`; passing 0.5 positionally made these
        # transforms fire on every sample. Pass the probability by keyword.
        A.Flip(p=0.5),
        A.RandomRotate90(p=0.5),
        A.MotionBlur(p=0.2),
        A.MedianBlur(blur_limit=3, p=0.1),
        A.Blur(blur_limit=3, p=0.1),
        ToTensorV2(p=1.0),
    ], bbox_params={
        'format': 'pascal_voc',
        'label_fields': ['labels']
    })


# Define the validation transforms
def get_valid_transform():
    """Validation-time pipeline: tensor conversion only, no augmentation."""
    bbox_config = {
        'format': 'pascal_voc',
        'label_fields': ['labels']
    }
    return A.Compose([ToTensorV2(p=1.0)], bbox_params=bbox_config)

#
# def show_tranformed_image(train_loader):
#     """
#     这个函数显示来自`train_loader`的转换图像。
#     有助于检查转换图像以及相应标签是否正确。
#     仅在config.py中的`VISUALIZE_TRANSFORMED_IMAGES = True`时运行。
#     """
#     if len(train_loader) > 0:
#         for i in range(1):
#             images, targets = next(iter(train_loader))
#             images = list(image.to(DEVICE) for image in images)
#             targets = [{k: v.to(DEVICE) for k, v in t.items()} for t in targets]
#             boxes = targets[i]['boxes'].cpu().numpy().astype(np.int32)
#             labels = targets[i]['labels'].cpu().numpy().astype(np.int32)
#             sample = images[i].permute(1, 2, 0).cpu().numpy()
#             for box_num, box in enumerate(boxes):
#                 cv2.rectangle(sample,
#                               (box[0], box[1]),
#                               (box[2], box[3]),
#                               (0, 0, 255), 2)
#                 cv2.putText(sample, CLASSES[labels[box_num]],
#                             (box[0], box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
#                             1.0, (0, 0, 255), 2)
#             cv2.imshow('Transformed image', sample)
#             cv2.waitKey(0)
#             cv2.destroyAllWindows()

#
# def save_model(epoch, model, optimizer):
#     """
#     保存训练的模型直到当前周期，或者每次调用时。
#     """
#     torch.save({
#         'epoch': epoch + 1,
#         'model_state_dict': model.state_dict(),
#         'optimizer_state_dict': optimizer.state_dict(),
#     }, OUT_DIR + '/checkpoint.pth')


def save_loss_plot(OUT_DIR, train_loss, val_loss):
    """Write the train and validation loss curves as separate PNGs under OUT_DIR."""
    train_fig, train_axis = plt.subplots()
    valid_fig, valid_axis = plt.subplots()

    train_axis.plot(train_loss, color='tab:blue')
    train_axis.set_xlabel('iterations')
    train_axis.set_ylabel('train loss')

    valid_axis.plot(val_loss, color='tab:red')
    valid_axis.set_xlabel('iterations')
    valid_axis.set_ylabel('validation loss')

    train_fig.savefig(f"{OUT_DIR}/train_loss.png")
    valid_fig.savefig(f"{OUT_DIR}/valid_loss.png")
    print('SAVING PLOTS COMPLETE...')

    # Free figure memory; repeated calls would otherwise accumulate figures.
    plt.close('all')



def collate_fn(batch):
    """Transpose a detection batch into per-field tuples.

    Targets vary in size between images, so tensors cannot be stacked by the
    default collate; group the batch field-by-field instead.

    NOTE(review): duplicate of the collate_fn defined earlier in this file;
    this later definition is the one in effect at import time.
    """
    return tuple(zip(*batch))

class Averager:
    """Tracks a running mean of values (e.g. per-batch losses).

    NOTE(review): duplicate of the Averager defined earlier in this file;
    this later definition shadows the first — consider removing one copy.
    """

    def __init__(self):
        # total/count pair backing the running mean
        self.current_total = 0.0
        self.iterations = 0.0

    def send(self, value):
        """Record one observation."""
        self.iterations += 1
        self.current_total += value

    @property
    def value(self):
        """Mean of observations since the last reset; 0 when none were sent."""
        return self.current_total / self.iterations if self.iterations else 0

    def reset(self):
        """Forget all recorded observations."""
        self.current_total = 0.0
        self.iterations = 0.0