import os
import time

import cv2
import numpy as np
from PIL import Image
from requests.compat import chardet
from torch.utils.data.dataset import Dataset
from torchvision.transforms import InterpolationMode
from tqdm import tqdm
from torchvision import transforms
from torchvision import models
import torch
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import precision_score, recall_score, f1_score
from torch import nn
from torch.utils.data.dataloader import DataLoader
from matplotlib import pyplot as plt
from numpy import printoptions
import requests
import tarfile
import random
import json
import chardet
from shutil import copyfile
import openpyxl
from openpyxl.drawing.image import Image as OpenpyxlImage
from openpyxl.styles import Alignment, Font
from sklearn.metrics import confusion_matrix, classification_report
import pandas as pd
from torchvision import transforms
from timm.data import Mixup
from torchvision.transforms.v2 import MixUp
from timm.data.mixup import Mixup as TimmMixup

# Fix all seeds to make experiments reproducible
torch.manual_seed(2020)
torch.cuda.manual_seed(2020)  # seeds the current CUDA device only
np.random.seed(2020)
random.seed(2020)
# Force deterministic cuDNN algorithms (slower, but reproducible runs).
# NOTE(review): torch.backends.cudnn.benchmark is not disabled here — confirm
# it is False if bit-exact reproducibility across runs is required.
torch.backends.cudnn.deterministic = True


# Simple dataloader and label binarization, that is converting test labels into binary arrays of length 27 (number of classes) with 1 in places of applicable labels).
class NusDataset(Dataset):
    """Multi-label image dataset driven by a JSON annotation file.

    The annotation file must look like::

        {"samples": [{"image_name": ..., "image_labels": [...]}, ...],
         "labels": [...]}

    Each sample's label list is binarized into a float vector aligned with
    ``self.classes`` (1.0 where the class applies, 0.0 elsewhere).
    """

    def __init__(self, data_path, anno_path, transforms):
        """
        Args:
            data_path: directory containing the image files.
            anno_path: path to the JSON annotation file.
            transforms: callable applied to each PIL image, or None.
        """
        self.transforms = transforms

        # Detect encoding first: annotation files may not be UTF-8
        # (e.g. GBK), so sniff the raw bytes with chardet.
        with open(anno_path, 'rb') as fp:
            raw_data = fp.read()
            result = chardet.detect(raw_data)
            detected_encoding = result['encoding']

        # Load with detected encoding
        with open(anno_path, 'r', encoding=detected_encoding) as fp:
            json_data = json.load(fp)

        samples = json_data['samples']
        self.classes = json_data['labels']

        self.imgs = []
        self.annos = []
        self.data_path = data_path
        print('loading', anno_path)
        for sample in samples:
            self.imgs.append(sample['image_name'])
            self.annos.append(sample['image_labels'])
        # Binarize: one float vector per sample, in the order of self.classes.
        for item_id in range(len(self.annos)):
            item = self.annos[item_id]
            vector = [cls in item for cls in self.classes]
            self.annos[item_id] = np.array(vector, dtype=float)

    def __getitem__(self, item):
        try:
            anno = self.annos[item]
            img_path = os.path.join(self.data_path, self.imgs[item])
            # BUGFIX: force 3-channel RGB.  Grayscale/palette/RGBA files would
            # otherwise break the downstream cv2/Normalize pipeline, which
            # assumes exactly three channels.
            img = Image.open(img_path).convert('RGB')
            if self.transforms is not None:
                img = self.transforms(img)
            return img, anno
        except Exception as e:
            # NOTE(review): returning (None, None) will crash the default
            # DataLoader collate function on the next batch containing this
            # sample; consider re-raising or filtering bad samples upstream.
            print(f'Error while loading image {self.imgs[item]}: {str(e)}')
            return None, None

    def __len__(self):
        return len(self.imgs)


# Use the torchvision's implementation of ResNeXt, but add FC layer for a different number of classes (27) and a Sigmoid instead of a default Softmax.
class Resnext50(nn.Module):
    """ResNeXt-50 (32x4d) backbone with the classifier head swapped for n_classes outputs.

    The network emits raw logits — pair it with BCEWithLogitsLoss for
    multi-label training; there is deliberately no Sigmoid inside the model.
    """

    def __init__(self, n_classes):
        super().__init__()
        backbone = models.resnext50_32x4d(pretrained=True)
        in_feats = backbone.fc.in_features
        # Replace the ImageNet head with dropout + a fresh linear classifier.
        backbone.fc = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(in_features=in_feats, out_features=n_classes),
        )
        self.base_model = backbone

    def forward(self, x):
        # Raw logits; apply torch.sigmoid externally when probabilities are needed.
        return self.base_model(x)


# Custom transform: convert a PIL image to RGB + HSV and concatenate the channels.
class RGBHSVConcatenate:
    """Turn a PIL RGB image into a 6-channel float tensor: RGB channels then HSV."""

    def __call__(self, pil_img):
        # PIL -> NumPy (H, W, 3) uint8 array in RGB channel order.
        rgb = np.array(pil_img)

        # RGB -> HSV via OpenCV.
        hsv = cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)

        # Scale both colour spaces to [0, 1], mirroring ToTensor's behaviour
        # (HSV is normalized by 255 as well, matching the original code).
        rgb_f = rgb.astype(np.float32) / 255.0
        hsv_f = hsv.astype(np.float32) / 255.0

        # (H, W, 3) -> (3, H, W) for each colour space, then stack along the
        # channel dimension into a single (6, H, W) tensor.
        planes = [torch.from_numpy(a).permute(2, 0, 1) for a in (rgb_f, hsv_f)]
        return torch.cat(planes, dim=0)

# A simple function for visualization.
def show_sample(img, binary_img_labels):
    """Display *img* with a title listing its active class names.

    The binary label vector is mapped back to text labels via the
    module-level ``dataset_val.classes``.
    """
    active_idx = np.argwhere(binary_img_labels > 0)[:, 0]
    img_labels = np.array(dataset_val.classes)[active_idx]
    plt.imshow(img)
    plt.title("{}".format(', '.join(img_labels)))
    plt.axis('off')
    plt.show()


# Use threshold to define predicted labels and invoke sklearn's metrics with different averaging strategies.
def calculate_metrics_bak(pred, target, threshold=0.5):
    """Binarize *pred* at *threshold* and score precision/recall/F1 under
    micro, macro and samples averaging.

    Returns a dict keyed '<averaging>/<metric>'.
    """
    pred = np.array(pred > threshold, dtype=float)
    scorers = (('precision', precision_score),
               ('recall', recall_score),
               ('f1', f1_score))
    metrics = {}
    for avg in ('micro', 'macro', 'samples'):
        for metric_name, scorer in scorers:
            metrics[f'{avg}/{metric_name}'] = scorer(
                y_true=target, y_pred=pred, average=avg, zero_division=0)
    return metrics

def calculate_metrics(pred, targets, threshold=0.5):
    """Threshold raw scores against integer ground truth and compute
    precision/recall/F1 with micro, macro and samples averaging.

    Args:
        pred: array-like of raw scores, shape (n_samples, n_classes).
        targets: array-like of ground-truth labels (coerced to int).
        threshold: binarization cutoff applied to *pred*.

    Returns a dict keyed '<averaging>/<metric>'.
    """
    binarized = np.array(pred > threshold, dtype=float)
    true = np.array(targets).astype(int)  # ensure integer ground truth

    scorers = (('precision', precision_score),
               ('recall', recall_score),
               ('f1', f1_score))
    out = {}
    for avg in ('micro', 'macro', 'samples'):
        for metric_name, scorer in scorers:
            out[f'{avg}/{metric_name}'] = scorer(
                true, binarized, average=avg, zero_division=0)
    return out

# Here is an auxiliary function for checkpoint saving.
def checkpoint_save(model, save_path, epoch, optimizer, model_n='checkpoint-OK.pth'):
    """Save model + optimizer state to ``save_path/model_n``.

    Always writes a single, consistent dict format::

        {'model_state_dict': ..., 'optimizer_state_dict': ..., 'epoch': epoch}

    For nn.DataParallel models the inner module's weights are stored (keys
    without the 'module.' prefix), so the checkpoint also loads into a plain
    single-GPU model.  BUGFIX: the previous implementation saved a *bare*
    state_dict for DataParallel models, which broke ``finetune()`` — it
    expects the dict format above.

    Args:
        model: the (possibly DataParallel-wrapped) network.
        save_path: destination directory.
        epoch: epoch number stored in the checkpoint and substituted into
            ``model_n`` when it contains a '{}' placeholder.
        optimizer: optimizer whose state is saved alongside the model.
        model_n: file name; a plain name passes through format() unchanged.
    """
    f = os.path.join(save_path, model_n.format(epoch))
    # Unwrap DataParallel so the saved keys are not prefixed with 'module.'.
    net = model.module if hasattr(model, 'module') else model
    torch.save({
        'model_state_dict': net.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'epoch': epoch,
    }, f)
    print('saved checkpoint:', f)


# img_folder = r'.\Data\images'
# img_folder = r'\\192.168.1.137\share\Test\livan\峰云一致性模型训练图片库\分割图片及结果'
img_folder = r'.\\Data\第二批_备份_提取整理'
# ImageNet normalization statistics (kept for reference, unused):
# mean = [0.485, 0.456, 0.406]
# std = [0.229, 0.224, 0.225]

# Per-channel normalization statistics — presumably computed on this dataset
# (RGB order, [0, 1] scale); TODO confirm against the preprocessing script.
mean = [0.520, 0.342, 0.357]
std = [0.281, 0.200, 0.206]

num_workers = 0  # Number of CPU processes for data preprocessing
lr = 1e-6  # Learning rate
batch_size = 32
image_wh = 512  # square side length all images are resized to

# Datasets loaded without transforms; used below only for label statistics.
dataset_val = NusDataset(img_folder, os.path.join(img_folder, 'small_test.json'), None)
dataset_train = NusDataset(img_folder, os.path.join(img_folder, 'small_train.json'), None)

# for sample_id in range(5):
#     show_sample(*dataset_val[sample_id])

# Calculate label distribution for the entire dataset (train + test)
samples = dataset_val.annos + dataset_train.annos
samples = np.array(samples)

# Per-class positive counts across the combined splits.
class_counts = np.sum(samples, axis=0)

#mixup_transform = RandomMixUp(num_classes=class_counts, p=0.5, alpha=0.8)

# timm's batch-level MixUp (currently unused in the loops; mixup_data below
# is applied instead).
mixup_fn = TimmMixup(
    mixup_alpha=0.8,         # MixUp strength; > 0 enables MixUp
    cutmix_alpha=0.0,        # CutMix disabled
    prob=0.5,                # apply MixUp to 50% of batches
    switch_prob=0.0,         # MixUp only, never switch to CutMix
    mode='batch',            # mix across the whole batch
    label_smoothing=0.0,
    num_classes=8
)

def mixup_data(x, y, alpha=0.8, device='cuda'):
    """Apply MixUp to a batch of images and their (soft) labels.

    Args:
        x: image tensor (batch_size, C, H, W).
        y: label tensor (batch_size, num_classes).
        alpha: Beta(alpha, alpha) parameter; alpha <= 0 disables mixing.
        device: device on which the random permutation is created.

    Returns:
        mixed_x: mixed image tensor (batch_size, C, H, W)
        mixed_y: mixed soft-label tensor (batch_size, num_classes)
        lam:     interpolation coefficient used for this batch
    """
    # One lambda per batch; lam == 1.0 leaves the batch untouched.
    lam = float(np.random.beta(alpha, alpha)) if alpha > 0 else 1.0

    # Random pairing: each sample is blended with another sample of the batch.
    perm = torch.randperm(x.size(0), device=device)

    mixed_x = lam * x + (1 - lam) * x[perm]
    mixed_y = lam * y + (1 - lam) * y[perm]
    return mixed_x, mixed_y, lam

# torchvision v2 MixUp instance (currently unused; kept for experimentation).
mixup = MixUp(
    num_classes=8,   # total number of classes
    alpha=0.8        # Beta distribution parameter
)

def cv2_resize(image):
    """Resize a PIL image to (image_wh, image_wh) with OpenCV bilinear interpolation.

    Returns an RGB uint8 NumPy array.  The original implementation
    round-tripped RGB -> BGR -> RGB around the resize; cv2.resize is
    channel-order agnostic (it interpolates each channel independently), so
    the two conversions were pure overhead and have been dropped — the output
    is pixel-identical.
    """
    arr = np.asarray(image)  # PIL -> (H, W, 3) uint8 array, RGB order preserved
    return cv2.resize(arr, (image_wh, image_wh), interpolation=cv2.INTER_LINEAR)

# Custom transform wrapper so cv2_resize can sit inside a transforms.Compose pipeline.
class CV2Resize(object):
    """Callable transform delegating to the module-level cv2_resize helper."""

    def __call__(self, image):
        # Delegate; keeps the resize logic in one place.
        return cv2_resize(image)

# Shared preprocessing pipeline for train/test/inference.
# NOTE(review): the name is presumably a typo for "transform_to", but it is
# referenced throughout the file, so it is kept unchanged.
ransform_to = transforms.Compose([
    #transforms.Resize((image_wh, image_wh)),
    CV2Resize(),
    #RGBHSVConcatenate(),  # concatenate RGB+HSV channels (6-channel input)
    # transforms.RandomHorizontalFlip(),
    # transforms.ColorJitter(),
    # transforms.RandomAffine(
    #     degrees=20,
    #     translate=(0.2, 0.2),
    #     scale=(0.5, 1.5),
    #     shear=None,
    #     interpolation=InterpolationMode.BILINEAR,  # e.g. bilinear interpolation
    #     fill=tuple((np.array(mean) * 255).astype(int).tolist())
    # ),
    transforms.ToTensor(),
    #transforms.GaussianBlur(kernel_size=(37, 37), sigma=(10.0, 10.0)),
    transforms.Normalize(mean, std)
])

# Save path for checkpoints
save_path = 'model/'
# Save path for logs
logdir = 'logs/'

# Sentinel file polled during training: writing "1" into it makes train()
# stop early so finetuning can take over.
flag_path = 'finetune.flag'

# Global counters shared between train() and finetune() so the TensorBoard
# curves continue seamlessly across the two phases.
iteration = 0
epoch = 0
def train(logger):
    """Run the main training loop for the multi-label ResNeXt classifier.

    Trains with BCEWithLogitsLoss (the model emits raw logits) and the custom
    mixup_data augmentation, evaluates on the test split every ``test_freq``
    iterations, and checkpoints whenever validation loss improves.  Polls
    ``flag_path``: when that file contains "1" the loop exits early so the
    caller can switch to finetune().

    Args:
        logger: torch.utils.tensorboard SummaryWriter receiving all scalars.
    """
    global iteration
    global epoch
    # We use the .tar.gz archive from this(https://github.com/thuml/HashNet/tree/master/pytorch#datasets)
    # github repository to speed up image loading(instead of loading it from Flickr).
    # Let's download and extract it.

    # if not os.path.exists(img_folder):
    #     def download_file_from_google_drive(id, destination):
    #         def get_confirm_token(response):
    #             for key, value in response.cookies.items():
    #                 if key.startswith('download_warning'):
    #                     return value
    #             return None
    #
    #         def save_response_content(response, destination):
    #             CHUNK_SIZE = 32768
    #             with open(destination, "wb") as f:
    #                 for chunk in tqdm(response.iter_content(CHUNK_SIZE), desc='Downloading'):
    #                     if chunk:  # filter out keep-alive new chunks
    #                         f.write(chunk)
    #
    #         URL = "https://docs.google.com/uc?export=download"
    #         session = requests.Session()
    #         response = session.get(URL, params={'id': id}, stream=True)
    #         token = get_confirm_token(response)
    #
    #         if token:
    #             params = {'id': id, 'confirm': token}
    #             response = session.get(URL, params=params, stream=True)
    #         save_response_content(response, destination)
    #
    #
    #     file_id = '0B7IzDz-4yH_HMFdiSE44R1lselE'
    #     path_to_tar_file = str(time.time()) + '.tar.gz'
    #     download_file_from_google_drive(file_id, path_to_tar_file)
    #     print('Extraction')
    #     with tarfile.open(path_to_tar_file) as tar_ref:
    #         tar_ref.extractall(os.path.dirname(img_folder))
    #     os.remove(path_to_tar_file)

    # Also, copy our pre-processed annotations to the dataset folder.
    # Note: you can find script for generating such annotations in attachments
    # copyfile('nus_wide/small_test.json', os.path.join(img_folder, 'small_test.json'))
    # copyfile('nus_wide/small_train.json', os.path.join(img_folder, 'small_train.json'))

    # Let's take a look at the data we have. To do it we need to load the dataset without augmentations.

    # Reset the finetune flag so a stale "1" does not abort training immediately.
    with open(flag_path, "w") as file:
        file.write("0")

    with printoptions(precision=3, suppress=True):

        # Sort labels according to their frequency in the dataset.
        sorted_ids = np.array([i[0] for i in sorted(enumerate(class_counts), key=lambda x: x[1])], dtype=int)
        print('Label distribution (count, class name):',
              list(zip(class_counts[sorted_ids].astype(int), np.array(dataset_val.classes)[sorted_ids])))
        plt.rcParams['font.family'] = 'SimHei'
        plt.barh(range(len(dataset_val.classes)), width=class_counts[sorted_ids])
        plt.yticks(range(len(dataset_val.classes)), np.array(dataset_val.classes)[sorted_ids])
        plt.gca().margins(y=0)
        plt.grid()
        plt.title('Label distribution')
        plt.show()

    # Initialize the training parameters.

    save_freq = 1  # Save checkpoint frequency (epochs) — NOTE(review): unused; saving is driven by val loss below
    test_freq = 20  # Test model frequency (iterations)
    max_epoch_number = 180  # Number of epochs for training
    # Note: on the small subset of data overfitting happens after 30-35 epochs

    # NOTE(review): hard-coded CUDA; this will raise on a CPU-only machine.
    device = torch.device('cuda')


    # Run tensorboard
    # % load_ext
    # tensorboard
    # % tensorboard - -logdir
    # {logdir}

    # Test preprocessing
    # val_transform = transforms.Compose([
    #     transforms.Resize((image_wh, image_wh)),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean, std)
    # ])
    print(tuple(np.array(np.array(mean) * 255).tolist()))

    # Train preprocessing
    # train_transform = transforms.Compose([
    #     transforms.Resize((image_wh, image_wh)),
    #     transforms.RandomHorizontalFlip(),
    #     transforms.ColorJitter(),
    #     transforms.RandomAffine(degrees=20, translate=(0.2, 0.2), scale=(0.5, 1.5),
    #                             shear=None, resample=False,
    #                             fillcolor=tuple(np.array(np.array(mean)*255).astype(int).tolist())),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean, std)
    # ])

    # Initialize the dataloaders for training.
    test_annotations = os.path.join(img_folder, 'small_test.json')
    train_annotations = os.path.join(img_folder, 'small_train.json')

    # Both splits use the same deterministic preprocessing pipeline.
    test_dataset = NusDataset(img_folder, test_annotations, ransform_to)
    train_dataset = NusDataset(img_folder, train_annotations, ransform_to)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True,
                                  drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers)

    num_train_batches = int(np.ceil(len(train_dataset) / batch_size))

    # Initialize the model
    model = Resnext50(len(train_dataset.classes))
    # Switch model to the training mode and move it to GPU.
    model.train()
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # If more than one GPU is available we can use both to speed up the training.
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    os.makedirs(save_path, exist_ok=True)

    # Loss function
    #criterion = nn.BCELoss()
    criterion = nn.BCEWithLogitsLoss()  # operates on raw logits, matching the model's output


    # Record hyperparameters (for comparing later experiments).
    hparams = {
        'batch_size': batch_size,
        'lr': lr,
        'max_epoch': max_epoch_number
    }
    logger.add_hparams(hparams, {})

    # Run training
    # NOTE(review): this rebinds the *global* epoch counter back to 0.
    epoch = 0

    min_loss = 1  # NOTE(review): unused
    val_min_loss = 1
    loss_value_val = 1
    do_finetune = False
    while True:


        batch_losses = []

        for imgs, targets in train_dataloader:
            imgs, targets = imgs.to(device), targets.to(device)
            # Ensure targets is a torch.Tensor.
            if not isinstance(targets, torch.Tensor):
                targets = torch.from_numpy(targets).to(device)
            # timm MixUp (disabled): would return mixed images and soft labels.
            #imgs, targets = mixup_fn(imgs, targets)

            # Apply the custom MixUp.
            imgs, targets, lam = mixup_data(imgs, targets, alpha=0.8, device=device)

            optimizer.zero_grad()

            model_result = model(imgs)
            loss = criterion(model_result, targets.type(torch.float))

            batch_loss_value = loss.item()
            loss.backward()
            optimizer.step()

            logger.add_scalar('train_loss', batch_loss_value, iteration)
            batch_losses.append(batch_loss_value)
            with torch.no_grad():
                # Train-time metrics are computed against the *mixed* soft labels.
                result = calculate_metrics(model_result.cpu().numpy(), targets.cpu().numpy())
                for metric in result:
                    logger.add_scalar('train/' + metric, result[metric], iteration)

            print(f"iteration:{iteration}")
            # Periodic evaluation on the full test split.
            if iteration % test_freq == 0:  # iteration % test_freq == 0:
                model.eval()
                with torch.no_grad():
                    model_result = []
                    targets = []
                    batch_losses_val = []
                    for imgs, batch_targets in test_dataloader:
                        imgs = imgs.to(device)
                        model_batch_result = model(imgs)
                        loss_val = criterion(model_batch_result.cpu(), batch_targets.type(torch.float))
                        batch_loss_val_value = loss_val.item()
                        batch_losses_val.append(batch_loss_val_value)
                        model_result.extend(model_batch_result.cpu().numpy())
                        targets.extend(batch_targets.cpu().numpy())

                    loss_value_val = np.mean(batch_losses_val)

                result = calculate_metrics(np.array(model_result), np.array(targets))
                for metric in result:
                    logger.add_scalar('test/' + metric, result[metric], iteration)

                print("epoch:{:2d} iter:{:3d} test: "
                      "micro f1: {:.3f} "
                      "macro f1: {:.3f} "
                      "samples precision: {:.3f} "
                      "val loss:{:.3f}".format(epoch, iteration,
                                               result['micro/f1'],
                                               result['macro/f1'],
                                               result['samples/precision'],
                                               loss_value_val))

                logger.add_scalar('val/loss', loss_value_val, iteration)

                # Checkpoint whenever validation loss improves.
                if loss_value_val < val_min_loss:
                    val_min_loss = loss_value_val
                    checkpoint_save(model, save_path, epoch,optimizer)

                model.train()

            iteration += 1

            # Poll the external flag file: "1" requests a switch to finetuning.
            try:
                with open(flag_path, 'r') as f:
                    if f.read().strip() == '1':
                        do_finetune = True
                        break
            except FileNotFoundError:
                pass

        loss_value = np.mean(batch_losses)
        # print("epoch:{:2d} iter:{:3d} train-loss:{:.3f} val-loss:{:.3f}".format(epoch, iteration, loss_value, loss_value_val))
        print("epoch:{:2d} iter:{:3d} train-loss:{:.3f}".format(epoch, iteration, loss_value))

        # Log the learning rate.
        logger.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        epoch += 1

        if max_epoch_number < epoch or do_finetune:
            break

def finetune(logger):
    """Continue training from model/checkpoint-OK.pth at 1/10 of the saved learning rate.

    Mirrors train(): same data, MixUp, loss and evaluation cadence, but first
    restores model + optimizer state and then saves improved checkpoints
    under "checkpoint-OK-finetune.pth".  Continues the global iteration/epoch
    counters so TensorBoard curves carry on from the training phase.

    Args:
        logger: torch.utils.tensorboard SummaryWriter receiving all scalars.
    """
    global iteration
    global epoch
    print("start finetune")
    save_freq = 1  # Save checkpoint frequency (epochs) — NOTE(review): unused; saving is driven by val loss below
    test_freq = 10  # Test model frequency (iterations)
    max_epoch_number = 180  # Number of epochs for training
    # Note: on the small subset of data overfitting happens after 30-35 epochs
    device = torch.device('cuda')

    # Initialize the dataloaders for training.
    test_annotations = os.path.join(img_folder, 'small_test.json')
    train_annotations = os.path.join(img_folder, 'small_train.json')

    test_dataset = NusDataset(img_folder, test_annotations, ransform_to)
    train_dataset = NusDataset(img_folder, train_annotations, ransform_to)

    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, num_workers=num_workers, shuffle=True,
                                  drop_last=True)
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, num_workers=num_workers)

    num_train_batches = int(np.ceil(len(train_dataset) / batch_size))

    # Initialize the model
    model = Resnext50(len(train_dataset.classes))
    # Switch model to the training mode and move it to GPU.
    model.train()
    model = model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Restore model/optimizer state.  NOTE(review): expects the dict checkpoint
    # format ({'model_state_dict': ...}); a checkpoint written by the
    # DataParallel branch of the original checkpoint_save() (a bare
    # state_dict) would fail here — confirm which format was saved.
    ckpt = torch.load('model/checkpoint-OK.pth', map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    # Lower the learning rate to 1/10 of the restored value for finetuning.
    for g in optimizer.param_groups:
        g['lr'] = g['lr'] / 10

    # If more than one GPU is available we can use both to speed up the training.
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    os.makedirs(save_path, exist_ok=True)

    # Loss function
    #criterion = nn.BCELoss()
    criterion = nn.BCEWithLogitsLoss()

    # Run training
    min_loss = 1  # NOTE(review): unused
    val_min_loss = 1
    loss_value_val = 1
    while True:
        batch_losses = []

        for imgs, targets in train_dataloader:
            imgs, targets = imgs.to(device), targets.to(device)
            # Ensure targets is a torch.Tensor.
            if not isinstance(targets, torch.Tensor):
                targets = torch.from_numpy(targets).to(device)
            # timm MixUp (disabled): would return mixed images and soft labels.
            #imgs, targets = mixup_fn(imgs, targets)

            # Apply the custom MixUp.
            imgs, targets, lam = mixup_data(imgs, targets, alpha=0.8, device=device)

            optimizer.zero_grad()

            model_result = model(imgs)
            loss = criterion(model_result, targets.type(torch.float))

            batch_loss_value = loss.item()
            loss.backward()
            optimizer.step()

            logger.add_scalar('train_loss', batch_loss_value, iteration)
            batch_losses.append(batch_loss_value)
            with torch.no_grad():
                result = calculate_metrics(model_result.cpu().numpy(), targets.cpu().numpy())
                for metric in result:
                    logger.add_scalar('train/' + metric, result[metric], iteration)

            print(f"iteration:{iteration}")
            # Periodic evaluation on the full test split.
            if iteration % test_freq == 0:  # iteration % test_freq == 0:
                model.eval()
                with torch.no_grad():
                    model_result = []
                    targets = []
                    batch_losses_val = []
                    for imgs, batch_targets in test_dataloader:
                        imgs = imgs.to(device)
                        model_batch_result = model(imgs)
                        loss_val = criterion(model_batch_result.cpu(), batch_targets.type(torch.float))
                        batch_loss_val_value = loss_val.item()
                        batch_losses_val.append(batch_loss_val_value)
                        model_result.extend(model_batch_result.cpu().numpy())
                        targets.extend(batch_targets.cpu().numpy())

                    loss_value_val = np.mean(batch_losses_val)

                result = calculate_metrics(np.array(model_result), np.array(targets))
                for metric in result:
                    logger.add_scalar('test/' + metric, result[metric], iteration)

                print("epoch:{:2d} iter:{:3d} test: "
                      "micro f1: {:.3f} "
                      "macro f1: {:.3f} "
                      "samples precision: {:.3f} "
                      "val loss:{:.3f}".format(epoch, iteration,
                                               result['micro/f1'],
                                               result['macro/f1'],
                                               result['samples/precision'],
                                               loss_value_val))

                logger.add_scalar('val/loss', loss_value_val, iteration)

                # Checkpoint whenever validation loss improves.
                if loss_value_val < val_min_loss:
                    val_min_loss = loss_value_val
                    checkpoint_save(model, save_path, epoch,optimizer,"checkpoint-OK-finetune.pth")

                model.train()

            iteration += 1

        loss_value = np.mean(batch_losses)
        # print("epoch:{:2d} iter:{:3d} train-loss:{:.3f} val-loss:{:.3f}".format(epoch, iteration, loss_value, loss_value_val))
        print("epoch:{:2d} iter:{:3d} train-loss:{:.3f}".format(epoch, iteration, loss_value))

        # Log the learning rate.
        logger.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)
        epoch += 1

        if max_epoch_number < epoch:
            break


def load_model(checkpoint_path, device):
    """Build a Resnext50 sized to the training label set and load its weights.

    BUGFIX: accepts both checkpoint formats that checkpoint_save() could
    produce — a dict containing 'model_state_dict' (written by the
    non-DataParallel branch) or a bare state_dict.  The original code passed
    the wrapper dict straight to load_state_dict(), which fails on the dict
    format.

    Args:
        checkpoint_path: path to the saved checkpoint file.
        device: torch device to map the weights onto.

    Returns:
        The model in eval() mode, moved to *device*.
    """
    checkpoint = torch.load(checkpoint_path, map_location=device)
    train_annotations = os.path.join(img_folder, 'small_train.json')

    # The dataset is loaded only to recover the class list (model output size).
    train_dataset = NusDataset(img_folder, train_annotations, ransform_to)

    model = Resnext50(len(train_dataset.classes))
    # Unwrap the dict format if present; otherwise treat it as a raw state_dict.
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        state_dict = checkpoint['model_state_dict']
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    model = model.to(device)
    model.eval()
    return model


def predict_single_image(model, classes, img_path, device, transform):
    """Predict multi-label classes for a single image file.

    Args:
        model: network emitting raw logits (see Resnext50.forward).
        classes: sequence of class names aligned with the model outputs.
        img_path: path to the image file.
        device: torch device to run inference on.
        transform: preprocessing callable producing a (C, H, W) tensor.

    Returns:
        Class names whose sigmoid probability exceeds 0.5, or
        ['no predictions'] when none do.
    """
    # Load image and force 3-channel RGB.
    image = Image.open(img_path).convert('RGB')
    # Preprocess into a (1, C, H, W) batch on the target device.
    input_tensor = transform(image).unsqueeze(0).to(device)

    # Forward pass; the model emits raw logits (no Sigmoid inside).
    with torch.no_grad():
        raw_pred = model(input_tensor).cpu().numpy()[0]

    # BUGFIX: the raw logits were previously compared against 0.5 directly,
    # which corresponds to a probability threshold of ~0.62.  Convert to
    # probabilities first so the 0.5 cutoff means what it says.
    probs = 1 / (1 + np.exp(-raw_pred))
    mask = np.array(probs > 0.5, dtype=float)
    predicted_labels = np.array(classes)[np.argwhere(mask > 0)[:, 0]]
    return predicted_labels if len(predicted_labels) else ['no predictions']


def output1():
    """Export per-image predictions for an external test folder to an Excel sheet.

    Loads the best checkpoint, runs the model over every .jpg referenced by
    the folder's test.json, and writes one row per image: source name, file
    name, embedded thumbnail, a single "main" predicted label and the full
    predicted label set.
    """
    # Configuration.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint_path = "./model/checkpoint-OK.pth"

    # Load the model (the class list is rebuilt inside load_model).
    model = load_model(checkpoint_path, device)

    # val_transform = transforms.Compose([
    #     transforms.Resize((image_wh, image_wh)),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean, std)
    # ])

    # test_annotations = os.path.join(img_folder, 'small_test_my2.json')
    # test_dataset = NusDataset(img_folder, test_annotations, val_transform)
    #
    # dataset_val = NusDataset(img_folder, os.path.join(img_folder, 'small_test_my2.json'), None)

    # for sample_id in [1, 2, 3, 4]:
    #     test_img, test_labels = test_dataset[sample_id]
    #     test_img_path = os.path.join(img_folder, test_dataset.imgs[sample_id])
    #     with torch.no_grad():
    #         raw_pred = model(test_img.unsqueeze(0).to(device)).cpu().numpy()[0]
    #         raw_pred = np.array(raw_pred > 0.5, dtype=float)
    #
    #     predicted_labels = np.array(dataset_val.classes)[np.argwhere(raw_pred > 0)[:, 0]]
    #     if not len(predicted_labels):
    #         predicted_labels = ['no predictions']
    #     img_labels = np.array(dataset_val.classes)[np.argwhere(test_labels > 0)[:, 0]]
    #     plt.rcParams['font.family'] = 'SimHei'
    #     plt.imshow(Image.open(test_img_path))
    #     plt.title("Predicted labels: {} \nGT labels: {}".format(', '.join(predicted_labels), ', '.join(img_labels)))
    #     plt.axis('off')
    #     plt.show()

    ######################## to excel #######################
    CELL_HIGH = 160  # row height / thumbnail size in pixels

    # Create an Excel workbook and sheet
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Image Predictions"

    # Set up headers
    sheet.append(["name", "File Name", "Image", "Predicted Label", "Whole Label"])
    # Row counter for placing images
    row = 2
    center_align = Alignment(horizontal='center', vertical='center')
    font = Font(size=20)
    alignment_center = Alignment(vertical="center")  # vertical centering

    test_img_folder = r"\\192.168.1.137\share\Test\livan\同一人一致性测试集_all_testjson"
    test_annotations = os.path.join(test_img_folder, 'test.json')
    test_dataset = NusDataset(test_img_folder, test_annotations, ransform_to)

    name = os.path.basename(test_img_folder)

    for sample_id, (test_img, test_labels) in enumerate(test_dataset):
        test_img_path = os.path.join(test_img_folder, test_dataset.imgs[sample_id])
        if not test_img_path.endswith("jpg"):
            continue
        with torch.no_grad():
            raw_pred = model(test_img.unsqueeze(0).to(device)).cpu().numpy()[0]
            raw_pred_y = raw_pred  # keep the raw logits for the fallback branch
            # Binarize the raw logits at an empirically chosen threshold.
            raw_pred= np.array(raw_pred >= 0.2007, dtype=float)
            raw_pred_sigm  = 1 / (1 + np.exp(-raw_pred_y))  # sigmoid -> per-class probabilities (multi-label)
            # NOTE(review): this applies sigmoid to the *binarized* 0/1 vector,
            # so every positive class gets the same probability and the argmax
            # below just picks the first candidate; raw_pred_sigm was probably
            # intended here — confirm.
            probs = 1 / (1 + np.exp(-raw_pred))  # sigmoid of the binarized vector (see note)

        main_label = "no main prediction"

        # Main-label logic: among classes whose logit cleared the threshold,
        # take the highest-probability one; if none cleared it, fall back to
        # the class with the highest raw logit.
        if raw_pred.sum() > 0:
            # Indices of all classes predicted positive.
            candidates = np.where(raw_pred == 1)[0]
            # If more than one class is positive, keep the highest-probability one.
            max_idx = candidates[np.argmax(probs[candidates])]
            predicted_labels = np.array(test_dataset.classes)[np.argwhere(raw_pred > 0)[:, 0]]
        else:
            # No class cleared the threshold: take the global argmax of the raw logits.
            max_idx = np.argmax(raw_pred_y)
            predicted_labels = [test_dataset.classes[max_idx]]

        # if not len(predicted_labels):
        #     predicted_labels = ['no whole predictions']

        main_label = test_dataset.classes[max_idx]
        # img_labels = np.array(test_dataset.classes)[np.argwhere(test_labels > 0)[:, 0]]

        # Append data to the Excel sheet
        # Add image to Excel
        excel_image = OpenpyxlImage(test_img_path)
        excel_image.width = CELL_HIGH  # Resize width for display
        excel_image.height = CELL_HIGH  # Resize height for display

        sheet[f"A{row}"] = name
        sheet[f"A{row}"].font = font
        sheet[f"A{row}"].alignment = alignment_center

        file_name = os.path.basename(test_img_path)
        sheet[f"B{row}"] = file_name
        sheet[f"B{row}"].alignment = alignment_center
        sheet[f"B{row}"].font = font

        img_cell = f"C{row}"  # Adjust column and row

        # Insert image into Excel
        sheet.add_image(excel_image, img_cell)
        sheet.row_dimensions[row].height = CELL_HIGH
        # Add path and label
        # sheet[f"C{row}"] = path
        sheet[f"D{row}"] = main_label  # pingyingTochinese2[label]
        sheet[f"D{row}"].alignment = alignment_center
        sheet[f"D{row}"].font = font

        # All predicted labels, comma-joined.
        sheet[f"E{row}"] = ",".join(predicted_labels)
        sheet[f"E{row}"].alignment = alignment_center
        sheet[f"E{row}"].font = font
        row += 1

    sheet.column_dimensions['A'].width = 20
    sheet.column_dimensions['B'].width = 90
    sheet.column_dimensions['C'].width = 30
    sheet.column_dimensions['D'].width = 20
    # Save workbook
    # Create the output folder if it does not exist yet.
    basename = os.path.basename(test_img_folder)
    folder_path = f"./预测结果/预测结果_{basename}"
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)

    fileName = f"{name}_predictions.xlsx"
    savePath = os.path.join(folder_path, fileName)
    # Remove any existing file with the same name first.
    if os.path.exists(savePath):
        os.remove(savePath)
    workbook.save(savePath)
    print(f"Results saved to {savePath}")


def output2():
    """Run the trained multi-label model over the test set and export an Excel
    workbook with three sheets: per-image predictions (with embedded images),
    a confusion matrix, and a classification report.

    The confusion matrix / report are computed over the *joined* label strings
    (e.g. "cat,dog"), so each distinct label combination is treated as one
    class.  Reads module-level config (`img_folder`, `image_wh`, `mean`,
    `std`) and the checkpoint at ./model/checkpoint-OK.pth; writes the
    workbook under ./预测结果/.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint_path = "./model/checkpoint-OK.pth"

    model = load_model(checkpoint_path, device)

    val_transform = transforms.Compose([
        transforms.Resize((image_wh, image_wh)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    # Pixel size used both for the embedded thumbnails and the row height.
    CELL_HIGH = 160

    # Create the Excel workbook.
    workbook = openpyxl.Workbook()
    sheet_pred = workbook.active
    sheet_pred.title = "Image Predictions"

    # Header row for the predictions sheet.
    sheet_pred.append(["name", "File Name", "Image", "Predicted Label", "Whole Label"])

    # Dedicated sheets for the confusion matrix and the classification report.
    sheet_cm = workbook.create_sheet(title="Confusion Matrix")
    sheet_cr = workbook.create_sheet(title="Classification Report")

    # Collected ground-truth / predicted label strings.
    y_true = []
    y_pred = []

    row = 2
    font = Font(size=20)
    alignment_center = Alignment(horizontal='center', vertical='center')

    test_img_folder = img_folder
    test_annotations = os.path.join(test_img_folder, 'small_test.json')
    test_dataset = NusDataset(test_img_folder, test_annotations, val_transform)
    name = os.path.basename(test_img_folder)

    for sample_id, (test_img, test_labels) in enumerate(test_dataset):
        test_img_path = os.path.join(test_img_folder, test_dataset.imgs[sample_id])
        if not test_img_path.lower().endswith(".jpg"):
            continue

        # Forward pass.  BUGFIX: the previous code thresholded the raw model
        # output *before* applying sigmoid, which turned the "probabilities"
        # into sigmoid of a 0/1 mask (a near-constant vector) and made the
        # no-prediction fallback below always pick class 0.  We now keep the
        # raw scores and threshold them once.  NOTE(review): assumes the
        # model's final layer already emits per-class scores where 0.5 is the
        # intended decision threshold — confirm against load_model/training.
        with torch.no_grad():
            scores = model(test_img.unsqueeze(0).to(device)).cpu().numpy()[0]
            raw_pred = np.array(scores >= 0.5, dtype=float)

        # Ground-truth label string (comma-joined class names).
        img_labels = np.array(test_dataset.classes)[np.argwhere(test_labels > 0)[:, 0]]
        if len(img_labels) > 0:
            true_label = ','.join(map(str, img_labels))
        else:
            true_label = "no_label"
        y_true.append(true_label)

        # Main predicted label: the single most confident class.  When at
        # least one class crossed the threshold this is guaranteed to be one
        # of the positives; otherwise it serves as the best-effort fallback.
        # (Previously argmax over the binary mask returned the *first*
        # positive class rather than the most confident one.)
        max_idx = int(np.argmax(scores))
        main_label = test_dataset.classes[max_idx]

        # Comma-joined string of every class that crossed the threshold.
        predicted_labels = np.array(test_dataset.classes)[np.argwhere(raw_pred > 0)[:, 0]]
        whole_label_str = ",".join(predicted_labels) if len(predicted_labels) else "no whole predictions"

        y_pred.append(whole_label_str)

        # Write one row of the Image Predictions sheet.
        excel_image = OpenpyxlImage(test_img_path)
        excel_image.width = CELL_HIGH
        excel_image.height = CELL_HIGH

        sheet_pred[f"A{row}"] = name
        sheet_pred[f"A{row}"].font = font
        sheet_pred[f"A{row}"].alignment = alignment_center

        sheet_pred[f"B{row}"] = os.path.basename(test_img_path)
        sheet_pred[f"B{row}"].font = font
        sheet_pred[f"B{row}"].alignment = alignment_center

        sheet_pred.add_image(excel_image, f"C{row}")
        sheet_pred.row_dimensions[row].height = CELL_HIGH

        sheet_pred[f"D{row}"] = main_label
        sheet_pred[f"D{row}"].font = font
        sheet_pred[f"D{row}"].alignment = alignment_center

        sheet_pred[f"E{row}"] = whole_label_str
        sheet_pred[f"E{row}"].font = font
        sheet_pred[f"E{row}"].alignment = alignment_center

        row += 1

    # Adjust column widths.
    for col, width in zip(['A', 'B', 'C', 'D', 'E'], [20, 90, 30, 30, 80]):
        sheet_pred.column_dimensions[col].width = width

    # Build the confusion matrix over every label string seen in either set.
    labels = sorted(set(y_true + y_pred))
    cm = confusion_matrix(y_true, y_pred, labels=labels)
    cm_df = pd.DataFrame(cm, index=labels, columns=labels)

    # Confusion Matrix sheet: header row, then one row per true label.
    sheet_cm.append([""] + labels)
    for true_lbl in labels:
        sheet_cm.append([true_lbl] + cm_df.loc[true_lbl].tolist())

    # Classification report, transposed so each row is one label.
    report = classification_report(y_true, y_pred, labels=labels, output_dict=True)
    report_df = pd.DataFrame(report).T

    # Classification Report sheet: header row, then one row per label.
    sheet_cr.append(["label"] + list(report_df.columns))
    for idx, row_data in report_df.iterrows():
        sheet_cr.append([idx] + row_data.tolist())

    # Save the workbook, replacing any previous run's output.
    folder_path = f"./预测结果/预测结果_multilabel_{name}_with_confusion"
    os.makedirs(folder_path, exist_ok=True)
    savePath = os.path.join(folder_path, f"{name}_multilabel_predictions_with_cm.xlsx")
    if os.path.exists(savePath):
        os.remove(savePath)
    workbook.save(savePath)
    print(f"Results with confusion matrix saved to {savePath}")


if __name__ == '__main__':
    # TensorBoard logger; `logdir` is defined at module level (not visible in
    # this chunk) — presumably the run's output directory.
    logger = SummaryWriter(logdir)

    # Main training pass, logging metrics to TensorBoard.
    train(logger)

    # Fine-tuning pass, reusing the same logger.
    finetune(logger)

    # Close the writer once training finishes so pending events are flushed.
    logger.close()

    # Export the validation-set confusion matrix (disabled by default).
    # output2()

    # Export the consistency results (disabled by default).
    #output1()