import importlib
import math
import shutil
import sys
import time

sys.path.append(r"D:\Livan\Codes\Python\manipulatePics")
fillBlack = importlib.import_module("fillBlack")
cropimg = importlib.import_module("cropimg")
import os
import torch
import torchvision
import matplotlib.pyplot as plt
from PIL import Image as PILImage
import openpyxl
from openpyxl.drawing.image import Image as OpenpyxlImage
from openpyxl.styles import Alignment, Font
import torchvision.transforms as transforms


# Dataset identifier: selects the data subdirectory under ./Data and is
# embedded in checkpoint and output file names throughout this module.
dataType = 'TongueConstitutionAYS_MAIN_V3'

class TongueDataset:
    """Wraps the train/test/validation ImageFolder splits of the tongue
    dataset and exposes DataLoaders sharing one 512x512 preprocessing
    pipeline.

    Note: load_train_data() must be called before id_to_class is populated.
    """

    def __init__(self, batch_size=32):
        self.batch_size = batch_size
        # Split directories are derived from the module-level dataType tag.
        self.train_dataset_dir = fr'.\Data\{dataType}\train'
        self.test_dataset_dir = fr'.\Data\{dataType}\test'
        self.validation_dataset_dir = fr'.\Data\{dataType}\validation'

        self.transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize((512, 512)),
            # torchvision.transforms.GaussianBlur(kernel_size=(37, 37), sigma=(10.0, 10.0)),
            # torchvision.transforms.Lambda(self.rgb_to_hsv),  # convert to HSV
            torchvision.transforms.ToTensor(),
            # Scales [0, 1] tensors to [-1, 1].
            torchvision.transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        ])
        self.train_dataset = None
        self.train_dataloader = None
        self.test_dataset = None
        self.test_dataloader = None
        self.validation_dataset = None
        self.validation_dataloader = None

        # Filled by load_train_data(): class id -> class (folder) name.
        self.id_to_class = dict()

    @staticmethod
    def rgb_to_hsv(image: PILImage.Image) -> PILImage.Image:
        """Convert an RGB image to the HSV colour space."""
        return image.convert('HSV')

    def load_train_data(self):
        """Load the training split, build id_to_class, and return its DataLoader."""
        self.train_dataset = torchvision.datasets.ImageFolder(self.train_dataset_dir, transform=self.transform)
        print(self.train_dataset.classes)
        print(self.train_dataset.class_to_idx)
        print(f'Train dataset size: {len(self.train_dataset)}')

        # Invert class_to_idx (label -> id) into id -> label.
        self.id_to_class = {idx: label for label, idx in self.train_dataset.class_to_idx.items()}
        print(self.id_to_class)

        # pin_memory speeds up host-to-GPU transfer of batches.
        self.train_dataloader = torch.utils.data.DataLoader(self.train_dataset, batch_size=self.batch_size,
                                                            shuffle=True, pin_memory=True)
        return self.train_dataloader

    def load_test_data(self):
        """Load the test split and return its DataLoader (no shuffling)."""
        self.test_dataset = torchvision.datasets.ImageFolder(self.test_dataset_dir, transform=self.transform)
        print(f'Test dataset size: {len(self.test_dataset)}')

        self.test_dataloader = torch.utils.data.DataLoader(self.test_dataset, batch_size=self.batch_size,
                                                           pin_memory=True)
        return self.test_dataloader

    def load_validation_data(self):
        """Load the validation split and return its DataLoader (no shuffling)."""
        self.validation_dataset = torchvision.datasets.ImageFolder(self.validation_dataset_dir,
                                                                   transform=self.transform)
        print(f'Validation dataset size: {len(self.validation_dataset)}')

        self.validation_dataloader = torch.utils.data.DataLoader(self.validation_dataset, batch_size=self.batch_size,
                                                                 pin_memory=True)
        return self.validation_dataloader

    def show_sample_images(self):
        """Plot the first six images of a training batch with their labels.

        Requires load_train_data() to have been called first.
        """
        images_to_show = 6
        imgs, labels = next(iter(self.train_dataloader))
        plt.figure(figsize=(56, 56))
        for i, (img, label) in enumerate(zip(imgs[:images_to_show], labels[:images_to_show])):
            # permute moves the channel dim from position 0 to the end;
            # the (+1)/2 undoes the [-1, 1] normalization for display.
            img = (img.permute(1, 2, 0).numpy() + 1) / 2
            # Subplot grid: rows * cols.
            plt.subplot(2, 3, i + 1)
            plt.title(self.id_to_class.get(label.item()))
            plt.xticks([])
            plt.yticks([])
            plt.imshow(img)

        # Render all sample images in one figure.
        plt.show()


def folders_in_directory(directory_path):
    """Return the full path of every sub-directory under directory_path (recursive)."""
    return [
        os.path.join(parent, sub_name)
        for parent, sub_dirs, _ in os.walk(directory_path)
        for sub_name in sub_dirs
    ]


class TongueResnet(torch.nn.Module):
    """ResNet50 backbone with a small two-layer classification head.

    image_width/image_height/enable_dropout/enable_bn are accepted for
    interface compatibility but are not used by this implementation.
    """

    def __init__(self, image_width=224, image_height=224, num_classifications=3,
                 enable_dropout=False, enable_bn=False):
        super().__init__()

        # Start from ImageNet-pretrained weights.
        self.resnet = torchvision.models.resnet50(weights=torchvision.models.ResNet50_Weights.DEFAULT)
        print(self.resnet)

        # Replace the stock fully-connected layer with a 2-layer head.
        hidden_units = 128
        backbone_out = self.resnet.fc.in_features
        head = torch.nn.Sequential(
            torch.nn.Linear(backbone_out, hidden_units),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(hidden_units, num_classifications),
        )
        self.resnet.fc = head

    def forward(self, x):
        return self.resnet(x)

    def get_name(self):
        return f'{dataType}Resnet50'

    def transfer_learning_mode(self):
        """Freeze the convolutional backbone; train only the new head."""
        for p in self.resnet.parameters():
            p.requires_grad = False
        for p in self.resnet.fc.parameters():
            p.requires_grad = True

    def fine_tune_mode(self):
        """Unfreeze every parameter for full fine-tuning."""
        for p in self.resnet.parameters():
            p.requires_grad = True


class TongueDenseNet(torch.nn.Module):
    """DenseNet121 backbone with a small two-layer classification head.

    image_width/image_height/enable_dropout/enable_bn are accepted for
    interface compatibility but are not used by this implementation.
    """

    def __init__(self, image_width=224, image_height=224, num_classifications=3,
                 enable_dropout=False, enable_bn=False):
        super().__init__()
        # Start from ImageNet-pretrained weights.
        self.densenet = torchvision.models.densenet121(weights=torchvision.models.DenseNet121_Weights.DEFAULT)
        print(self.densenet)

        # Replace the stock classifier with a 2-layer head sized for our classes.
        backbone_out = self.densenet.classifier.in_features
        hidden_units = 128  # width of the intermediate fully-connected layer
        self.densenet.classifier = torch.nn.Sequential(
            torch.nn.Linear(backbone_out, hidden_units),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(hidden_units, num_classifications),
        )

    def forward(self, x):
        return self.densenet(x)

    def get_name(self):
        return f'{dataType}DenseNet121'

    def transfer_learning_mode(self):
        """Freeze the feature extractor; train only the classifier head."""
        for p in self.densenet.features.parameters():
            p.requires_grad = False
        for p in self.densenet.classifier.parameters():
            p.requires_grad = True

    def fine_tune_mode(self):
        """Unfreeze every parameter for full fine-tuning."""
        for p in self.densenet.parameters():
            p.requires_grad = True

class ModelTrainer():
    """Device-aware train/test/validate driver for a classification model.

    fit() alternates training and evaluation epochs, prints per-epoch
    metrics, and checkpoints the model whenever evaluation accuracy improves.
    """

    def __init__(self, model, loss_func, optimizer, lr_scheduler=None):
        # Use the GPU when one is available.
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print(self.device)
        self.model = model
        self.model = self.model.to(self.device)
        self.loss_func = loss_func
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler

    def train(self, dataloader):
        """Run one training epoch; return (summed batch loss, correct count)."""
        # Switch to training mode.
        self.model.train()
        # Sum of per-batch losses over the epoch.
        epoch_loss = 0
        # Running count of correctly predicted samples.
        epoch_correct = 0

        # Iterate over every batch of the epoch.
        for x, y in dataloader:
            # non_blocking=True allows asynchronous host-to-device transfer.
            x = x.to(self.device, non_blocking=True)
            y = y.to(self.device, non_blocking=True)

            predicted = self.model(x)
            loss = self.loss_func(predicted, y)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # NOTE(review): the scheduler is stepped once per *batch*, not per
            # epoch — confirm this is intended for the scheduler type used.
            if self.lr_scheduler:
                self.lr_scheduler.step()

            # Accumulate metrics without building an autograd graph.
            with torch.no_grad():
                epoch_correct += (predicted.argmax(1) == y).type(torch.float).sum().item()
                epoch_loss += loss.item()

        return (epoch_loss, epoch_correct)

    def test(self, dataloader):
        """Evaluate one epoch; return (summed batch loss, correct count)."""
        # Switch to evaluation mode.
        self.model.eval()
        # Sum of per-batch losses over the epoch.
        epoch_loss = 0
        # Running count of correctly predicted samples.
        epoch_correct = 0

        # Evaluation needs no gradients or backprop.
        with torch.no_grad():
            for x, y in dataloader:
                # non_blocking=True allows asynchronous host-to-device transfer.
                x = x.to(self.device, non_blocking=True)
                y = y.to(self.device, non_blocking=True)

                predicted = self.model(x)
                loss = self.loss_func(predicted, y)

                # Accumulate metrics.
                epoch_correct += (predicted.argmax(1) == y).type(torch.float).sum().item()
                epoch_loss += loss.item()

        return (epoch_loss, epoch_correct)

    def validate(self, dataloader):
        """Return (average loss per batch, accuracy) over *dataloader*."""
        total_val_data_cnt = len(dataloader.dataset)
        num_val_batch = len(dataloader)
        val_loss, val_correct = self.test(dataloader)
        # Summed batch losses / number of batches = average loss.
        avg_val_loss = val_loss / num_val_batch
        # Correct predictions / total samples = accuracy.
        avg_val_accuracy = val_correct / total_val_data_cnt

        return (avg_val_loss, avg_val_accuracy)

    def fit(self, train_dataloader, test_dataloader, epoch):
        """Train for *epoch* epochs, evaluating after each one and saving a
        checkpoint whenever evaluation accuracy reaches a new best."""
        # Total training samples.
        total_train_data_cnt = len(train_dataloader.dataset)
        # Number of training batches.
        num_train_batch = len(train_dataloader)
        # Total evaluation samples.
        total_test_data_cnt = len(test_dataloader.dataset)
        # Number of evaluation batches.
        num_test_batch = len(test_dataloader)

        best_accuracy = 0.0

        # Loop over the full dataset *epoch* times.
        for i in range(epoch):
            # Training pass.
            epoch_train_loss, epoch_train_correct = self.train(train_dataloader)
            # Summed batch losses / number of batches = average loss.
            avg_train_loss = epoch_train_loss / num_train_batch
            # Correct predictions / total samples = accuracy.
            avg_train_accuracy = epoch_train_correct / total_train_data_cnt

            # Evaluation pass.
            epoch_test_loss, epoch_test_correct = self.test(test_dataloader)
            # Summed batch losses / number of batches = average loss.
            avg_test_loss = epoch_test_loss / num_test_batch
            # Correct predictions / total samples = accuracy.
            avg_test_accuracy = epoch_test_correct / total_test_data_cnt

            msg_template = (
                "Epoch {:2d} - Train accuracy: {:.2f}%, Train loss: {:.6f}; Test accuracy: {:.2f}%, Test loss: {:.6f}")
            print(msg_template.format(i + 1, avg_train_accuracy * 100, avg_train_loss, avg_test_accuracy * 100,
                                      avg_test_loss))

            # Checkpoint whenever evaluation accuracy improves.
            if avg_test_accuracy > best_accuracy:
                # Save the best model so far.
                best_accuracy = avg_test_accuracy
                ckpt_path = f'./{self.model.get_name()}.ckpt'
                self.save_checkpoint(i, ckpt_path)
                print(f'Save model to {ckpt_path}')

    def predict(self, x):
        """Return the raw model output for input *x* (moved to self.device)."""
        # Prediction
        prediction = self.model(x.to(self.device))
        # Predicted class value using argmax
        # predicted_class = np.argmax(prediction)
        return prediction

    def save_checkpoint(self, epoch, file_path):
        """Save model/optimizer state plus the epoch index to *file_path*."""
        # Assemble the checkpoint contents.
        ckpt = {
            'model': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'epoch': epoch,
            # 'lr_schedule': self.lr_schedule.state_dict()
        }
        # Write the checkpoint file.
        torch.save(ckpt, file_path)

    def load_checkpoint(self, file_path):
        """Restore model/optimizer state from *file_path*; return its epoch index."""
        # weights_only=True restricts unpickling to tensors/containers (safer).
        ckpt = torch.load(file_path, weights_only=True)
        # Restore model parameters.
        self.model.load_state_dict(ckpt['model'])
        # Restore optimizer state.
        self.optimizer.load_state_dict(ckpt['optimizer'])
        # Epoch at which the checkpoint was taken.
        epoch = ckpt['epoch']
        # Restore lr_scheduler (currently disabled).
        # self.lr_schedule.load_state_dict(ckpt['lr_schedule'])
        return epoch


def train_with_resnet(including_finetune=True):
    """Transfer-learn (and optionally fine-tune) a ResNet50 on the tongue dataset.

    Trains against the test split for per-epoch monitoring/checkpointing, then
    reloads the best checkpoint and scores it on the validation split.

    :param including_finetune: when True, unfreeze the backbone after the
        transfer-learning phase and continue training with a smaller lr.
    """
    # One class per sub-folder of the training directory.
    class_num = len(folders_in_directory(fr'./Data/{dataType}/train'))
    model = TongueResnet(num_classifications=class_num)
    model.transfer_learning_mode()
    loss_func = torch.nn.CrossEntropyLoss()
    # Optimize only the classifier head during transfer learning.
    optimizer = torch.optim.Adam(model.resnet.fc.parameters(), lr=0.0001)

    tongs = TongueDataset(batch_size=32)
    train_dataloader = tongs.load_train_data()
    # tongs.show_sample_images()
    test_dataloader = tongs.load_test_data()
    validation_dataloader = tongs.load_validation_data()

    # Train the model; fit() saves the best checkpoint as it goes.
    print('Begin transfer learning...')
    trainer = ModelTrainer(model, loss_func, optimizer)
    trainer.fit(train_dataloader, test_dataloader, 12)

    if including_finetune:
        # Fine-tune the whole network with a much smaller lr so the
        # pre-trained features are not destroyed.
        model.fine_tune_mode()
        optimizer_finetune = torch.optim.Adam(model.parameters(), lr=0.00001)
        print('Begin fine tune...')
        trainer = ModelTrainer(model, loss_func, optimizer_finetune)
        trainer.fit(train_dataloader, test_dataloader, 10)

    # Reload the best checkpoint. Derive the path from the model itself
    # instead of re-building the file name by hand (consistent with
    # train_with_densnet and with ModelTrainer.fit's save path).
    trainer.load_checkpoint(f'./{model.get_name()}.ckpt')
    avg_val_loss, avg_val_accuracy = trainer.validate(validation_dataloader)
    print(f'Validation: {avg_val_accuracy * 100}%, {avg_val_loss}')

def train_with_densnet(including_finetune=True):
    """Transfer-learn (and optionally fine-tune) a DenseNet121 on the tongue dataset.

    Uses the validation split for epoch-by-epoch monitoring/checkpointing and
    the test split for the final evaluation.

    :param including_finetune: when True, unfreeze the backbone after the
        transfer-learning phase and continue training with a smaller lr.
    """
    # One class per sub-folder of the training directory.
    class_num = len(folders_in_directory(fr'./Data/{dataType}/train'))
    model = TongueDenseNet(num_classifications=class_num)
    model.transfer_learning_mode()
    loss_func = torch.nn.CrossEntropyLoss()
    # Optimize only the classifier head during transfer learning.
    optimizer = torch.optim.Adam(model.densenet.classifier.parameters(), lr=0.00001)

    tongs = TongueDataset(batch_size=16)
    train_dataloader = tongs.load_train_data()
    # tongs.show_sample_images()
    test_dataloader = tongs.load_test_data()
    validation_dataloader = tongs.load_validation_data()

    # Train the model; fit() saves the best checkpoint as it goes.
    print('Begin transfer learning...')
    trainer = ModelTrainer(model, loss_func, optimizer)
    trainer.fit(train_dataloader, validation_dataloader, 20)

    if including_finetune:
        model.fine_tune_mode()
        # BUG FIX: the optimizer previously covered only the classifier
        # parameters even though fine_tune_mode() unfreezes the whole
        # network, so the backbone was never actually updated.  Optimize all
        # parameters (mirroring train_with_resnet) with a much smaller lr.
        optimizer_finetune = torch.optim.Adam(model.parameters(), lr=0.000001)
        print('Begin fine tune...')
        trainer = ModelTrainer(model, loss_func, optimizer_finetune)
        trainer.fit(train_dataloader, validation_dataloader, 10)

    # Reload the best checkpoint and score it on the held-out test split.
    trainer.load_checkpoint(f'./{model.get_name()}.ckpt')
    avg_val_loss, avg_val_accuracy = trainer.validate(test_dataloader)
    print(f'Test: {avg_val_accuracy * 100}%, {avg_val_loss}')


def predict_images(model=None):
    """Load saved weights for *model* and batch-predict the doctor's
    segmented-image folder, writing results to an Excel sheet via testImages2.

    :param model: network to evaluate; defaults to a fresh TongueDenseNet.
        BUG FIX: the default used to be ``model=TongueDenseNet()`` in the
        signature, which constructed the model (and fetched pretrained
        weights) at *import* time; the None sentinel defers that to call time.
    """
    if model is None:
        model = TongueDenseNet()
    model.transfer_learning_mode()

    # Restore the checkpoint saved during training for this model.
    checkpoint_path = f'./{model.get_name()}.ckpt'
    checkpoint = torch.load(checkpoint_path, map_location=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
                            weights_only=False)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    # Move the model to the best available device.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)

    # The dataset wrapper is loaded to recover the transform pipeline and the
    # id -> class-name mapping (id_to_class is filled by load_train_data).
    veg = TongueDataset(batch_size=32)
    veg.load_train_data()
    veg.load_test_data()
    veg.load_validation_data()

    # Run batch prediction over the doctor's segmented-image directory and
    # write the per-image results to an Excel workbook.
    print("==========艾医生===========\n")
    images = dirAllFilesRecursive(r"./Data/分割后图片_艾医生")
    #images = images[-3:]
    testImages2("艾医生", device, images, model, veg)

def predict_images_from_test():
    """Classify every image under a network-share test folder and copy each
    file into a per-label subdirectory of the target share."""
    # Initialise the model.
    model = TongueDenseNet()
    model.transfer_learning_mode()

    # Load the saved model weights.
    # NOTE(review): this loads a '{dataType}Resnet50' checkpoint into a
    # TongueDenseNet — load_state_dict will fail unless that file actually
    # holds DenseNet weights.  Confirm the intended path (model.get_name()
    # would yield the DenseNet checkpoint name).
    checkpoint_path = f'./{dataType}Resnet50.ckpt'
    checkpoint = torch.load(checkpoint_path, map_location=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
                            weights_only=False)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    # Move the model to the best available device.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)

    copy_target_path = r"\\192.168.1.137\share\Test\livan\TongueConstitutionV8_深度学习再分类"
    root_path = r"\\192.168.1.137\share\Test\livan\TongueConstitutionV8\test"
    # Dataset wrapper, used here for its transform pipeline and the
    # id -> class-name mapping (filled by load_train_data).
    veg = TongueDataset(batch_size=32)
    veg.load_train_data()
    veg.load_test_data()
    veg.load_validation_data()

    images = dirAllFilesRecursive(root_path)
    for path in images:
        try:
            # Load and preprocess the image

            img = PILImage.open(path)
            # img = crop_top_center_region(path, 70)

            # temp_file = "./temp.jpg"
            # img.resize((224, 224))
            # img.save(temp_file)

            # fill black area
            # img = fillBlack.stretch_image_to_cover_dark(temp_file, None)
            # img.save("./fillBlack.jpg")
            # print("to cover dark done")

            # blurred_image = blur_transform(img)
            # blurred_image.save("./blurred_image.jpg")
            # print("to blur done")

            # img.save(temp_file)
            # resimg = cropimg.crop_image_fill_black(temp_file, remove_percentage=50)
            # print("to crop_image_fill_black done")
            # resimg.save("./t2.jpg")

            img_tensor = veg.transform(img)
            img_tensor.unsqueeze_(0)
            img_tensor = img_tensor.to(device)
            print("to img_tensor")
            # Model prediction
            with torch.no_grad():
                prediction = model(img_tensor)

            # Get predicted label
            index = prediction.to('cpu').data.numpy().argmax()
            label = veg.id_to_class[index]

            # Copy the image into a per-label folder, avoiding overwrites.
            to_target_path = copy_target_path + "/" + label

            os.makedirs(to_target_path, exist_ok=True)
            safe_copy(path, to_target_path)
            #shutil.copy(path, to_target_path)

        except Exception as e:
            print(f"Error processing image {path}: {e}")
    print("All images processed")


def testImages(device, images, model, veg):
    """Predict a class label for each image path and print it.

    :param device: torch device string the model lives on
    :param images: iterable of image file paths
    :param model: network already in eval mode
    :param veg: TongueDataset providing transform and id_to_class
    """
    for image_path in images:
        try:
            # Preprocess with the dataset's transform pipeline and add a
            # batch dimension.
            tensor = veg.transform(PILImage.open(image_path))
            tensor = tensor.unsqueeze(0).to(device)
            # Inference only — no gradients.
            with torch.no_grad():
                output = model(tensor)

            # argmax over the class scores gives the predicted class id.
            predicted_index = output.to('cpu').data.numpy().argmax()
            predicted_label = veg.id_to_class[predicted_index]
            print(f"Image: {image_path}")
            print(f"Predicted Label: {predicted_label}")
        except Exception as e:
            print(f"Error processing image {image_path}: {e}")

def crop_black_region(image_path, remove_percentage=65):
    """Crop and return the centred strip from the upper part of the image.

    :param image_path: path of the source image
    :param remove_percentage: percentage controlling both the strip width
        and the crop height (default 65)
    :return: the cropped PIL image
    """
    with PILImage.open(image_path) as source:
        width, height = source.size
        # Bottom edge: remove_percentage of the height plus a 10% top margin.
        bottom = int(height * (remove_percentage / 100)+math.floor(height*0.1))

        # Horizontal bounds of the centred strip.
        strip_width = int(width * (remove_percentage / 100))
        left = (width - strip_width) // 2
        right = left + strip_width

        # Skip the top 10% of the image, then crop the centre strip.
        return source.crop((left, height*0.1, right, bottom))



def crop_top_center_region(image_path, remove_percentage=65):
    """Crop and return a narrower centred strip from the upper part of the image.

    Like crop_black_region, but the strip is 20 percentage points narrower.

    :param image_path: path of the source image
    :param remove_percentage: percentage controlling the crop height; the
        strip width uses remove_percentage - 20 (default 65)
    :return: the cropped PIL image
    """
    with PILImage.open(image_path) as source:
        width, height = source.size
        # Bottom edge: remove_percentage of the height plus a 10% top margin.
        bottom = int(height * (remove_percentage / 100)+math.floor(height*0.10))

        # Horizontal bounds of the (narrower) centred strip.
        strip_width = int(width * ((remove_percentage-20) / 100))
        left = (width - strip_width) // 2
        right = left + strip_width

        # Skip the top 10% of the image, then crop the centre strip.
        return source.crop((left, height*0.1, right, bottom))

# Module-level counters accumulated across testImages2 calls.
totalNum = 0  # number of images processed so far
similarity = 0  # images whose class-0 / class-2 probability ratio fell in (0.75, 1.5)
def testImages2(name, device, images, model, veg):
    """Predict each image's class, log probabilities, and write an Excel
    report (thumbnail + predicted label + multi-label summary) per person.

    Updates the module-level totalNum and similarity counters as a side
    effect, and saves the workbook under ./预测结果/预测结果_{dataType}.

    :param name: person/group name written into column A and the file name
    :param device: torch device string the model lives on
    :param images: iterable of image file paths
    :param model: network already in eval mode
    :param veg: TongueDataset providing transform and id_to_class
    """
    CELL_HIGH = 160

    # Create an Excel workbook and sheet
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Image Predictions"

    # Set up headers
    sheet.append(["name", "File Name", "Image", "Predicted Label", "Whole Label"])

    # Row counter for placing images
    row = 2
    center_align = Alignment(horizontal='center', vertical='center')
    font = Font(size=20)
    blur_transform = transforms.GaussianBlur(kernel_size=(31, 31), sigma=(10.0, 10.0))

    alignment_center = Alignment(vertical="center")  # vertically centred cells

    for path in images:
        try:
            global totalNum
            totalNum += 1

            # Load and preprocess the image

            img = PILImage.open(path)
            #img = crop_top_center_region(path, 70)

            # temp_file = "./temp.jpg"
            # img.resize((224, 224))
            # img.save(temp_file)

            # fill black area
            #img = fillBlack.stretch_image_to_cover_dark(temp_file, None)
            #img.save("./fillBlack.jpg")
            #print("to cover dark done")

            #blurred_image = blur_transform(img)
            #blurred_image.save("./blurred_image.jpg")
            #print("to blur done")

            # img.save(temp_file)
            # resimg = cropimg.crop_image_fill_black(temp_file, remove_percentage=50)
            # print("to crop_image_fill_black done")
            #resimg.save("./t2.jpg")

            img_tensor = veg.transform(img)
            img_tensor.unsqueeze_(0)
            img_tensor = img_tensor.to(device)
            print("to img_tensor")
            # Model prediction
            with torch.no_grad():
                prediction = model(img_tensor)

            # Convert the raw model output into a probability distribution.
            prediction_probs = torch.nn.functional.softmax(prediction, dim=1)  # softmax over class scores
            prediction_probs = prediction_probs.cpu().data.numpy().flatten()  # to a flat NumPy array

            # Get predicted label
            index = prediction.to('cpu').data.numpy().argmax()
            label = veg.id_to_class[index]


            other_probs = []

            # Print every class probability.
            for i, prob in enumerate(prediction_probs):
                print(f"Class: {veg.id_to_class[i]}, Probability: {prob:.4f}")
                # Collect every label whose probability reaches the 0.18 threshold.
                if prob >= 0.18:
                    other_probs.append(veg.id_to_class[i])

            # Join the above-threshold labels with commas; fall back to the
            # top-1 label when none crossed the threshold.
            other_probs_str = ",".join(other_probs)
            if len(other_probs_str) <=0:
                other_probs_str = label

            global similarity

            # Count images where class 0 and class 2 probabilities are close
            # (ratio within (0.75, 1.5)).
            if (prediction_probs[0] / prediction_probs[2] < 1.5)  and (prediction_probs[0] / prediction_probs[2] > 0.75):
                similarity += 1


            # Print the predicted label and its probability.
            print(f"Predicted Label: {label}, Probability: {prediction_probs[index]:.4f}")


            # Add image to Excel
            excel_image = OpenpyxlImage(path)
            excel_image.width = CELL_HIGH  # Resize width for display
            excel_image.height = CELL_HIGH  # Resize height for display

            sheet[f"A{row}"] = name
            sheet[f"A{row}"].font = font
            sheet[f"A{row}"].alignment = alignment_center

            file_name = os.path.basename(path)
            sheet[f"B{row}"] = file_name
            sheet[f"B{row}"].alignment = alignment_center
            sheet[f"B{row}"].font = font

            img_cell = f"C{row}"  # Adjust column and row

            # Insert image into Excel
            sheet.add_image(excel_image, img_cell)
            sheet.row_dimensions[row].height = CELL_HIGH
            # Add path and label
            #sheet[f"C{row}"] = path
            sheet[f"D{row}"] = label #pingyingTochinese2[label]
            sheet[f"D{row}"].alignment = alignment_center
            sheet[f"D{row}"].font = font

            # Combined ("whole") constitution labels.
            sheet[f"E{row}"] = other_probs_str
            sheet[f"E{row}"].alignment = alignment_center
            sheet[f"E{row}"].font = font
            row += 1

        except Exception as e:
            print(f"Error processing image {path}: {e}")

    sheet.column_dimensions['A'].width = 20
    sheet.column_dimensions['B'].width = 90
    sheet.column_dimensions['C'].width = 30
    sheet.column_dimensions['D'].width = 20
    # Save workbook
    # Create the output folder when it does not exist yet.
    folder_path = f"./预测结果/预测结果_{dataType}"
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)

    fileName = f"{name}_predictions.xlsx"
    savePath = os.path.join(folder_path, fileName)
    # Remove any previous workbook with the same name first.
    if os.path.exists(savePath):
        os.remove(savePath)
    workbook.save(savePath)
    print(f"Results saved to {savePath}")


def testCpuOrGpu():
    """Report CUDA availability and, when present, basic device details."""
    print(f"Is CUDA available: {torch.cuda.is_available()}")
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Nothing more to report on a CPU-only machine.
    if device != 'cuda':
        return
    print(f"CUDA version: {torch.version.cuda}")
    print(f"Device count: {torch.cuda.device_count()}")
    print(f"Current device: {torch.cuda.current_device()}")
    print(f"Device name: {torch.cuda.get_device_name(0)}")
    print(f'Running on {device}')

def dirAllFilesRecursive(input_dir):
    """Return the full path of every file under *input_dir*, recursively.

    Replaces the previous list-building loop (which appended via the obscure
    ``result += (file_path,)`` tuple-concatenation idiom) with an equivalent
    comprehension over os.walk.
    """
    return [
        os.path.join(parent, file_name)
        for parent, _, file_names in os.walk(input_dir)
        for file_name in file_names
    ]

def safe_copy(source_file, target_file):
    """Copy *source_file* to *target_file*, appending _N to the name instead
    of overwriting when the target already exists."""
    if os.path.exists(target_file):
        stem, suffix = os.path.splitext(target_file)
        # Find the first _N suffix that is still free.
        n = 1
        while os.path.exists(f"{stem}_{n}{suffix}"):
            n += 1
        target_file = f"{stem}_{n}{suffix}"
    # copy2 preserves file metadata (timestamps etc.).
    shutil.copy2(source_file, target_file)
    print(f"文件已复制到 {target_file}")


if __name__ == '__main__':
    # Optional delayed start / environment check (disabled).
    #print("开始等待...")
    #time.sleep(3600*5)
    #testCpuOrGpu()

    # ResNet50 pipeline (disabled).
    #train_with_resnet(True)
    #predict_images(model=TongueResnet())

    # DenseNet121 pipeline: count classes from the training folder layout so
    # the classifier head matches the saved checkpoint, then batch-predict.
    #train_with_densnet(True)
    class_num = len(folders_in_directory(fr'./Data/{dataType}/train'))
    predict_images(model=TongueDenseNet(num_classifications=class_num))

    # Summary counters accumulated by testImages2 (disabled).
    # print(f"totalNum:{totalNum}")
    # print(f"similarity:{similarity}")
    #predict_images_from_test()