import torch
import torchvision
import pandas as pd
import os
import random
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset
from config import config
from utils.common import load_image
from scipy.special import lambertw
import numpy as np
from pathlib import Path

root = config['root']  # dataset root directory; all CSV/image paths are joined onto this
train_batch_size = config['train_batch_size']
val_batch_size = config['val_batch_size']
data_types = config['data_types']  # filenames of the image types to load per patient (matched against os.listdir entries)
# root = os.path.abspath(root)
labels = None  # label DataFrame, populated lazily by load_data() / get_class_weights()

# Training-time preprocessing: resize + horizontal-flip augmentation + ImageNet normalization.
transform_train = torchvision.transforms.Compose([
    # transforms.RandomRotation(degrees=5, expand=True),
    # transforms.RandomAutocontrast(p=1),
    # transforms.RandomEqualize(p=1),
    torchvision.transforms.Resize((224, 224), antialias=True),
    # torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2),  # randomly adjust brightness and contrast
    torchvision.transforms.RandomHorizontalFlip(),  # random horizontal flip (augmentation)
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet normalization
])
# Validation/test preprocessing: deterministic — no augmentation, same resize + normalization.
transform_val = torchvision.transforms.Compose([
    torchvision.transforms.Resize((224, 224), antialias=True),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # ImageNet normalization
])


class OCTADataset(Dataset):
    """Dataset of (images, label) pairs for one patient each.

    Each sample holds a list of per-type images; ``__getitem__`` transforms
    every image and concatenates them along the channel dimension into one
    "thick" tensor that is fed to the network.
    """

    def __init__(self, data, transform):
        """
        :param data: list of (images, label) tuples, where images is a list of PIL/tensor images
        :param transform: augmentation/preprocessing applied to each individual image
        """
        super().__init__()
        self.transform = transform
        # Defensive copy: shuffling in place would mutate the caller's list,
        # which is a shared-argument side effect callers don't expect.
        self.data = list(data)
        if config['shuffle']:  # optionally shuffle the sample order
            random.shuffle(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Return one sample: the channel-stacked image tensor and its label."""
        images, label = self.data[item]
        images = [self.transform(i) for i in images]  # transform each image independently
        # Concatenate along the channel dimension so all of a patient's images
        # form a single multi-channel input for the network.
        image = torch.cat(images, dim=0)
        return image, label


def label_find_to_image(labels, regression=False):
    """Pair every sample in ``labels`` with its loaded images and label tensor.

    :param labels: DataFrame indexed by sample (patient) name
    :param regression: if True, build a 4-element float target from the
        up1/up2/down1/down2 columns; otherwise a long class label from 'dpn'
    :return: list of (images, label) tuples; images is a list of loaded images
    """
    data = []
    for sample in labels.index:  # iterate sample names (first CSV column)
        dir_ = os.path.join(root, 'Enface', sample)  # per-patient image folder
        # dir_ = os.path.join(root, 'OCTA', sample)
        try:
            files = set(os.listdir(dir_))
        except FileNotFoundError:
            continue  # sample folder missing on disk — skip this patient
        # Iterate data_types (not os.listdir order) so the channel order of the
        # stacked image is deterministic across filesystems/OSes.
        images = [load_image(os.path.join(dir_, t)) for t in data_types if t in files]
        if regression:
            # Extract the regression targets and re-weight them with an
            # exponential emphasis around the reference values 50 / 40.
            label_values = labels.loc[
                sample, ['up1', 'up2', 'down1', 'down2']].values.astype(float)
            label_values[:2] = np.exp((label_values[:2] - 50) / 50.0) * label_values[:2]
            label_values[2:] = np.exp((label_values[2:] - 40) / 40.0) * label_values[2:]
            label = torch.tensor(label_values, dtype=torch.float32)
        else:
            label = torch.tensor(labels['dpn'][sample]).long()
        data.append((images, label))
    return data


def load_data(regression=False, fold_index=None):
    """Read the label CSV(s) and resolve each sample to its image data.

    :param regression: forwarded to label_find_to_image
    :param fold_index: when given, load the pre-split train/valid/test CSVs of
        that cross-validation fold and return [train, valid, test]; otherwise
        return a single flat data list for the whole label file.
    """
    global labels
    labels = pd.read_csv(os.path.join(root, 'label_velo_binary.csv'), index_col=0)
    folder_name = "cv_folds"
    if fold_index is None:
        return label_find_to_image(labels, regression)
    fold_dir = os.path.join(root, f"{folder_name}/fold_{fold_index}")
    splits = []
    for csv_name in ('train.csv', 'valid.csv', 'test.csv'):
        frame = pd.read_csv(os.path.join(fold_dir, csv_name), index_col=0)
        if csv_name == 'train.csv':
            # Keep the training split in the module global so that
            # get_class_weights() reflects the training-set class ratios.
            labels = frame
        splits.append(label_find_to_image(frame, regression))
    return splits


def get_loader(regression=False, fold_index=None):
    """Build the DataLoaders for training.

    :param regression: forwarded to load_data
    :param fold_index: use the pre-built cross-validation fold if given
    :return: (train_loader, val_loader, test_loader); test_loader is None
        when no fold_index is supplied (random fractional split instead).
    """
    if fold_index is not None:
        # Pre-split K-fold data: the datasets already stack each patient's
        # images into one thick multi-channel tensor.
        train_split, valid_split, test_split = load_data(regression, fold_index=fold_index)
        train_loader = DataLoader(
            OCTADataset(train_split, transform_train),
            batch_size=train_batch_size, shuffle=True)
        val_loader = DataLoader(
            OCTADataset(valid_split, transform_val),
            batch_size=val_batch_size, shuffle=False)
        test_loader = DataLoader(
            OCTADataset(test_split, transform_val),
            batch_size=val_batch_size, shuffle=False)
        return train_loader, val_loader, test_loader

    # No pre-built folds: split the flat list by the configured fraction.
    data = load_data(regression)
    val_frac = config['val_frac']  # fraction of samples held out for validation
    split_at = int(len(data) * (1 - val_frac))  # index where validation starts
    train_loader = DataLoader(
        OCTADataset(data[:split_at], transform_train),
        batch_size=train_batch_size, shuffle=True)
    val_loader = DataLoader(
        OCTADataset(data[split_at:], transform_val),
        batch_size=val_batch_size, shuffle=False)
    return train_loader, val_loader, None


def get_class_weights():
    """Return normalized inverse-frequency class weights as a float tensor.

    Intended for weighting a classification loss (e.g. CrossEntropyLoss) so
    that rare classes contribute more. Weight i corresponds to class label i.
    """
    global labels
    if labels is None:
        labels = pd.read_csv(os.path.join(root, 'label_velo_binary.csv'), index_col=0)
    # value_counts() orders classes by FREQUENCY, which would misalign the
    # weight vector with class indices; sort_index() restores label order so
    # weights[i] really is the weight for class i.
    class_counts = labels['dpn'].value_counts().sort_index()
    total_samples = class_counts.sum()
    # Inverse-frequency weight per class …
    weights = [total_samples / count for count in class_counts]
    # … normalized to sum to 1.
    weights = [w / sum(weights) for w in weights]
    return torch.tensor(weights, dtype=torch.float)


if __name__ == '__main__':
    # os.chdir(".../")
    # Resolve the directory containing this script.
    current_dir = Path(__file__).parent
    print(current_dir)

    # Change into the parent directory so the relative `root` path resolves.
    os.chdir(current_dir.parent)
    trainLoader, valLoader, testLoader = get_loader(fold_index=1)
    # print(len(train_loader))
    # print(len(val_loader))
