import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
import numpy as np
import pandas as pd
import random
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import torch.nn.functional as F


def set_seed(seed):
    """Seed every RNG source used by this script for reproducible runs.

    Covers Python's `random`, `numpy`, the hash seed, and torch on both
    CPU and every visible GPU, and pins cuDNN to deterministic algorithms.
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Seed the CPU generator and all GPUs (the cuda calls are no-ops without CUDA).
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Disable cuDNN autotuning and force deterministic convolution algorithms
    # so the same algorithm is selected on every run.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True


set_seed(666)

# Make CUDA kernel launches synchronous so errors surface at the offending call.
# NOTE(review): the CUDA runtime reads this variable when the CUDA context is
# created — confirm torch has not already initialized CUDA before this line,
# otherwise the setting may have no effect.
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

# Training hyper-parameters.
batch_size = 16
epochs = 25
learning_rate = 0.001

# Run on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


# 定义一个简单的权重初始化函数
def weights_init(m):
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        nn.init.constant_(m.weight, 1)
        nn.init.constant_(m.bias, 0)
    elif isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, 0, 0.01)
        nn.init.constant_(m.bias, 0)

# 定义计算准确率的函数
def calculate_accuracy(dataloader, model, device):
    """Return the classification accuracy (percent) of `model` over `dataloader`.

    Each batch must be ((rgb_images, depth_images, shuju), labels); the three
    inputs are moved to `device`, fed to the model, and the argmax over dim 1
    is compared against the labels.

    Fix: returns 0.0 for an empty dataloader instead of raising
    ZeroDivisionError on `correct / total`.
    """
    model.eval()  # evaluation mode: disables dropout, uses BN running stats
    correct = 0
    total = 0

    with torch.no_grad():  # inference only — skip gradient bookkeeping
        for (rgb_images, depth_images, shuju), labels in dataloader:
            rgb_images = rgb_images.to(device)
            depth_images = depth_images.to(device)
            shuju = shuju.to(device)
            labels = labels.to(device)

            outputs = model(rgb_images, depth_images, shuju)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # Guard: an empty dataloader would otherwise divide by zero.
    if total == 0:
        return 0.0
    return 100 * correct / total







# class MultiModalDataset(Dataset):
#     def __init__(self, txt_file_path, rgb_transform=None, depth_transform=None):
#         # 读取 txt 文件，假设文件每行包含 rgb_path, depth_path, label，用逗号分隔
#         self.data_frame = pd.read_csv(txt_file_path, delimiter=',', header=None, names=['DIFF', 'WNB', 'label','WBC','Neu' ,'Lym','Mon','Eos','Bas','IMG',
#                                                                                         'Neu_','Lym_','Mon_','Eos_','Bas_','IMG_','RBC','HGB','HCT','MCV','MCH',
#                                                                                         'MCHC','CV','SD','PLT','MPV','PDW','PCT','LCC','LCR','NRBC','NRBC_','HFC',
#                                                                                         'HFC_','IME','IME_','NLR','InR','InR_','Micro','Micro_' ,'Macro' ,'Macro_',
#                                                                                         'PLR' ,'PDW_SD'])
#         self.rgb_transform = rgb_transform
#         self.depth_transform = depth_transform
#
#     def __len__(self):
#         return len(self.data_frame)
#
#     def __getitem__(self, idx):
#         rgb_path = self.data_frame.loc[idx, 'DIFF']
#         depth_path = self.data_frame.loc[idx, 'WNB']
#         label = int(self.data_frame.loc[idx, 'label'])  # 假设标签是整数（0, 1, 2）
#
#         exclude_columns = ['DIFF', 'WNB', 'label']
#         # 获取其余列的数据
#         other_data = self.data_frame.loc[idx, ~self.data_frame.columns.isin(exclude_columns)]
#         # 将数据转换为列表
#         other_data_list = other_data.tolist()
#         # 将列表转换为张量
#         other_data_tensor = torch.tensor(other_data_list, dtype=torch.float32)

class MultiModalDataset(Dataset):
    """Dataset yielding ((rgb_image, depth_image, feature_tensor), label).

    Each line of `txt_file_path` is comma-separated:
    rgb_path, depth_path, integer label, then any number of numeric features.
    """

    def __init__(self, txt_file_path, rgb_transform=None, depth_transform=None):
        # Split every manifest line into its comma-separated fields up front.
        with open(txt_file_path, 'r', encoding='utf-8') as file:
            self.data = [line.strip().split(',') for line in file]
        self.rgb_transform = rgb_transform
        self.depth_transform = depth_transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        record = self.data[idx]
        rgb_path = record[0]
        depth_path = record[1]
        label = int(record[2])

        # Parse the remaining columns as floats. The malformed token
        # '0.00.0' (a known glitch in the source data) is mapped to zero;
        # any other unparseable token still raises ValueError.
        features = []
        for token in record[3:]:
            try:
                features.append(float(token))
            except ValueError:
                if token == '0.00.0':
                    features.append(0)
                else:
                    raise
        feature_tensor = torch.tensor(features, dtype=torch.float32)

        # Load both images as 3-channel RGB (the depth map is converted too).
        rgb_image = Image.open(rgb_path).convert('RGB')
        depth_image = Image.open(depth_path).convert('RGB')

        if self.rgb_transform:
            rgb_image = self.rgb_transform(rgb_image)
        if self.depth_transform:
            depth_image = self.depth_transform(depth_image)

        return (rgb_image, depth_image, feature_tensor), torch.tensor(label, dtype=torch.long)


# 定义加载数据集的函数
# def create_dataloader(txt_file_path, batch_size, rgb_transform=None, depth_transform=None):
#     dataset = MultiModalDataset(txt_file_path, rgb_transform=rgb_transform, depth_transform=depth_transform)
#     return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

def create_dataloader(txt_file_path, batch_size, shuffle, rgb_transform=None, depth_transform=None):
    """Build a DataLoader over a MultiModalDataset read from `txt_file_path`."""
    ds = MultiModalDataset(txt_file_path,
                           rgb_transform=rgb_transform,
                           depth_transform=depth_transform)
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle)




# Image transforms: resize to 256x256 and convert to a float tensor.
rgb_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor()
])

depth_transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor()
])

# Manifest files for the train / validation splits; each line is
# comma-separated: rgb_path, depth_path, label, numeric features...
train_txt_file = 'train.txt'
val_txt_file = 'val.txt'
# test_txt_file = 'data_Diff_Wnb_test.txt'

# Shuffle only the training loader; keep validation order fixed.
train_dataloader = create_dataloader(train_txt_file, batch_size=batch_size, rgb_transform=rgb_transform,
                                     depth_transform=depth_transform, shuffle=True)
val_dataloader = create_dataloader(val_txt_file, batch_size=batch_size, rgb_transform=rgb_transform,
                                   depth_transform=depth_transform, shuffle=False)


# test_dataloader = create_dataloader(test_txt_file, batch_size=batch_size, rgb_transform=rgb_transform, depth_transform=depth_transform)


# 修改后的 MultiModalDataset 类

class FocalLoss(nn.Module):
    """Multi-class focal loss: mean over (1 - p_t)^gamma * CE(inputs, targets).

    Args:
        gamma: focusing parameter; larger values down-weight easy examples.
        alpha: optional per-class weight tensor, moved to the module-level
            `device` when given.

    Fix: the original unconditionally called `alpha.to(device)`, so the
    documented default `alpha=None` crashed with AttributeError in __init__.
    """

    def __init__(self, gamma=2, alpha=None):
        super().__init__()
        # Guard the default: only move alpha to the device when it exists.
        self.alpha = alpha.to(device) if alpha is not None else None
        self.gamma = gamma

    def forward(self, inputs, targets):
        # Per-sample cross entropy; weight=None is a valid no-op for F.cross_entropy.
        ce_loss = F.cross_entropy(inputs, targets, reduction='none', weight=self.alpha)
        # CE = -log(p_t), so p_t is recovered with exp(-CE).
        pt = torch.exp(-ce_loss)
        focal_loss = (1 - pt) ** self.gamma * ce_loss
        return focal_loss.mean()


# from model.first import MultiStreamRibConvNet as net
# from model.second import RobustMultiModalNet as net
# from model.third import EnhancedMultiModalNet as net
# from model.forth import MultiStreamRibConvNet as net
# from model.fifth import MultiStreamConvNeXtV2 as net
from model.sixth import MultiStreamConvNeXtV2 as net
# from model.concat_tezheng import MultiStreamConvNeXtV2 as net

from zhibiao.f1 import f1_copute
from zhibiao.hunxiao import calculate_confusion_matrix


# 80/20 split, 4 layers, 4-class task.
# Build the model — net(3, 4): presumably 3 input channels and 4 output
# classes (the original inline comment said "5-class, num_classes=5", which
# contradicts the 4-class weights below; confirm against model.sixth).
model = net(3, 4)
model.apply(weights_init)
# Move the model to the GPU or CPU chosen above.
model.to(device)

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-8)

# Decay the learning rate by 2% after every epoch.
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)
# Per-class weights for the cross-entropy loss (one entry per class).
weight = torch.tensor([0.61, 0.17, 0.17, 0.05],device=device)
# focal_loss = FocalLoss(gamma=2, alpha=torch.tensor([0.61, 0.17, 0.17, 0.05]))
# Train the model.
for epoch in range(epochs):
    model.train()  # training mode (enables dropout / BN batch statistics)
    loss_sum = 0

    # Training phase: one pass over the training loader.
    for (rgb_images, depth_images,shuju), labels in train_dataloader:
        rgb_images = rgb_images.to(device)
        depth_images = depth_images.to(device)
        shuju = shuju.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        output = model(rgb_images, depth_images,shuju)
        # Class-weighted cross entropy. NOTE(review): the loss module is
        # rebuilt every iteration; hoisting it out of the loop would be cheaper.
        loss = nn.CrossEntropyLoss(weight=weight)(output, labels)

        # loss =focal_loss(output, labels)

        loss_sum += loss.item()
        loss.backward()
        optimizer.step()

    # Apply the exponential learning-rate decay once per epoch.
    scheduler.step()

    # Read back the learning rate after the decay step, for logging.
    current_lr = optimizer.param_groups[0]['lr']

    # End-of-epoch evaluation: accuracy on train and validation sets,
    # plus weighted F1 / recall on the validation set.
    train_accuracy = calculate_accuracy(train_dataloader, model, device)
    val_accuracy = calculate_accuracy(val_dataloader, model, device)
    f1,recall = f1_copute(val_dataloader, model,device)


    print()
    print(f"Epoch [{epoch + 1}/{epochs}], "
          f"Loss: {loss_sum / len(train_dataloader):.4f}, "
          f"Train Accuracy: {train_accuracy:.2f}%, "
          f"Validation Accuracy: {val_accuracy:.2f}%, "
          f"Current Learning Rate: {current_lr:.10f}, "
          f"训练结束后加权 F1 分数: {f1}, "
          f"训练结束后加权 recall 分数: {recall}")

# Confusion matrix over the validation set for the 4 classes.
cm = calculate_confusion_matrix(val_dataloader, model, device,4)
