import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import os
import numpy as np
import pandas as pd
import random
from torchvision import datasets, transforms
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import torch.nn.functional as F


def set_seed(seed):
    """Seed every RNG involved in training so runs are reproducible.

    Covers Python's `random`, NumPy, PyTorch CPU/GPU generators, and the
    hash seed; also pins cuDNN to deterministic convolution algorithms.
    """
    # Seed the Python-level RNGs and the interpreter hash seed first.
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    # torch.backends.cudnn.enabled = True  # (left as in original: cuDNN stays at its default)
    # Disable cuDNN autotuning and request a deterministic algorithm so the
    # same convolution implementation is chosen on every run.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    # Seed the CPU generator, the current GPU, and all GPUs (the CUDA calls
    # are safe no-ops on CPU-only machines).
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # torch.use_deterministic_algorithms(True)


set_seed(888)  # fix the global seed before any stochastic operation runs

# Make CUDA kernel launches synchronous so errors surface at the offending
# call instead of later (debugging aid; slows GPU execution).
# NOTE(review): set after `import torch`; usually still effective, but
# setting it before the first CUDA use is safest — confirm.
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

# Training hyperparameters
batch_size = 64
epochs = 25
learning_rate = 0.001

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


# Helper: top-1 classification accuracy over a dataloader
def calculate_accuracy(dataloader, model, device):
    """Return the top-1 accuracy (in percent) of `model` over `dataloader`.

    The dataloader must yield ((rgb_batch, depth_batch), label_batch)
    tuples, as produced by MultiModalDataset.
    """
    model.eval()  # evaluation mode (disables dropout, etc.)
    hits = 0
    seen = 0

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for (rgb_batch, depth_batch), targets in dataloader:
            rgb_batch = rgb_batch.to(device)
            depth_batch = depth_batch.to(device)
            targets = targets.to(device)

            logits = model(rgb_batch, depth_batch)
            preds = logits.argmax(dim=1)
            seen += targets.size(0)
            hits += (preds == targets).sum().item()

    return 100 * hits / seen


class MultiModalDataset(Dataset):
    """Paired RGB/depth image dataset driven by a CSV-style index file.

    Each line of the index file holds three comma-separated fields: the
    path to the RGB ('DIFF') image, the path to the depth ('WNB') image,
    and an integer class label.
    """

    def __init__(self, txt_file_path, rgb_transform=None, depth_transform=None):
        # Parse the index file once up front; columns are named after the
        # two imaging modalities plus the label.
        self.data_frame = pd.read_csv(txt_file_path, delimiter=',', header=None, names=['DIFF', 'WNB', 'label'])
        self.rgb_transform = rgb_transform
        self.depth_transform = depth_transform

    def __len__(self):
        return len(self.data_frame)

    def __getitem__(self, idx):
        row = self.data_frame.loc[idx]
        label = int(row['label'])  # labels are small non-negative class ids

        # Both modalities are loaded as 3-channel images: the depth map is
        # expanded to RGB so both network streams share one input layout.
        rgb_image = Image.open(row['DIFF']).convert('RGB')
        depth_image = Image.open(row['WNB']).convert('RGB')

        # Apply the per-modality preprocessing, when configured.
        if self.rgb_transform:
            rgb_image = self.rgb_transform(rgb_image)
        if self.depth_transform:
            depth_image = self.depth_transform(depth_image)

        return (rgb_image, depth_image), torch.tensor(label, dtype=torch.long)


# Factory that builds a DataLoader for the multi-modal dataset.
# (Earlier draft kept below for reference — note it referenced an undefined
#  `shuffle` variable; the active version takes `shuffle` as a parameter.)
# def create_dataloader(txt_file_path, batch_size, rgb_transform=None, depth_transform=None):
#     dataset = MultiModalDataset(txt_file_path, rgb_transform=rgb_transform, depth_transform=depth_transform)
#     return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)

def create_dataloader(txt_file_path, batch_size, shuffle, rgb_transform=None, depth_transform=None):
    """Build a DataLoader over a MultiModalDataset described by `txt_file_path`."""
    return DataLoader(
        MultiModalDataset(
            txt_file_path,
            rgb_transform=rgb_transform,
            depth_transform=depth_transform,
        ),
        batch_size=batch_size,
        shuffle=shuffle,
    )


# Image preprocessing: resize to a fixed 256x256 and convert to tensor.
# The RGB and depth pipelines are currently identical but kept as two
# objects so they can diverge later (e.g. per-modality normalization).
_resize_to_tensor = [
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
]
rgb_transform = transforms.Compose(_resize_to_tensor)
depth_transform = transforms.Compose(_resize_to_tensor)

# Index files for the train / validation splits.
train_txt_file = 'data_Diff_Wnb_train2.txt'
val_txt_file = 'data_Diff_Wnb_val2.txt'
# test_txt_file = 'data_Diff_Wnb_test.txt'

# Training data is reshuffled each epoch; validation data keeps file order.
train_dataloader = create_dataloader(
    train_txt_file, batch_size=batch_size, shuffle=True,
    rgb_transform=rgb_transform, depth_transform=depth_transform,
)
val_dataloader = create_dataloader(
    val_txt_file, batch_size=batch_size, shuffle=False,
    rgb_transform=rgb_transform, depth_transform=depth_transform,
)


# test_dataloader = create_dataloader(test_txt_file, batch_size=batch_size, rgb_transform=rgb_transform, depth_transform=depth_transform)


# (Stale note — originally read "modified MultiModalDataset class"; the
#  definitions below are the network building blocks, not the dataset.)

# Elementary convolution unit shared by all streams
class Conv_block(nn.Module):
    """A single bias-free Conv2d followed by an in-place ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super().__init__()
        # Keep the stack under the name `conv_block` so state_dict keys
        # remain stable for checkpoints.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      stride=stride, padding=padding, bias=False),
            nn.ReLU(inplace=True),
        ]
        self.conv_block = nn.Sequential(*layers)

    def forward(self, x):
        # Delegate straight to the sequential stack.
        return self.conv_block(x)



# 修改后的网络结构：五分类
class MultiStreamRibConvNet(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(MultiStreamRibConvNet, self).__init__()

        # 分支1
        self.conv1_1 = Conv_block(in_channels, 32, kernel_size=3, stride=1, padding=1)
        self.conv2_1 = Conv_block(in_channels, 32, kernel_size=7, stride=1, padding=3)
        self.conv1_2 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv2_2 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv1_3 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv2_3 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv1_4 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)
        self.conv2_4 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)

        # 分支2
        self.convr_1 = Conv_block(32, 64, kernel_size=3, stride=1, padding=1)
        self.convr_2 = Conv_block(64, 128, kernel_size=3, stride=1, padding=1)
        self.convr_3 = Conv_block(128, 256, kernel_size=3, stride=1, padding=1)
        self.convr_4 = Conv_block(256, 512, kernel_size=3, stride=1, padding=1)

        self.pool = nn.MaxPool2d(2, 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))

        # Dropout层
        self.dropout = nn.Dropout(0.5)

        # 全连接层
        self.fc1 = nn.Linear(512, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, num_classes)

    # 最后一层输出 num_classes

    def forward(self, x1, x2):
        # 分支1
        f1_1 = self.conv1_1(x1)
        f1_1 = self.pool(f1_1)
        f1_2 = self.conv1_2(f1_1)
        f1_2 = self.pool(f1_2)
        f1_3 = self.conv1_3(f1_2)
        f1_3 = self.pool(f1_3)
        f1_4 = self.conv1_4(f1_3)
        f1_4 = self.pool(f1_4)

        # 分支2
        f2_1 = self.conv2_1(x2)
        f2_1 = self.pool(f2_1)
        f2_2 = self.conv2_2(f2_1)
        f2_2 = self.pool(f2_2)
        f2_3 = self.conv2_3(f2_2)
        f2_3 = self.pool(f2_3)
        f2_4 = self.conv2_4(f2_3)
        f2_4 = self.pool(f2_4)

        # 融合分支
        fr_1 = self.convr_1(f1_1 + f2_1)
        fr_1 = self.pool(fr_1)
        fr_2 = self.convr_2(fr_1 + f1_2 + f2_2)
        fr_2 = self.pool(fr_2)
        fr_3 = self.convr_3(fr_2 + f1_3 + f2_3)
        fr_3 = self.pool(fr_3)
        fr_4 = self.convr_4(fr_3 + f1_4 + f2_4)
        fr_4 = self.pool(fr_4)

        # 全局池化和全连接层
        fr = self.avg_pool(fr_4)
        fr = fr.view(fr.size(0), -1)  # 展平
        fr = self.dropout(self.fc1(fr))
        fr = self.fc2(fr)
        pre = self.fc3(fr)  # 输出五分类
        return pre


# Split 8:2, 4 conv stages, 4 classes.
# Build the model: two 3-channel input streams, 4 output classes.
# (The original comment said "five-class / num_classes=5" — the code
# actually passes 4.)
model = MultiStreamRibConvNet(3, 4)

# Move the model to the selected device (GPU if available, else CPU).
model.to(device)

# Adam with the configured learning rate; default betas/eps made explicit.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-8)

# Exponential decay: lr <- lr * 0.98 after every epoch.
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.98)

# Hoisted out of the batch loop: the loss module is stateless, so a single
# instance can be reused for every batch (the original re-created it per
# iteration).
criterion = nn.CrossEntropyLoss()

# Training loop
for epoch in range(epochs):
    model.train()  # training mode (enables dropout, etc.)
    loss_sum = 0

    # One pass over the training set.
    for (rgb_images, depth_images), labels in train_dataloader:
        rgb_images = rgb_images.to(device)
        depth_images = depth_images.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()
        output = model(rgb_images, depth_images)
        loss = criterion(output, labels)
        loss_sum += loss.item()
        loss.backward()
        optimizer.step()

    # Decay the learning rate once per epoch.
    scheduler.step()

    # Current learning rate, for logging.
    current_lr = optimizer.param_groups[0]['lr']

    # Per-epoch accuracies on train and validation splits.
    # NOTE(review): computing train accuracy re-runs the full training set
    # in eval mode each epoch — correct, but roughly doubles epoch cost.
    train_accuracy = calculate_accuracy(train_dataloader, model, device)
    val_accuracy = calculate_accuracy(val_dataloader, model, device)
    #    test_accuracy = calculate_accuracy(test_dataloader, model, device)

    print(f"Epoch [{epoch + 1}/{epochs}], "
          f"Loss: {loss_sum / len(train_dataloader):.4f}, "
          f"Train Accuracy: {train_accuracy:.2f}%, "
          f"Validation Accuracy: {val_accuracy:.2f}%, "
          f"Current Learning Rate: {current_lr:.10f}")
