# -*- coding: utf-8 -*-
# @Time    : 2025/9/24 15:23
# @Author  : chenmh
# @File    : dann.py
# @Desc: 精简实现 DANN

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
from torch_geometric import seed_everything
from typing import Tuple

# Fix all random seeds (python/numpy/torch) for reproducibility
seed_everything(42)


# 1. 梯度反转层 - DANN的核心组件
class _GradientReversalFunction(torch.autograd.Function):
    """autograd.Function that is identity forward and scales gradients by -lambda backward."""

    @staticmethod
    def forward(ctx, x: torch.Tensor, lambda_: float) -> torch.Tensor:
        # Stash lambda for the backward pass; return x unchanged.
        ctx.lambda_ = lambda_
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        # Reverse (and scale) the gradient; second return is for lambda_ (no grad).
        return -ctx.lambda_ * grad_output, None


class GradientReversalLayer(nn.Module):
    """
    Gradient reversal layer: identity on the forward pass, multiplies the
    gradient by -lambda on the backward pass. This is what makes the feature
    extractor and the domain discriminator adversaries.

    NOTE: defining a ``backward`` method directly on an ``nn.Module`` does NOT
    hook into autograd — PyTorch never calls it, so the reversal must be
    implemented via ``torch.autograd.Function`` as done here.
    """

    def __init__(self, lambda_: float = 1.0):
        super(GradientReversalLayer, self).__init__()
        # Strength of the gradient reversal (often annealed during training).
        self.lambda_ = lambda_

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return _GradientReversalFunction.apply(x, self.lambda_)


# 2. 定义DANN模型
class DANN(nn.Module):
    """
    Domain-Adversarial Neural Network (DANN).

    Three components: a shared feature extractor, a label classifier trained
    on source labels, and a domain discriminator that receives
    gradient-reversed features so the extractor learns domain-invariant
    representations.
    """

    def __init__(self, input_dim: int, hidden_dim: int, num_classes: int, grl_lambda: float = 1.0):
        super(DANN, self).__init__()
        bottleneck = hidden_dim // 2

        # Feature extractor — learns domain-invariant features
        # (plays the role of a pretrained encoder such as BERT/GPT).
        self.feature_extractor = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, bottleneck),
            nn.ReLU()
        )

        # Gradient reversal sits between the extractor and the domain head.
        self.grl = GradientReversalLayer(grl_lambda)

        # Label classifier — predicts the task label of a sample.
        self.label_classifier = nn.Sequential(
            nn.Linear(bottleneck, hidden_dim // 4),
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, num_classes)
        )

        # Domain discriminator — binary: source (0) vs target (1).
        self.domain_classifier = nn.Sequential(
            nn.Linear(bottleneck, hidden_dim // 4),
            nn.ReLU(),
            nn.Linear(hidden_dim // 4, 1),
            nn.Sigmoid()
        )

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        shared = self.feature_extractor(x)

        # Task-label prediction from the shared features.
        class_logits = self.label_classifier(shared)

        # Domain prediction; the GRL is identity here and only flips gradients.
        domain_logits = self.domain_classifier(self.grl(shared))

        return class_logits, domain_logits


# 3. 定义数据集
class DomainDataset(Dataset):
    """Dataset for domain adaptation: per-sample features, class label, and domain label."""

    def __init__(self, features: np.ndarray, labels: np.ndarray, domain_labels: np.ndarray):
        # Raw numpy arrays are kept as-is; conversion to tensors is deferred
        # to __getitem__ so each sample is materialised on demand.
        self.features = features
        self.labels = labels
        self.domain_labels = domain_labels

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, idx: int) -> dict:
        feats = torch.tensor(self.features[idx], dtype=torch.float32)
        label = torch.tensor(self.labels[idx], dtype=torch.long)
        domain = torch.tensor(self.domain_labels[idx], dtype=torch.float32)
        return {'features': feats, 'labels': label, 'domain_labels': domain}


# 4. 训练函数
def train_dann(model: nn.Module, source_loader: DataLoader, target_loader: DataLoader, epochs: int = 10,
               lr: float = 1e-3) -> nn.Module:
    """Train a DANN model with adversarial domain adaptation.

    Each step draws one source batch and one target batch: the label loss is
    computed on source samples only (target labels are unavailable), while
    the domain loss (BCE) is computed on the combined batch.

    :param model: DANN-style module returning (class_logits, domain_logits).
    :param source_loader: yields dicts with 'features', 'labels', 'domain_labels' (domain 0).
    :param target_loader: yields dicts with 'features', 'domain_labels' (domain 1).
    :param epochs: number of passes; each pass runs min(len(source), len(target)) steps.
    :param lr: Adam learning rate.
    :return: the trained model (same object, moved to the selected device).
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)

    class_criterion = nn.CrossEntropyLoss()  # task-label loss (source samples only)
    domain_criterion = nn.BCELoss()          # domain discrimination loss (all samples)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epochs):
        model.train()
        total_loss = 0.0
        total_class_loss = 0.0
        total_domain_loss = 0.0

        # Fresh iterators each epoch; truncate to the shorter loader so every
        # step sees both a source and a target batch.
        source_iter = iter(source_loader)
        target_iter = iter(target_loader)
        num_batches = min(len(source_loader), len(target_loader))

        progress_bar = tqdm(range(num_batches), desc=f"Epoch {epoch + 1}/{epochs}")
        for step in progress_bar:
            source_batch = next(source_iter)
            target_batch = next(target_iter)

            x_source = source_batch['features'].to(device)
            y_source = source_batch['labels'].to(device)
            d_source = source_batch['domain_labels'].to(device)  # source domain label: 0
            x_target = target_batch['features'].to(device)
            d_target = target_batch['domain_labels'].to(device)  # target domain label: 1

            # One combined forward pass over source + target.
            x_combined = torch.cat([x_source, x_target])
            d_combined = torch.cat([d_source, d_target])
            class_logits, domain_logits = model(x_combined)

            # Label loss only on the source slice (target has no labels).
            class_loss = class_criterion(class_logits[:len(x_source)], y_source)
            # squeeze(-1), not squeeze(): an unqualified squeeze on a (1, 1)
            # batch would collapse to 0-d and break BCELoss shape matching.
            domain_loss = domain_criterion(domain_logits.squeeze(-1), d_combined)
            loss = class_loss + domain_loss

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            total_class_loss += class_loss.item()
            total_domain_loss += domain_loss.item()

            # step is 0-based, so (step + 1) batches have been processed.
            progress_bar.set_postfix({
                'total_loss': total_loss / (step + 1),
                'class_loss': total_class_loss / (step + 1),
                'domain_loss': total_domain_loss / (step + 1)
            })

        # Epoch summary.
        avg_loss = total_loss / num_batches
        avg_class_loss = total_class_loss / num_batches
        avg_domain_loss = total_domain_loss / num_batches
        print(
            f"Epoch {epoch + 1} - Total Loss: {avg_loss:.4f}, Class Loss: {avg_class_loss:.4f}, Domain Loss: {avg_domain_loss:.4f}")

    return model


# 5. 评估函数
def evaluate(model: nn.Module, dataloader: DataLoader) -> float:
    """Evaluate classification accuracy of the label head on a dataloader.

    :param model: module returning (class_logits, domain_logits) from forward.
    :param dataloader: yields dicts with 'features' and 'labels'.
    :return: accuracy in [0, 1]; 0.0 for an empty dataloader.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Move the model too — the original only moved batches, which crashes if
    # evaluate() is called on a CPU model while CUDA is available.
    model.to(device)
    model.eval()
    correct = 0
    total = 0

    with torch.no_grad():
        for batch in dataloader:
            features = batch['features'].to(device)
            labels = batch['labels'].to(device)

            # Only the label head matters for evaluation; domain output ignored.
            class_logits, _ = model(features)
            predicted = torch.argmax(class_logits, dim=1)

            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    # Guard against an empty dataloader instead of raising ZeroDivisionError.
    accuracy = correct / total if total > 0 else 0.0
    print(f"Evaluation Accuracy: {accuracy:.4f}")
    return accuracy


# 6. 主函数：生成示例数据并运行DANN
def main() -> None:
    """Build synthetic source/target data with a domain shift, train DANN, evaluate on target."""

    def make_domain_data(num_samples, input_dim, num_classes, domain_shift=0.5):
        """
        Generate synthetic data for one domain.

        Two distinct "label" notions exist in this script: the per-sample
        class label produced here, and the domain label (0 = source,
        1 = target) attached later when datasets are built.

        :param num_samples: number of samples to draw
        :param input_dim: feature dimensionality
        :param num_classes: number of class labels
        :param domain_shift: constant added to every feature to shift the domain
        :return: (features, labels) arrays
        """
        # Random features plus a constant domain shift.
        features = np.random.randn(num_samples, input_dim) + domain_shift
        # Labels come from a random linear map over the shifted features.
        weights = np.random.randn(input_dim, num_classes)
        labels = np.argmax(features @ weights, axis=1)
        return features, labels

    # Hyperparameters.
    input_dim = 20      # input feature dimension
    hidden_dim = 64     # hidden layer width
    num_classes = 3     # number of classes
    num_samples = 1000  # samples per domain
    batch_size = 16
    epochs = 10

    # Source domain: no shift needed. Target domain MUST be shifted —
    # if it matched the source there would be nothing to adapt.
    source_features, source_labels = make_domain_data(
        num_samples, input_dim, num_classes, domain_shift=0.0)
    target_features, target_labels = make_domain_data(
        num_samples, input_dim, num_classes, domain_shift=2.0)

    # Datasets carry the domain label: 0 for source, 1 for target.
    source_dataset = DomainDataset(
        source_features, source_labels, np.zeros(num_samples))
    target_dataset = DomainDataset(
        target_features, target_labels, np.ones(num_samples))

    # Hold out the last 200 target samples as the test split.
    target_train_dataset = DomainDataset(
        target_features[:800], target_labels[:800], np.ones(800))
    target_test_dataset = DomainDataset(
        target_features[800:], target_labels[800:], np.ones(200))

    source_loader = DataLoader(
        source_dataset, batch_size=batch_size, shuffle=True)
    target_train_loader = DataLoader(
        target_train_dataset, batch_size=batch_size, shuffle=True)
    target_test_loader = DataLoader(
        target_test_dataset, batch_size=batch_size, shuffle=False)

    # Train, then evaluate on the held-out target split.
    model = DANN(input_dim, hidden_dim, num_classes)
    print("开始训练DANN模型...")
    trained_model = train_dann(model, source_loader, target_train_loader, epochs=epochs)

    print("\n在目标领域测试集上评估:")
    evaluate(trained_model, target_test_loader)


if __name__ == "__main__":
    main()
