import os
import numpy as np
import yaml
import argparse
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Sequential, Linear, Conv2D, BatchNorm2D, BatchNorm1D, ReLU, MaxPool2D
import paddle.optimizer as optim
from paddle.io import Dataset, DataLoader
from paddle.vision.transforms import Compose, Grayscale, Transpose, RandomHorizontalFlip, RandomRotation, Resize, ToTensor
from paddle.vision.datasets import DatasetFolder
from paddle.vision import get_image_backend
import cv2

from tqdm import tqdm
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import json
from sklearn.manifold import TSNE

from matplotlib.pyplot import figure

# Recognized image file extensions (case variants listed explicitly).
IMG_EXTENSIONS = [
    ".jpg", ".JPG", ".jpeg", ".JPEG",
    ".png", ".PNG", ".ppm", ".PPM", ".bmp", ".BMP",
]

def is_image_file(filename):
    """Return True if *filename* ends with one of the known image extensions."""
    return filename.endswith(tuple(IMG_EXTENSIONS))

def pil_loader(path):
    """Load the image at *path* with PIL and return it converted to RGB.

    Relies on the module-level ``PIL.Image`` import.
    """
    with open(path, 'rb') as fp:
        return Image.open(fp).convert('RGB')

def cv2_loader(path):
    """Load the image at *path* with OpenCV and convert it from BGR to RGB.

    BUG FIX: the original called ``try_import('cv2')`` but ``try_import`` was
    never imported, so every call raised NameError. ``cv2`` is already
    imported at module level, so use it directly.
    """
    return cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)

def default_loader(path):
    """Dispatch to the cv2 or PIL loader based on paddle's configured image backend."""
    backend = get_image_backend()
    return cv2_loader(path) if backend == 'cv2' else pil_loader(path)
    
def make_dataset(root, label):
    """Parse a label file into a list of (image_path, class_id) pairs.

    Each line of *label* is expected to be ``<relative_path> <int_label>``.

    Args:
        root: directory prepended to every relative image path.
        label: path of the space-separated label file.

    Returns:
        List of ``(os.path.join(root, path), int(label))`` tuples.

    BUG FIX: the original never closed the label file and, for lines whose
    path was not an image, appended the label paired with the *previous*
    image path (or raised NameError if the first line was not an image).
    Non-image lines are now skipped entirely.
    """
    images = []
    with open(label) as labeltxt:
        for line in labeltxt:
            data = line.strip().split(" ")
            if not is_image_file(data[0]):
                continue
            path = os.path.join(root, data[0])
            gt = int(data[1])
            images.append((path, gt))
    return images

class ImageSet(Dataset):
    """Dataset of (image, label) pairs described by a label file under *root*.

    Images are loaded lazily with *loader* and optionally passed through
    *transform* in ``__getitem__``.
    """

    def __init__(self, root, label, transform=None, loader=default_loader):
        self.root = root
        self.label = label
        self.samples = make_dataset(root, label)
        self.transform = transform
        self.loader = loader

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        path, gt = self.samples[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        return image, gt
    
# class DigitDataset(Dataset):
#     def __init__(self, x, y=None, transform=None):
#         super(DigitDataset, self).__init__()
#         self.x = x
#         # 标签可能为 None (测试集)
#         self.y = y
#         self.transform = transform
    
#     def __len__(self):
#         return len(self.x)
    
#     def __getitem__(self, index):
#         img = self.x[index]
#         if self.transform is not None:
#             img = self.transform(img)
        
#         if self.y is not None:
#             y = self.y[index]
#             return img, y
#         else:
#             return img
# Exploit prior knowledge: the target domain is known to be hand-drawn sketches,
# so source images are edge-extracted first and then color-inverted into black
# lines on a white background before being used.
class Canny(paddle.vision.transforms.transforms.BaseTransform):
    """Transform applying OpenCV Canny edge detection to an image.

    Args:
        low: lower hysteresis threshold passed to ``cv2.Canny``.
        high: upper hysteresis threshold passed to ``cv2.Canny``.
        keys: forwarded to ``BaseTransform``.
    """

    def __init__(self, low, high, keys=None):
        super(Canny, self).__init__(keys)
        self.low = low
        self.high = high

    def _apply_image(self, img):
        # Call cv2.Canny directly; the original bound a lambda that shadowed
        # the class name for no benefit.
        return cv2.Canny(np.array(img), self.low, self.high)

import PIL.ImageOps
from PIL import Image
class ReverseColor(paddle.vision.transforms.transforms.BaseTransform):
    """Transform that inverts image intensities (black <-> white) via PIL."""

    def __init__(self, keys=None):
        super(ReverseColor, self).__init__(keys)

    def _apply_image(self, img):
        as_pil = Image.fromarray(np.array(img).astype('uint8'))
        inverted = PIL.ImageOps.invert(as_pil)
        return np.array(inverted)
    

def transform(img):
    """Preprocess a raw image for the model.

    Resizes to 28x28, prepends a channel axis, scales to [0, 1] and returns
    a paddle tensor.
    """
    resized = cv2.resize(img, (28, 28))
    chw = np.expand_dims(resized, axis=0)
    normalized = chw.astype('float32') / 255.0
    return paddle.to_tensor(normalized)

# Feature extractor backbone.
class FeatureExtractor(nn.Layer):
    """CNN backbone mapping (N, 1, 64, 64) grayscale images to 1024-d features.

    Six identical conv stages; each doubles the channel count and halves the
    spatial size, so a 64x64 input ends at 1x1 with 1024 channels.
    """

    def __init__(self):
        super(FeatureExtractor, self).__init__()
        channels = [1, 32, 64, 128, 256, 512, 1024]
        stages = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            stages += [
                Conv2D(c_in, c_out, 3, 1, 1),
                BatchNorm2D(c_out),
                ReLU(),
                MaxPool2D(2, 2, 0),
            ]
        self.conv = Sequential(*stages)

    def forward(self, x):
        # NOTE(review): .squeeze() removes every size-1 axis, including the
        # batch axis when the batch holds a single sample — confirm callers
        # never feed batch size 1 during batched training.
        return self.conv(x).squeeze()

class LabelPredictor(nn.Layer):
    """MLP head mapping 1024-d features to 7 class logits."""

    def __init__(self):
        super(LabelPredictor, self).__init__()
        self.layer = Sequential(
            Linear(1024, 512), ReLU(),
            Linear(512, 256), ReLU(),
            Linear(256, 128), ReLU(),
            Linear(128, 7),
        )

    def forward(self, h):
        """Return class logits for the feature batch *h*."""
        return self.layer(h)


class DomainClassifier(nn.Layer):
    """Discriminator mapping 1024-d features to a single domain logit."""

    def __init__(self):
        super(DomainClassifier, self).__init__()
        blocks = []
        for n_in, n_out in ((1024, 512), (512, 256), (256, 128)):
            blocks += [Linear(n_in, n_out), BatchNorm1D(n_out), ReLU()]
        blocks.append(Linear(128, 1))
        self.layer = Sequential(*blocks)

    def forward(self, h):
        """Return the raw domain logit (source vs. target) for feature batch *h*."""
        return self.layer(h)

# Source-domain augmentation: flip/rotate, then Canny edge extraction and
# color inversion so source photos resemble the hand-drawn target style
# (black strokes on white background).
source_transform = Compose([
    Resize((64, 64)),
    RandomHorizontalFlip(),
    RandomRotation(15),
    Grayscale(),
    Canny(low=170, high=300),
    ReverseColor(),
    ToTensor()
])

# Target-domain augmentation: no edge extraction, the images are already sketches.
target_transform = Compose([
    Resize((64, 64)),
    Grayscale(),
    RandomHorizontalFlip(),
    RandomRotation(15, fill=(0,)),
    ToTensor()
    ])

# Deterministic transform for validation/testing (no augmentation).
eval_transform = Compose([
    Resize((64, 64)),
    Grayscale(),
    ToTensor()
    ])

# Create the datasets.
# NOTE(review): these are built at import time and will fail if the
# 'work/PACS' directories are absent — confirm before importing this module
# from other code.
source_dataset = DatasetFolder('work/PACS/src', transform=source_transform)
target_dataset = DatasetFolder('work/PACS/tgt', transform=target_transform)
valid_dataset = DatasetFolder('work/PACS/val', transform=eval_transform)
test_dataset = ImageSet('work/PACS/tgt/7', label='work/PACS/tgt/student.txt', transform=eval_transform)

def load_data(config):
    """Build dataloaders over the module-level datasets.

    Args:
        config: dict with 'train_batch_size' and 'eval_batch_size' keys.

    Returns:
        (source, target, valid, test) DataLoader tuple; the training loaders
        shuffle, the evaluation loaders do not.
    """
    train_bs = config['train_batch_size']
    eval_bs = config['eval_batch_size']

    source_dataloader = DataLoader(source_dataset, batch_size=train_bs, shuffle=True)
    target_dataloader = DataLoader(target_dataset, batch_size=train_bs, shuffle=True)
    valid_dataloader = DataLoader(valid_dataset, batch_size=eval_bs, shuffle=False)
    test_dataloader = DataLoader(test_dataset, batch_size=eval_bs, shuffle=False)

    return source_dataloader, target_dataloader, valid_dataloader, test_dataloader

def sample_from_numpy(images, labels, num_per_class=30, num_classes=7):
    """Sample a fixed number of examples per class from numpy arrays.

    Args:
        images: array of images, indexed along axis 0.
        labels: 1-D integer class-id array aligned with *images*.
        num_per_class: samples drawn per class (without replacement).
        num_classes: number of class ids to scan (default: the 7 PACS classes).

    Returns:
        (selected_images, selected_indices) as numpy arrays. Classes with
        fewer than *num_per_class* samples are skipped.
    """
    selected_images = []
    selected_indices = []

    for class_id in range(num_classes):
        indices = np.where(labels == class_id)[0]
        # BUG FIX: the original used '>' which silently dropped classes that
        # had exactly num_per_class samples.
        if len(indices) >= num_per_class:
            selected = np.random.choice(indices, size=num_per_class, replace=False)
            selected_images.extend(images[selected])
            selected_indices.extend(selected)

    return np.array(selected_images), np.array(selected_indices)

def minmax_norm(x):
    """Min-max normalize *x* along axis 0 into the range [0, 1].

    Constant columns are mapped to 0 instead of producing NaN.

    BUG FIX: the original divided by (mmax - mmin) unguarded, yielding NaN
    (0/0) whenever a column was constant.
    """
    mmin = np.min(x, axis=0)
    mmax = np.max(x, axis=0)
    span = mmax - mmin
    # Replace zero spans with 1 so constant columns become 0, not NaN.
    span = np.where(span == 0, 1, span)
    return (x - mmin) / span

def draw_tsne(feat1, feat2, filename=None):
    """Scatter-plot a 2-D t-SNE embedding of two feature sets.

    *feat1* is drawn in blue (Source), *feat2* in red (Target). The plot is
    saved to *filename* if given, otherwise shown interactively.
    """
    tsne = TSNE(n_components=2, random_state=0, learning_rate=200, perplexity=18, n_iter=5000, init='random')
    split = len(feat1)
    embedded = tsne.fit_transform(np.concatenate((feat1, feat2)))
    embedded = minmax_norm(embedded)

    plt.figure(figsize=(8, 8), frameon=False)
    plt.scatter(embedded[:split, 0], embedded[:split, 1], c="blue", s=10, marker="o", label='Source')
    plt.scatter(embedded[split:, 0], embedded[split:, 1], c="red", s=10, marker="o", label='Target')
    plt.xticks([])
    plt.yticks([])
    plt.legend()

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()

def plot_learning_curve(record, title='loss', ylabel='Loss', filename=None):
    """Plot the learning curves stored in *record*.

    *record* maps a series key (e.g. 'trainF') to a dict holding an 'iter'
    list and per-metric value lists keyed by *title*. Series whose key has no
    assigned color, or whose values cannot be parsed, are skipped.
    """
    color_dict = {'trainF': 'tab:green', 'trainD': 'tab:blue', 'valid': 'tab:red', 'trainCE': 'tab:purple', 'trainKL': 'tab:orange', 'trainVAT': 'tab:brown'}

    # Bail out early when no series carries data for this metric.
    if not any(title in rec and len(rec[title]) > 0 for rec in record.values()):
        print(f"警告: 没有找到有效的{title}数据用于绘图")
        return

    # Gather every parseable value in a single pass; a series that fails
    # float conversion contributes nothing (matching per-series try/except).
    numeric = []
    for rec in record.values():
        series = rec.get(title)
        if not series:
            continue
        try:
            numeric.extend([float(v) for v in series])
        except ValueError:
            continue

    # y-axis limits mirror the original accumulation: ymax never drops below
    # 0, and ymin falls back to 0 when no value beats the 1e6 sentinel.
    ymax = max([0.0] + numeric) * 1.1
    ymin = min([1e6] + numeric)
    ymin = 0 if ymin == 1e6 else ymin * 0.9

    figure(figsize=(10, 6))
    for key, rec in record.items():
        series = rec.get(title)
        if not series or key not in color_dict:
            continue
        try:
            steps = list(map(int, rec['iter']))
            plt.plot(steps, series, c=color_dict[key], label=key)
        except (ValueError, KeyError):
            # Skip series with missing/unparseable step data.
            continue

    plt.ylim(ymin, ymax)
    plt.xlabel('Training steps')
    plt.ylabel(ylabel)
    plt.title('Learning curve of {}'.format(title))
    plt.legend()
    if filename:
        plt.savefig(filename)
    else:
        plt.show()
    plt.close()

def get_lambda(epoch, max_epoch, gamma=0.8):
    """Domain-adaptation ramp-up coefficient.

    Returns 2 / (1 + exp(-gamma * p)) - 1 where p = epoch / max_epoch,
    rising smoothly from 0 toward tanh-like saturation.
    """
    progress = float(epoch) / max_epoch
    return 2.0 / (1.0 + np.exp(-gamma * progress)) - 1.0

def train_source_only(extractor, predictor, optimizer_F, optimizer_C, class_criterion, source_dataloader):
    """One epoch of supervised training on source-domain data only.

    Returns:
        (mean classification loss, source-domain accuracy) for the epoch.
    """
    extractor.train()
    predictor.train()

    total_loss = 0.0
    n_correct, n_seen = 0.0, 0.0

    for step, (imgs, labels) in enumerate(tqdm(source_dataloader, desc="Source training")):
        logits = predictor(extractor(imgs))
        ce_loss = class_criterion(logits, labels)
        total_loss += ce_loss.item()

        ce_loss.backward()
        optimizer_F.step()
        optimizer_C.step()
        optimizer_F.clear_grad()
        optimizer_C.clear_grad()

        n_correct += (paddle.argmax(logits, axis=1) == labels).astype('float32').sum().item()
        n_seen += imgs.shape[0]

    return total_loss / (step + 1), n_correct / n_seen

def train_dann(extractor, predictor, discriminator, 
               optimizer_F, optimizer_C, optimizer_D,
               class_criterion, domain_criterion, 
               source_dataloader, target_dataloader, lamb):
    """One epoch of DANN-style adversarial domain-adaptation training.

    Step 1 trains the domain classifier on detached features; Step 2 trains
    the feature extractor and label predictor with class CE minus
    ``lamb`` * domain BCE, pushing features to fool the discriminator.

    Returns:
        (mean domain loss, mean class loss, source accuracy, domain accuracy)
    """
    extractor.train()
    predictor.train()
    discriminator.train()
    
    # D loss: loss of the Domain Classifier
    # F loss: loss of the Feature Extractor & Label Predictor
    running_D_loss, running_F_loss = 0.0, 0.0
    F_hit, F_num = 0.0, 0.0
    D_hit, D_num = 0.0, 0.0
    
    # An explicit iterator over the target domain is needed inside the loop
    target_iter = iter(target_dataloader)
    
    for i, (source_data, source_label) in enumerate(tqdm(source_dataloader, desc="DANN training")):
        # Fetch a target batch; restart the iterator once it is exhausted
        try:
            target_data, _ = next(target_iter)
        except StopIteration:
            target_iter = iter(target_dataloader)
            target_data, _ = next(target_iter)
        
        # Mix the two batches so BatchNorm statistics cover both domains
        mixed_data = paddle.concat([source_data, target_data], axis=0)
        domain_label = paddle.zeros([source_data.shape[0] + target_data.shape[0], 1])
        # Source-domain samples are labelled 1
        domain_label[:source_data.shape[0]] = 1
        
        # Step 1: train the Domain Classifier
        feature = extractor(mixed_data)
        # The Feature Extractor must not be trained in Step 1, so detach the
        # features to keep gradients from reaching its parameters
        domain_logits = discriminator(feature.detach())
        loss = domain_criterion(domain_logits, domain_label)
        running_D_loss += loss.item()
        
        optimizer_D.clear_grad()
        loss.backward()
        optimizer_D.step()
        
        domain_pred = (F.sigmoid(domain_logits) > 0.5).astype('float32')
        D_hit += (domain_pred == domain_label).astype('float32').sum().item()
        D_num += mixed_data.shape[0]
        
        # Step 2: train the Feature Extractor and Label Predictor
        class_logits = predictor(feature[:source_data.shape[0]])
        domain_logits = discriminator(feature)
        # Loss is class CE minus lamb * domain BCE (adversarial objective)
        class_loss = class_criterion(class_logits, source_label)
        running_F_loss += class_loss.item()
        
        loss = class_loss - lamb * domain_criterion(domain_logits, domain_label)
        
        optimizer_F.clear_grad()
        optimizer_C.clear_grad()
        loss.backward()
        optimizer_F.step()
        optimizer_C.step()
        
        F_hit += (paddle.argmax(class_logits, axis=1) == source_label).astype('float32').sum().item()
        F_num += source_data.shape[0]
    
    return running_D_loss / (i+1), running_F_loss / (i+1), F_hit / F_num, D_hit / D_num

def valid_epoch(valid_dataloader, acc_manager, feature_extractor, label_predictor, class_criterion):
    """Run one validation pass and return the mean classification loss.

    Accuracy is accumulated into *acc_manager* as a side effect. The models
    are switched back to train mode before returning.
    """
    feature_extractor.eval()
    label_predictor.eval()

    total_loss = 0.0
    for batch_idx, batch in enumerate(valid_dataloader):
        images, targets = batch
        target_col = paddle.unsqueeze(targets, axis=1)

        # No gradients needed during validation; no_grad speeds up the forward pass.
        with paddle.no_grad():
            logits = label_predictor(feature_extractor(images))
            total_loss += class_criterion(logits, targets).numpy()
            acc_manager.update(acc_manager.compute(logits, target_col))

    feature_extractor.train()
    label_predictor.train()

    return total_loss / (batch_idx + 1)

# KL divergence between two categorical distributions given as logits.
def kl_divergence(p_logit, q_logit):
    """Per-sample KL(P||Q) from logits, using log_softmax for stability."""
    p = F.softmax(p_logit, axis=1)
    log_p = F.log_softmax(p_logit, axis=1)
    log_q = F.log_softmax(q_logit, axis=1)
    # D_KL(P||Q) = sum_k p_k * (log p_k - log q_k)
    return paddle.sum(p * (log_p - log_q), axis=1)
    
# VADA specific loss functions
class ConditionalEntropyLoss(nn.Layer):
    """Conditional entropy of predictions: L_c = -E_x[ sum_k p_k log p_k ]."""

    def __init__(self):
        super(ConditionalEntropyLoss, self).__init__()

    def forward(self, x):
        """
        Computes the conditional entropy loss.
        Args:
            x: Logits from the predictor. Shape: (batch_size, num_classes)
        Returns:
            Scalar mean entropy over the batch.
        """
        # BUG FIX: the original computed log(softmax(x)) with no epsilon
        # despite a comment promising log(0) protection, so fully-confident
        # predictions produced NaN (0 * -inf). log_softmax is always finite,
        # and where softmax underflows to 0 the product is exactly 0.
        p = F.softmax(x, axis=1)
        log_p = F.log_softmax(x, axis=1)
        entropy = -paddle.sum(p * log_p, axis=1)
        return paddle.mean(entropy)


class VATLoss(nn.Layer):
    """Virtual Adversarial Training loss.

    Finds, via power iteration, a small input perturbation r_adv (L2 norm
    ``eps``) that maximally changes the model's prediction, then returns the
    mean KL divergence between predictions on x and on x + r_adv.
    """

    def __init__(self, xi=1e-6, eps=2.5, ip=1):
        super(VATLoss, self).__init__()
        self.xi = xi  # Small constant for initial perturbation normalization
        self.eps = eps # Norm constraint for the adversarial perturbation
        self.ip = ip   # Number of power iterations to find r_adv

    def _normalize_perturbation(self, d, norm_value):
        """Normalizes a perturbation tensor d to have a specific L2 norm."""
        # d shape: (N, C, H, W)
        d_reshaped = d.reshape((d.shape[0], -1)) # (N, C*H*W)
        d_norm = paddle.norm(d_reshaped, p=2, axis=1, keepdim=True) # (N, 1)
        # Add epsilon to prevent division by zero
        d_normalized = d_reshaped / (d_norm + 1e-9) 
        d_scaled = norm_value * d_normalized
        return d_scaled.reshape(d.shape) # Reshape back to original (N, C, H, W)

    def forward(self, extractor, predictor, x):
        """
        Computes the Virtual Adversarial Training Loss.
        Args:
            extractor: The feature extractor model.
            predictor: The label predictor model.
            x: Input data (e.g., images).
        Returns:
            Scalar VAT loss.
        """
        # Prediction with original x (detached, treated as constant for r_adv search)
        with paddle.no_grad():
            original_features = extractor(x)
            pred_x_logit = predictor(original_features).detach()

        # Generate initial random perturbation 'd' (same shape as x)
        d = paddle.randn(shape=x.shape)
        d = self._normalize_perturbation(d, self.xi) # Normalize to small xi

        # Power iteration to find the adversarial perturbation r_adv
        for _ in range(self.ip):
            d.stop_gradient = False # Make d require grad for optimization
            
            perturbed_features = extractor(x + d)
            pred_x_plus_d_logit = predictor(perturbed_features)
            
            # Calculate KL divergence: KL( h(x) || h(x+d) )
            # pred_x_logit is detached (constant target)
            kl_div = kl_divergence(pred_x_logit, pred_x_plus_d_logit)
            kl_div_mean = paddle.mean(kl_div) # Average over batch
            
            # Compute gradient of KL divergence w.r.t. d
            # create_graph=False as we don't need to backprop through this gradient calculation later
            grad_d = paddle.grad(outputs=[kl_div_mean], inputs=[d], create_graph=False)[0]
            
            # Update d to be the gradient direction, then normalize and scale to eps
            d = grad_d 
            d = self._normalize_perturbation(d, self.eps)
            d = d.detach() # Detach d for the next iteration or for use as r_adv

        r_adv = d # The final adversarial perturbation

        # Compute VAT loss with r_adv
        # Gradients should flow through predictor(extractor(x + r_adv))
        perturbed_features_final = extractor(x + r_adv)
        pred_x_plus_r_adv_logit = predictor(perturbed_features_final)
        
        # pred_x_logit is still the detached original prediction
        vat_loss_val = kl_divergence(pred_x_logit, pred_x_plus_r_adv_logit)
        return paddle.mean(vat_loss_val)

class VisualizationTsne:
    """Draws a t-SNE plot comparing extractor features of source vs. target samples.

    Source samples are drawn in blue, target samples in red. The plot is
    saved to ``config['save_path']/tsne.png`` when a save path is set,
    otherwise shown interactively.
    """

    def __init__(self, source_dataset, target_dataset, extractor, config=None):
        self.source_dataset = source_dataset
        self.target_dataset = target_dataset
        self.extractor = extractor
        self.config = config

    def _sample_source(self, num):
        """Sample *num* image paths per class (7 classes) from the source dataset."""
        samples = np.array(self.source_dataset.samples)
        labels = samples[:, 1].astype("int64")
        paths = []
        for cid in range(7):
            subsamples = samples[labels == cid]
            paths.extend(np.random.choice(subsamples[:, 0], size=num, replace=False))
        return paths

    def _sample_target(self, num):
        """Sample *num* image paths uniformly at random from the target dataset."""
        samples = np.array(self.target_dataset.samples)
        np.random.shuffle(samples)
        return samples[:num][:, 0]

    def _minmax_norm(self, x):
        """Min-max normalize the columns of *x* into [0, 1]."""
        mmin = np.min(x, axis=0)
        mmax = np.max(x, axis=0)
        return (x - mmin) / (mmax - mmin)

    def _draw_tsne(self, feat1, feat2):
        """Embed the concatenated feature sets with t-SNE and save/show the plot."""
        tsne = TSNE(n_components=2, random_state=0, learning_rate=200, perplexity=18, n_iter=5000, init='random')
        seg = len(feat1)
        X = tsne.fit_transform(np.concatenate((feat1, feat2)))
        X = self._minmax_norm(X)
        figure(figsize=(8, 8), frameon=False)

        plt.scatter(X[:seg, 0], X[:seg, 1], c="blue", s=10, marker="o")
        plt.scatter(X[seg:, 0], X[seg:, 1], c="red", s=10, marker="o")

        plt.xticks([])
        plt.yticks([])
        if self.config['save_path']:
            plt.savefig(os.path.join(self.config['save_path'], 'tsne.png'))
        else:
            plt.show()

    def draw(self):
        """Extract features for sampled source/target images and plot them."""
        source_paths = self._sample_source(num=30)
        source_imgs = [source_transform(default_loader(p)) for p in source_paths]

        target_paths = self._sample_target(num=210)
        target_imgs = [target_transform(default_loader(p)) for p in target_paths]

        # ROBUSTNESS FIX: flatten each per-image feature to 1-D so t-SNE always
        # receives a 2-D (n_samples, n_features) array, regardless of whether
        # the extractor output keeps its batch dimension for batch size 1.
        source_feats = np.array([self.extractor(img.unsqueeze(0)).numpy().ravel()
                                 for img in source_imgs])
        target_feats = np.array([self.extractor(img.unsqueeze(0)).numpy().ravel()
                                 for img in target_imgs])

        self._draw_tsne(source_feats, target_feats)


def validate_visualization(config_path):
    """Evaluate a trained model on the validation set, draw a t-SNE plot, and
    record the metrics in experiment_results.json.

    Args:
        config_path: path of a YAML config with 'save_path', 'name', 'method'
            and the batch-size keys used by load_data.
    """
    # Load configuration.
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Only the validation loader is needed here.
    _, _, target_val_loader, _ = load_data(config)

    # Restore trained weights.
    extractor = FeatureExtractor()
    predictor = LabelPredictor()

    extractor.set_state_dict(paddle.load(os.path.join(config['save_path'], 'extractor.pdparams')))
    predictor.set_state_dict(paddle.load(os.path.join(config['save_path'], 'predictor.pdparams')))

    extractor.eval()
    predictor.eval()

    # Evaluate on the validation set.
    valid_loss = 0.0
    valid_acc_manager = paddle.metric.Accuracy()
    class_criterion = paddle.nn.loss.CrossEntropyLoss()
    for batch_id, data in enumerate(target_val_loader):
        x_data, y_data = data
        labels = paddle.unsqueeze(y_data, axis=1)

        # No gradients are needed for evaluation.
        with paddle.no_grad():
            feat = extractor(x_data)
            predicts = predictor(feat)
            loss = class_criterion(predicts, y_data)

        acc = valid_acc_manager.compute(predicts, labels)
        valid_acc_manager.update(acc)

        valid_loss += loss

    valid_acc = valid_acc_manager.accumulate()
    total_valid_loss = valid_loss / (batch_id + 1)
    print("validation loss is: {}, validation acc is: {}".format(total_valid_loss.numpy(), valid_acc))

    # t-SNE visualization of source vs. target features.
    visualizer = VisualizationTsne(source_dataset, target_dataset, extractor, config)
    visualizer.draw()

    # Persist this run's metrics to experiment_results.json.
    result = {
        'name': config['name'],
        'method': config['method'],
        'total_valid_loss': float(total_valid_loss.numpy()),
        'valid_accuracy': float(valid_acc)
    }

    result_file = 'experiment_results.json'
    if os.path.exists(result_file):
        try:
            with open(result_file, 'r', encoding='utf-8') as f:
                all_results = json.load(f)
        # BUG FIX: also catch JSONDecodeError so a corrupt results file is
        # rebuilt instead of crashing (the original only caught
        # FileNotFoundError, which the exists() check made nearly dead).
        except (FileNotFoundError, json.JSONDecodeError):
            print(f"读取{result_file}失败，将重新创建")
            all_results = []
    else:
        all_results = []

    # Replace an existing entry with the same experiment name, else append.
    name_exists = False
    for i, r in enumerate(all_results):
        if r['name'] == config['name']:
            all_results[i] = result
            name_exists = True
            break

    if not name_exists:
        all_results.append(result)

    with open(result_file, 'w', encoding='utf-8') as f:
        json.dump(all_results, f, indent=2)

def test(config_path):
    """Run inference on the test set, write predict.csv and save each image
    into a folder named after its predicted class.

    Args:
        config_path: path of a YAML config with 'save_path' and the
            batch-size keys used by load_data.
    """
    # Load configuration.
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    # Load the test data.
    try:
        _, _, _, test_dataloader = load_data(config)
    except FileNotFoundError:
        print("测试数据文件不存在")
        return

    # Restore trained weights.
    extractor = FeatureExtractor()
    predictor = LabelPredictor()

    extractor.set_state_dict(paddle.load(os.path.join(config['save_path'], 'extractor.pdparams')))
    predictor.set_state_dict(paddle.load(os.path.join(config['save_path'], 'predictor.pdparams')))

    extractor.eval()
    predictor.eval()

    # Predict class ids for every test batch.
    predictions = []
    with paddle.no_grad():
        for data in tqdm(test_dataloader, desc="Testing"):
            x_data, y_data = data
            feature = extractor(x_data)
            logits = predictor(feature)
            pred = paddle.argmax(logits, axis=1)
            predictions.extend(pred.cpu().numpy().tolist())

    # Write the submission file.
    with open("predict.csv", "w") as f:
        f.write("Id,Category\n")
        for i, pred in enumerate(predictions):
            f.write(f"{i},{pred}\n")

    print(f"预测完成，结果已保存到 predict.csv")

    # Save the classified images into per-class folders.
    # NOTE: the original list carried a duplicate trailing 'dog'; predictions
    # are argmax over 7 logits (0-6), so only these 7 entries are reachable.
    titles = ['dog', 'elephant', 'giraffe', 'guitar', 'horse', 'house', 'person']
    for i, pred in enumerate(predictions):
        out_dir = os.path.join(config['save_path'], f"{titles[pred]}")
        os.makedirs(out_dir, exist_ok=True)
        tensor, _ = test_dataset[i]
        # Convert the tensor to a uint8 array cv2.imwrite can handle.
        img = tensor.cpu().numpy()
        if img.shape[0] == 1:  # grayscale: CHW -> HW
            # BUG FIX: the original skipped the uint8 cast on this branch,
            # handing cv2.imwrite a float array.
            img = (img[0] * 255).astype(np.uint8)
        else:  # color: CHW -> HWC
            img = (img.transpose((1, 2, 0)) * 255).astype(np.uint8)
        cv2.imwrite(os.path.join(out_dir, f"{i}.png"), img)

def generate_report():
    """Generate a markdown experiment report from experiment_results.json.

    Writes experiment_report.md with a metrics table, the best method by
    validation accuracy, and (when both exist) the DANN-over-source-only
    accuracy improvement.
    """
    result_file = 'experiment_results.json'
    if not os.path.exists(result_file):
        print("没有找到实验结果文件")
        return

    with open(result_file, 'r', encoding='utf-8') as f:
        results = json.load(f)

    # Guard against an empty results file (max() below would raise).
    if not results:
        print("实验结果为空")
        return

    # Build the report line by line.
    # BUG FIX: the original concatenated literal '\\n' sequences instead of
    # real newlines, producing a single-line markdown file. It also opened an
    # unused matplotlib figure (all plotting code was commented out) that was
    # never closed — that dead code is removed here.
    lines = []
    lines.append("# 域适应实验报告")
    lines.append("")
    lines.append("## 实验结果")
    lines.append("")
    # BUG FIX: the table columns are loss and accuracy, but the original
    # header labelled both as accuracies.
    lines.append("| 方法 | 验证损失 | 验证准确率 |")
    lines.append("|------|------------|------------|")

    for r in results:
        lines.append(f"| {r['method']} | {r['total_valid_loss']:.4f} | {r['valid_accuracy']:.4f} |")

    lines.append("")
    lines.append("## 结论")
    lines.append("")

    # BUG FIX: the original selected min() by loss while the sentence claims
    # "highest accuracy" — select by accuracy to match the statement.
    best_method = max(results, key=lambda x: x['valid_accuracy'])
    lines.append(f"在所有实验中，{best_method['method']}方法在目标域上获得了最高的准确率({best_method['valid_accuracy']:.4f})。")
    lines.append("")

    if len(results) > 1:
        source_only = next((r for r in results if r['method'] == 'source_only'), None)
        dann = next((r for r in results if r['method'] == 'dann'), None)

        if source_only and dann:
            # BUG FIX: results carry 'valid_accuracy'; the original read a
            # nonexistent 'target_accuracy' key and raised KeyError.
            improvement = dann['valid_accuracy'] - source_only['valid_accuracy']
            lines.append(f"与仅使用源域训练的模型相比，DANN方法在目标域上的准确率提高了{improvement:.4f}。")

    lines.append("")
    lines.append("## 可视化")
    lines.append("")
    lines.append("请参考各方法保存目录下的t-SNE可视化和学习曲线。")

    with open('experiment_report.md', 'w', encoding='utf-8') as f:
        f.write("\n".join(lines) + "\n")

    print("实验报告已生成：experiment_report.md")

def train_vada(extractor, predictor, discriminator,
               optimizer_F, optimizer_C, optimizer_D,
               class_criterion, domain_criterion, conditional_entropy_loss_fn, vat_loss_fn,
               source_dataloader, target_dataloader,
               lambda_d, lambda_s_vat, lambda_t,
               config):
    """One epoch of VADA training (domain-adversarial + VAT + conditional entropy).

    Step 1 trains the domain classifier on no-grad features; Step 2 trains
    the feature extractor and label predictor with the combined objective
    class CE + lambda_d * fooling loss + lambda_s_vat * source VAT
    + lambda_t * (target VAT + conditional entropy).

    Returns:
        (avg domain loss, avg F loss, source accuracy, domain accuracy,
        avg conditional-entropy loss), each averaged per batch.
    """
    extractor.train()
    predictor.train()
    discriminator.train()

    running_D_loss, running_F_loss = 0.0, 0.0
    running_CE_loss = 0.0  # tracks the conditional-entropy loss
    F_hit, F_num = 0.0, 0.0
    D_hit, D_num = 0.0, 0.0
    
    target_iter = iter(target_dataloader)
    num_batches = len(source_dataloader)

    for i, (source_data, source_label) in enumerate(tqdm(source_dataloader, desc="VADA training")):
        try:
            target_data, _ = next(target_iter) 
        except StopIteration:
            target_iter = iter(target_dataloader)
            target_data, _ = next(target_iter)
        
        # Guard for mismatched batch sizes (e.g. last partial batch)
        if source_data.shape[0] != target_data.shape[0]:
            # Simplified handling; adjust for the concrete setup if needed
            pass

        # Extract features once for the discriminator step
        with paddle.no_grad():
            # Features used to train the domain classifier (no gradients needed)
            source_features_d = extractor(source_data)
            target_features_d = extractor(target_data)
        
        # --- Step 1: train the domain classifier ---
        features_concat_d = paddle.concat([source_features_d, target_features_d], axis=0)
        domain_labels = paddle.concat([
            paddle.ones([source_features_d.shape[0], 1], dtype='float32'),  # source domain = 1
            paddle.zeros([target_features_d.shape[0], 1], dtype='float32')  # target domain = 0
        ], axis=0)

        domain_logits_d = discriminator(features_concat_d)
        loss_d = domain_criterion(domain_logits_d, domain_labels)
        
        optimizer_D.clear_grad()
        loss_d.backward()
        optimizer_D.step()
        
        running_D_loss += loss_d.item()
        domain_pred = (F.sigmoid(domain_logits_d) > 0.5).astype('float32')
        D_hit += (domain_pred == domain_labels).astype('float32').sum().item()
        D_num += features_concat_d.shape[0]

        # --- Step 2: train the feature extractor and label predictor ---
        # Recompute features so gradients can flow back into the extractor
        source_features = extractor(source_data)
        target_features = extractor(target_data)
        features_concat = paddle.concat([source_features, target_features], axis=0)
        
        # 2.1. Source-domain classification loss
        class_logits = predictor(source_features)
        loss_y = class_criterion(class_logits, source_label)

        # 2.2. Domain-adversarial (fooling) loss
        domain_logits = discriminator(features_concat)
        loss_domain_fool = -domain_criterion(domain_logits, domain_labels)

        # 2.3. Source-domain VAT loss
        vat_loss_s = vat_loss_fn(extractor, predictor, source_data)
        
        # 2.4 & 2.5. Target-domain losses: VAT + conditional entropy (shared lambda_t)
        vat_loss_t = vat_loss_fn(extractor, predictor, target_data)
        target_logits = predictor(target_features)
        cond_entropy_loss_t = conditional_entropy_loss_fn(target_logits)
        running_CE_loss += cond_entropy_loss_t.item()  # record the conditional-entropy loss

        # Combined objective
        total_loss_F_C = loss_y + \
                         lambda_d * loss_domain_fool + \
                         lambda_s_vat * vat_loss_s + \
                         lambda_t * (vat_loss_t + cond_entropy_loss_t)
        
        optimizer_F.clear_grad()
        optimizer_C.clear_grad()
        total_loss_F_C.backward()
        optimizer_F.step()
        optimizer_C.step()

        running_F_loss += total_loss_F_C.item() 
        F_hit += (paddle.argmax(class_logits, axis=1) == source_label).astype('float32').sum().item()
        F_num += source_data.shape[0]

    avg_D_loss = running_D_loss / num_batches if num_batches > 0 else 0.0
    avg_F_loss = running_F_loss / num_batches if num_batches > 0 else 0.0
    avg_CE_loss = running_CE_loss / num_batches if num_batches > 0 else 0.0
    avg_F_acc = F_hit / F_num if F_num > 0 else 0.0
    avg_D_acc = D_hit / D_num if D_num > 0 else 0.0
    
    # Return per-batch averages, including the conditional-entropy loss
    return avg_D_loss, avg_F_loss, avg_F_acc, avg_D_acc, avg_CE_loss

def train_dirt(extractor, predictor, teacher_extractor, teacher_predictor,
              optimizer_F, optimizer_C,
              conditional_entropy_loss_fn, vat_loss_fn,
              source_dataloader, target_dataloader,
              lambda_t, beta_t,
              config):
    """Train with DIRT (Decision-boundary Iterative Refinement Training).

    Optimizes the student (extractor + predictor) on target-domain data only,
    using three terms: VAT loss, conditional entropy, and a KL divergence
    anchoring the student to a frozen teacher. Source batches are used solely
    to report classification accuracy (no gradient flows from them).

    Returns:
        (avg_F_loss, avg_F_acc, avg_VAT_loss, avg_CE_loss, avg_KL_loss)
    """
    extractor.train()
    predictor.train()
    # Teacher networks stay in eval mode and never receive gradients.
    teacher_extractor.eval()
    teacher_predictor.eval()

    totals = {'F': 0.0, 'CE': 0.0, 'KL': 0.0, 'VAT': 0.0}
    correct, seen = 0.0, 0.0

    tgt_stream = iter(target_dataloader)
    batch_count = len(source_dataloader)

    for src_batch, src_labels in tqdm(source_dataloader, desc="DIRT training"):
        # Cycle the target loader so every source batch gets a target batch.
        try:
            tgt_batch, _ = next(tgt_stream)
        except StopIteration:
            tgt_stream = iter(target_dataloader)
            tgt_batch, _ = next(tgt_stream)

        # Student forward pass on the target batch only.
        tgt_feats = extractor(tgt_batch)
        tgt_logits = predictor(tgt_feats)

        # 1. VAT loss on the target domain.
        vat_t = vat_loss_fn(extractor, predictor, tgt_batch)
        totals['VAT'] += vat_t.item()

        # 2. Conditional entropy of the student's target predictions.
        ce_t = conditional_entropy_loss_fn(tgt_logits)
        totals['CE'] += ce_t.item()

        # 3. Teacher predictions (frozen, no gradient).
        with paddle.no_grad():
            teacher_feats = teacher_extractor(tgt_batch)
            teacher_logits = teacher_predictor(teacher_feats)

        # Teacher-student KL divergence keeps refinements near the teacher.
        kl = paddle.mean(kl_divergence(teacher_logits, tgt_logits))
        totals['KL'] += kl.item()

        # Combined objective: only the three target-domain terms.
        objective = lambda_t * vat_t + \
                    lambda_t * ce_t + \
                    beta_t * kl

        optimizer_F.clear_grad()
        optimizer_C.clear_grad()
        objective.backward()
        optimizer_F.step()
        optimizer_C.step()

        totals['F'] += objective.item()

        # Source-domain accuracy is tracked for logging only.
        with paddle.no_grad():
            src_logits = predictor(extractor(src_batch))
            correct += (paddle.argmax(src_logits, axis=1) == src_labels).astype('float32').sum().item()
            seen += src_batch.shape[0]

    def _mean(total):
        # Guard against an empty source loader.
        return total / batch_count if batch_count > 0 else 0.0

    acc = correct / seen if seen > 0 else 0.0
    return _mean(totals['F']), acc, _mean(totals['VAT']), _mean(totals['CE']), _mean(totals['KL'])

def train(config_path):
    """Run the full training loop for the method selected in the YAML config.

    Supported methods: 'source_only', 'dann', 'vada', 'dirt'. Loads data,
    builds the models and optimizers, trains for config['num_epochs'] epochs,
    validates after each epoch, keeps the best checkpoint by validation
    accuracy, and finally plots loss/accuracy curves into config['save_path'].

    Args:
        config_path: Path to a YAML configuration file.
    """
    # Load configuration
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    
    # Seed Paddle and NumPy for reproducibility
    paddle.seed(config['seed'])
    np.random.seed(config['seed'])
    
    # Create the checkpoint/output directory
    os.makedirs(config['save_path'], exist_ok=True)
    
    # Load data loaders (source, target, validation; 4th return unused here)
    source_dataloader, target_dataloader, valid_dataloader, _ = load_data(config)
    
    # Build models
    extractor = FeatureExtractor()
    predictor = LabelPredictor()
    discriminator = DomainClassifier()
    
    # DIRT additionally needs a frozen teacher model
    teacher_extractor = None
    teacher_predictor = None
    if config['method'] == 'dirt':
        teacher_extractor = FeatureExtractor()
        teacher_predictor = LabelPredictor()
        # NOTE(review): the teacher checkpoint is loaded into the *student*
        # networks here; the teacher itself is synced from the student at
        # epoch 0 below, so both end up with these weights — confirm this
        # initialization order is intended.
        extractor.load_dict(paddle.load(os.path.join(config['save_path'], 'teacher_extractor.pdparams')))
        predictor.load_dict(paddle.load(os.path.join(config['save_path'], 'teacher_predictor.pdparams')))
    if config['resume']:
        # Resume from checkpoints in save_path. Only model weights are
        # restored; optimizer state and the epoch counter are not.
        print(f"从检查点恢复训练，使用配置 {config['resume']}")
        # Full resume support is not implemented yet.
        print("目前不支持恢复训练功能")
        extractor.load_dict(paddle.load(os.path.join(config['save_path'], 'extractor.pdparams')))
        predictor.load_dict(paddle.load(os.path.join(config['save_path'], 'predictor.pdparams')))
        if config['method'] in ['dann', 'vada', 'dirt']:
            discriminator.load_dict(paddle.load(os.path.join(config['save_path'], 'discriminator.pdparams')))
        if config['method'] == 'dirt' and os.path.exists(os.path.join(config['save_path'], 'teacher_extractor.pdparams')):
            teacher_extractor.load_dict(paddle.load(os.path.join(config['save_path'], 'teacher_extractor.pdparams')))
            teacher_predictor.load_dict(paddle.load(os.path.join(config['save_path'], 'teacher_predictor.pdparams')))

    # Loss functions: classification and (binary) domain discrimination
    class_criterion = nn.CrossEntropyLoss()
    domain_criterion = nn.BCEWithLogitsLoss()
    
    # One Adam optimizer per sub-network (default hyperparameters)
    optimizer_F = optim.Adam(parameters=extractor.parameters())
    optimizer_C = optim.Adam(parameters=predictor.parameters())
    optimizer_D = optim.Adam(parameters=discriminator.parameters())
    
    # VADA and DIRT need the conditional-entropy and VAT losses
    conditional_entropy_loss_fn = None
    vat_loss_fn = None
    if config['method'] in ['vada', 'dirt']:
        conditional_entropy_loss_fn = ConditionalEntropyLoss()
        vat_params = config.get('vat_params', {})
        vat_loss_fn = VATLoss(
            xi=float(vat_params.get('xi', 1e-6)),
            eps=float(vat_params.get('eps', 2.5)),
            ip=int(vat_params.get('ip', 1))
        )

    # Training setup
    max_epoch = config['num_epochs']
    best_acc = 0.0

    # Per-method layout of the loss/accuracy history dicts; the keys must
    # match what plot_learning_curve expects.
    if config['method'] == 'source_only':
        loss_record = {'trainF': {'loss': [], 'iter': []}, 'valid': {'loss': [], 'iter': []}}
        acc_record = {'trainF': {'acc': [], 'iter': []}, 'valid': {'acc': [], 'iter': []}}
    else:
        loss_record = {'trainF': {'loss': [], 'iter': []}, 'trainD': {'loss': [], 'iter': []}, 'valid': {'loss': [], 'iter': []}}
        # VADA and DIRT also track conditional-entropy loss
        if config['method'] in ['vada', 'dirt']:
            loss_record['trainCE'] = {'loss': [], 'iter': []}
        # DIRT also tracks KL and VAT losses
        if config['method'] == 'dirt':
            loss_record['trainKL'] = {'loss': [], 'iter': []}
            loss_record['trainVAT'] = {'loss': [], 'iter': []}
            # No trainD accuracy: DIRT does not train the domain classifier
            acc_record = {'trainF': {'acc': [], 'iter': []}, 'valid': {'acc': [], 'iter': []}}
        else:
            acc_record = {'trainF': {'acc': [], 'iter': []}, 'trainD': {'acc': [], 'iter': []}, 'valid': {'acc': [], 'iter': []}}
    
    # `it` is incremented once per epoch and used as the x-axis of the curves
    it = 0
    for epoch in range(max_epoch):
        if config['method'] == 'source_only':
            # Source-only baseline: supervised training on the source domain
            running_F_loss, F_acc = train_source_only(extractor, predictor, optimizer_F, optimizer_C, 
                                                     class_criterion, source_dataloader)
            domain_acc = 0
            # log for train curve
            loss_record['trainF']['loss'].append(running_F_loss)
            loss_record['trainF']['iter'].append(it)
            acc_record['trainF']['acc'].append(F_acc)
            acc_record['trainF']['iter'].append(it)
            
        elif config['method'] == 'dann' :
            # DANN: adversarial domain adaptation with an annealed lambda
            running_D_loss, running_F_loss, F_acc, domain_acc = train_dann(
                extractor, predictor, discriminator,
                optimizer_F, optimizer_C, optimizer_D,
                class_criterion, domain_criterion,
                source_dataloader, target_dataloader,
                get_lambda(epoch, max_epoch, config.get('gamma', 0.8)) * config['lambda']
            )
            # log for train curve
            loss_record['trainF']['loss'].append(running_F_loss)
            loss_record['trainF']['iter'].append(it)
            loss_record['trainD']['loss'].append(running_D_loss)
            loss_record['trainD']['iter'].append(it)
            acc_record['trainF']['acc'].append(F_acc)
            acc_record['trainF']['iter'].append(it)
            acc_record['trainD']['acc'].append(domain_acc)
            acc_record['trainD']['iter'].append(it)
            print('epoch {:>3d}: train D loss: {:6.4f}, train F loss: {:6.4f}, trainF acc {:6.4f}, trainD acc {:6.4f}'.format(epoch, running_D_loss, running_F_loss, F_acc, domain_acc))

        elif config['method'] == 'vada':
            # VADA training mode
            # Get lambdas from config, with defaults
            lambda_d_val = get_lambda(epoch, max_epoch, config.get('gamma', 0.8)) * config.get('lambda_d', 1.0)
            lambda_s_vat_val = float(config.get('lambda_s_vat', 0.1))
            # Single lambda_t weights both target VAT and conditional entropy
            lambda_t_val = float(config.get('lambda_t', 0.1))
            
            running_D_loss, running_F_loss, F_acc, domain_acc, running_CE_loss = train_vada(
                extractor, predictor, discriminator,
                optimizer_F, optimizer_C, optimizer_D,
                class_criterion, domain_criterion, conditional_entropy_loss_fn, vat_loss_fn,
                source_dataloader, target_dataloader,
                lambda_d_val, lambda_s_vat_val, lambda_t_val,
                config 
            )
            # log for train curve
            loss_record['trainF']['loss'].append(running_F_loss)
            loss_record['trainF']['iter'].append(it)
            loss_record['trainD']['loss'].append(running_D_loss) 
            loss_record['trainD']['iter'].append(it)
            # record the conditional-entropy loss
            loss_record['trainCE']['loss'].append(running_CE_loss)
            loss_record['trainCE']['iter'].append(it)
            acc_record['trainF']['acc'].append(F_acc)
            acc_record['trainF']['iter'].append(it)
            acc_record['trainD']['acc'].append(domain_acc) 
            acc_record['trainD']['iter'].append(it)
            print('epoch {:>3d}: VADA: D loss: {:6.4f}, F loss: {:6.4f}, CE loss: {:6.4f}, F acc {:6.4f}, D acc {:6.4f}'.format(epoch, running_D_loss, running_F_loss, running_CE_loss, F_acc, domain_acc))
        
        elif config['method'] == 'dirt':
            # DIRT training mode
            # On the first epoch, initialize the teacher from the student.
            # NOTE(review): the teacher is never refreshed after epoch 0, so
            # the KL term anchors the student to its initial weights for the
            # whole run — confirm this matches the intended DIRT-T schedule.
            if epoch == 0 or teacher_extractor is None:
                # Copy current student parameters into the teacher
                teacher_extractor.load_dict(extractor.state_dict())
                teacher_predictor.load_dict(predictor.state_dict())
                print("初始化教师模型参数")
            
            # Hyperparameters from config
            lambda_t_val = float(config.get('lambda_t', 0.1))
            beta_t_val = float(config.get('beta_t', 0.5))  # teacher-student KL weight
            
            running_F_loss, F_acc, running_VAT_loss, running_CE_loss, running_KL_loss = train_dirt(
                extractor, predictor, teacher_extractor, teacher_predictor,
                optimizer_F, optimizer_C,
                conditional_entropy_loss_fn, vat_loss_fn,
                source_dataloader, target_dataloader,
                lambda_t_val, beta_t_val,
                config 
            )
            
            # log for train curve
            loss_record['trainF']['loss'].append(running_F_loss)
            loss_record['trainF']['iter'].append(it)
            loss_record['trainCE']['loss'].append(running_CE_loss)
            loss_record['trainCE']['iter'].append(it)
            loss_record['trainKL']['loss'].append(running_KL_loss)
            loss_record['trainKL']['iter'].append(it)
            loss_record['trainVAT']['loss'].append(running_VAT_loss)
            loss_record['trainVAT']['iter'].append(it)
            acc_record['trainF']['acc'].append(F_acc)
            acc_record['trainF']['iter'].append(it)
            print('epoch {:>3d}: DIRT: F loss: {:6.4f}, VAT loss: {:6.4f}, CE loss: {:6.4f}, KL loss: {:6.4f}, F acc {:6.4f}'.format(
                epoch, running_F_loss, running_VAT_loss, running_CE_loss, running_KL_loss, F_acc))
        
        # Evaluate on the validation set at the end of every epoch
        valid_acc_manager = paddle.metric.Accuracy()
        valid_F_loss = valid_epoch(valid_dataloader, valid_acc_manager, extractor, predictor, class_criterion)
        valid_acc = valid_acc_manager.accumulate()
        # log for valid curve
        loss_record['valid']['loss'].append(valid_F_loss)
        loss_record['valid']['iter'].append(it)
        acc_record['valid']['acc'].append(valid_acc)
        acc_record['valid']['iter'].append(it)
        print('epoch {:>3d}: valid F loss: {:6.4f}, valid acc {:6.4f}'.format(epoch, valid_F_loss, valid_acc))
        it += 1
        
        # Keep the checkpoint with the best validation accuracy
        if valid_acc > best_acc:
            best_acc = valid_acc
            # Save model weights (discriminator only for adversarial methods)
            paddle.save(extractor.state_dict(), os.path.join(config['save_path'], 'extractor.pdparams'))
            paddle.save(predictor.state_dict(), os.path.join(config['save_path'], 'predictor.pdparams'))
            if config['method'] in ['dann', 'vada']:
                paddle.save(discriminator.state_dict(), os.path.join(config['save_path'], 'discriminator.pdparams'))
    
    # Plot the learning curves into save_path
    plot_learning_curve(loss_record, title='loss', ylabel='Loss', filename=os.path.join(config['save_path'], 'loss_curve.png'))
    plot_learning_curve(acc_record, title='acc', ylabel='Acc', filename=os.path.join(config['save_path'], 'acc_curve.png'))


def main():
    """CLI entry point: parse the command line and dispatch one action.

    Exactly one of --train / --val / --test / --report / --all must be
    given; all but --report take a config-file path.
    """
    parser = argparse.ArgumentParser(description='域适应训练')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--train', type=str, help='使用指定配置文件训练模型')
    mode.add_argument('--val', type=str, help='验证模型')
    mode.add_argument('--test', type=str, help='在测试集上进行推理')
    mode.add_argument('--report', action='store_true', help='生成实验报告')
    mode.add_argument('--all', type=str, help='执行训练、验证和报告生成')

    args = parser.parse_args()

    # Guard-clause dispatch: the group guarantees at most one mode is set.
    if args.train:
        print(f"使用配置 {args.train} 训练模型")
        train(args.train)
        return

    if args.val:
        print(f"验证模型，使用配置 {args.val}")
        validate_visualization(args.val)
        return

    if args.test:
        print(f"在测试集上进行推理，使用配置 {args.test}")
        test(args.test)
        return

    if args.report:
        print("生成实验报告")
        generate_report()
        return

    if args.all:
        # Full pipeline: train, then validate/visualize, then report.
        print(f"执行完整流程，使用配置 {args.all}")
        train(args.all)
        validate_visualization(args.all)
        generate_report()

if __name__ == "__main__":
    main()
