import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import accuracy_score, f1_score
from sklearn.utils.class_weight import compute_class_weight
import kagglehub

# Fix random seeds for reproducibility across Python, NumPy and TensorFlow.
seed = 42
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)

# Disable TensorFlow's layout optimizer.
tf.config.optimizer.set_experimental_options({'layout_optimizer': False})

# Download the RAF-DB dataset and resolve the train/test directory paths.
try:
    path = kagglehub.dataset_download("shuvoalok/raf-db-dataset")
    print("Path to dataset files:", path)
    dataset_path = os.path.join(path, 'DATASET')
    train_dir = os.path.join(dataset_path, 'train')
    test_dir = os.path.join(dataset_path, 'test')
except Exception as e:
    print(f"数据集下载或路径设置出错: {e}")
    # raise SystemExit instead of the interactive helper exit(): exit() is
    # injected by the `site` module and is absent under `python -S` / frozen
    # interpreters, while SystemExit(1) always terminates with status 1.
    raise SystemExit(1)

# Input resolution expected by EfficientNetB0, and the mini-batch size
# (kept moderate to limit memory consumption).
img_size = (224, 224)
batch_size = 32

# Training-time augmentation options; pixel values are rescaled to [0, 1].
_augmentation = {
    'rescale': 1. / 255,
    'rotation_range': 20,
    'width_shift_range': 0.2,
    'height_shift_range': 0.2,
    'shear_range': 0.2,
    'zoom_range': 0.2,
    'horizontal_flip': True,
    'fill_mode': 'nearest',
}
train_datagen = ImageDataGenerator(**_augmentation)

# Validation images are only rescaled, never augmented.
val_datagen = ImageDataGenerator(rescale=1. / 255)

# Build the training and validation iterators from the directory trees.
# Both iterators share the same resolution, batch size and label encoding.
_flow_common = dict(target_size=img_size, batch_size=batch_size, class_mode='categorical')

train_generator = train_datagen.flow_from_directory(
    train_dir,
    shuffle=True,
    seed=seed,  # fixed seed so the shuffling order is reproducible
    **_flow_common,
)

val_generator = val_datagen.flow_from_directory(
    test_dir,
    shuffle=False,  # keep order stable so predictions align with .classes
    **_flow_common,
)


# Build labeled / unlabeled sample streams for semi-supervised training.
def build_semi_supervised_generator(train_generator, labeled_ratio=0.1, batch_size=32):
    """Split an indexable batch generator into labeled and unlabeled streams.

    A random subset containing ``labeled_ratio`` of the samples is treated
    as labeled; the remainder is treated as unlabeled.  Both returned
    generators cycle forever, yielding one sample at a time.

    Args:
        train_generator: Indexable batch source where ``train_generator[b]``
            returns ``(images, labels)`` and ``.samples`` is the total sample
            count (e.g. a Keras ``DirectoryIterator``).
        labeled_ratio: Fraction of samples to keep labeled.
        batch_size: Batch size the generator was built with.  New parameter;
            its default of 32 matches the script-wide value, so existing
            callers are unaffected (the old code read the module global).

    Returns:
        Tuple ``(labeled_gen, unlabeled_gen)``: the first yields
        ``(image, label)`` pairs, the second yields images only.

    NOTE(review): with a Keras iterator created with ``shuffle=True`` the
    index->sample mapping changes every epoch, so the labeled/unlabeled
    split drifts over time — confirm this is acceptable for the experiment.
    """
    num_samples = train_generator.samples
    num_labeled = int(num_samples * labeled_ratio)
    indices = np.arange(num_samples)
    np.random.shuffle(indices)
    labeled_indices = set(indices[:num_labeled])  # set for O(1) membership tests
    # Number of batches, counting a possibly smaller final batch.
    num_batches = (num_samples + batch_size - 1) // batch_size

    # Fetch each batch once and walk its samples. The old code re-fetched
    # (and, for a Keras iterator, re-loaded and re-augmented) an entire
    # batch for every single sample — twice per labeled sample — doing
    # O(batch_size) redundant work per yielded item.
    def labeled_generator():
        while True:
            for b in range(num_batches):
                batch = train_generator[b]
                batch_images, batch_labels = batch[0], batch[1]
                for j in range(len(batch_images)):
                    if b * batch_size + j in labeled_indices:
                        yield batch_images[j], batch_labels[j]

    def unlabeled_generator():
        while True:
            for b in range(num_batches):
                batch_images = train_generator[b][0]
                for j in range(len(batch_images)):
                    if b * batch_size + j not in labeled_indices:
                        yield batch_images[j]

    return labeled_generator(), unlabeled_generator()


# Load the pretrained EfficientNetB0 backbone from a local weights file,
# without its top classifier.
base_model = EfficientNetB0(
    weights='/mnt/workspace/efficientnetb0_notop.h5',
    include_top=False,
    input_shape=(224, 224, 3),
)

# Fine-tune only the last 20 layers of the backbone.
for layer in base_model.layers[-20:]:
    layer.trainable = True

# Custom classification head on top of the pooled backbone features.
head = layers.GlobalAveragePooling2D()(base_model.output)
head = layers.Dense(256, activation='relu',
                    kernel_regularizer=regularizers.l2(0.001))(head)
head = layers.Dropout(0.3)(head)
head = layers.Dense(128, activation='relu')(head)
head = layers.Dropout(0.3)(head)
predictions = layers.Dense(len(train_generator.class_indices),
                           activation='softmax')(head)

# Assemble the final end-to-end model.
model = models.Model(inputs=base_model.input, outputs=predictions)

# Compile with a low learning rate suitable for fine-tuning.
optimizer = Adam(learning_rate=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Balanced per-class weights to compensate for class imbalance,
# keyed by integer class index.
class_weights = dict(enumerate(compute_class_weight(
    class_weight='balanced',
    classes=np.unique(train_generator.classes),
    y=train_generator.classes,
)))


# Consistency-regularization loss term.
def consistency_loss(y_true, y_pred):
    """Mean squared difference between two prediction tensors."""
    return tf.reduce_mean(tf.math.squared_difference(y_true, y_pred))


# Semi-supervised training loop.
def semi_supervised_train(model, labeled_gen, unlabeled_gen, val_generator, batch_size=32, epochs=30):
    """Train `model` on labeled samples plus a consistency pass over unlabeled ones.

    Each epoch: (1) supervised `train_on_batch` steps on single labeled
    samples, (2) a pass over unlabeled samples that measures a consistency
    loss between predictions on original and augmented images and runs an
    extra training step, (3) evaluation on `val_generator`.

    NOTE(review): this function reads the module-level globals
    `train_generator` and `train_datagen` rather than its parameters —
    hidden coupling worth untangling.
    """
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}/{epochs}")

        # --- Supervised pass over labeled samples ---
        labeled_loss, labeled_accuracy = 0, 0
        # NOTE(review): batch count is based on ALL samples, not just the
        # labeled subset; the labeled generator cycles, so labeled samples
        # are revisited several times per "epoch" — confirm this is intended.
        num_labeled_batches = train_generator.samples // batch_size
        for _ in range(num_labeled_batches):
            img, label = next(labeled_gen)
            # np.newaxis turns one sample into a batch of size 1, so each
            # gradient step here uses a single image despite `batch_size`.
            history_labeled = model.train_on_batch(img[np.newaxis, ...], label[np.newaxis, ...])
            labeled_loss += history_labeled[0]
            labeled_accuracy += history_labeled[1]

        labeled_loss /= num_labeled_batches
        labeled_accuracy /= num_labeled_batches
        print(f"Labeled Data - Loss: {labeled_loss:.4f}, Accuracy: {labeled_accuracy:.4f}")

        # --- Consistency-regularization pass over unlabeled samples ---
        consistency_loss_value = 0
        num_unlabeled_batches = train_generator.samples // batch_size
        for _ in range(num_unlabeled_batches):
            img = next(unlabeled_gen)
            # Apply a fresh random augmentation to the same image.
            augmented_img = train_datagen.random_transform(img)
            augmented_img = train_datagen.standardize(augmented_img)

            # Predictions on the original and the augmented view.
            predictions = model.predict(img[np.newaxis, ...], verbose=0)
            augmented_predictions = model.predict(augmented_img[np.newaxis, ...], verbose=0)

            # Consistency loss is computed for MONITORING only here.
            loss = consistency_loss(predictions, augmented_predictions)
            consistency_loss_value += loss

            # NOTE(review): this step trains on the model's OWN soft
            # predictions with the compiled categorical-crossentropy loss
            # (self-distillation); the consistency loss above does not
            # contribute to any gradient — verify this matches the intended
            # consistency-regularization scheme.
            model.train_on_batch(img[np.newaxis, ...], predictions)

        # NOTE(review): `loss` is a TF tensor, so this accumulator is a
        # tensor too — relies on tensor __format__ support for the print.
        consistency_loss_value /= num_unlabeled_batches
        print(f"Unlabeled Data - Consistency Loss: {consistency_loss_value:.4f}")

        # --- Validation ---
        val_loss, val_accuracy = model.evaluate(val_generator, verbose=0)
        print(f"Validation Data - Loss: {val_loss:.4f}, Accuracy: {val_accuracy:.4f}")
        print("-" * 50)

        # Free graph/backend memory between epochs.
        # NOTE(review): clear_session() while continuing to train the same
        # model object is risky in graph mode — confirm it is safe here.
        tf.keras.backend.clear_session()


# Split the training stream into labeled / unlabeled generators (10% labeled).
labeled_gen, unlabeled_gen = build_semi_supervised_generator(train_generator, labeled_ratio=0.1)

# Run the semi-supervised training loop.
semi_supervised_train(model, labeled_gen, unlabeled_gen, val_generator,
                      batch_size=batch_size, epochs=30)

# Final evaluation on the held-out split. val_generator was created with
# shuffle=False, so row order matches val_generator.classes.
y_pred = model.predict(val_generator)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = val_generator.classes

accuracy = accuracy_score(y_true, y_pred_classes)
f1 = f1_score(y_true, y_pred_classes, average='weighted')

print(f"平均准确率: {accuracy}")
print(f"F1分数: {f1}")