import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import accuracy_score, f1_score
from sklearn.utils.class_weight import compute_class_weight
import kagglehub

# Reproducibility: pin every RNG source the pipeline touches.
seed = 42
for _seeder in (random.seed, np.random.seed, tf.random.set_seed):
    _seeder(seed)

# Fetch the RAF-DB dataset via kagglehub and resolve the train/test split
# directories. Aborts the script if the download or path setup fails.
try:
    path = kagglehub.dataset_download("shuvoalok/raf-db-dataset")
    print("Path to dataset files:", path)
    dataset_path = os.path.join(path, 'DATASET')
    train_dir = os.path.join(dataset_path, 'train')
    test_dir = os.path.join(dataset_path, 'test')
except Exception as e:
    print(f"数据集下载或路径设置出错: {e}")
    # `exit()` is injected by the `site` module and is not guaranteed to
    # exist (e.g. under `python -S` or in frozen builds); SystemExit is a
    # builtin and always available.
    raise SystemExit(1)

# Input geometry and batching.
img_size = (224, 224)
batch_size = 64  # larger batch size

# Augmentation is applied to the training stream only; validation images
# are merely rescaled to [0, 1].
# NOTE(review): keras EfficientNet applications normalize inputs internally,
# so rescaling to [0, 1] here may double-normalize — confirm against the model.
_augment_kwargs = dict(
    rotation_range=40,
    width_shift_range=0.3,
    height_shift_range=0.3,
    shear_range=0.3,
    zoom_range=0.3,
    horizontal_flip=True,
    fill_mode='nearest',
)

train_datagen = ImageDataGenerator(rescale=1. / 255, **_augment_kwargs)

val_datagen = ImageDataGenerator(rescale=1. / 255)

# Directory iterators for the train and validation (test) splits.
def _flow(datagen, directory, **extra):
    """Build a directory iterator with the shared size/batch settings."""
    return datagen.flow_from_directory(
        directory,
        target_size=img_size,
        batch_size=batch_size,
        class_mode='categorical',
        **extra,
    )


# Training stream is shuffled with a fixed seed; validation stays ordered
# so predictions line up with `val_generator.classes`.
train_generator = _flow(train_datagen, train_dir, shuffle=True, seed=seed)

val_generator = _flow(val_datagen, test_dir, shuffle=False)


# Randomly carve the sample index space into a small labeled pool and the
# remaining unlabeled pool.
def split_labeled_unlabeled(generator, labeled_ratio=0.1):
    """Partition the generator's sample indices at random.

    Args:
        generator: any object exposing a ``samples`` count.
        labeled_ratio: fraction of samples to treat as labeled.

    Returns:
        Tuple of numpy int arrays ``(labeled_indices, unlabeled_indices)``.
    """
    total = generator.samples
    cut = int(total * labeled_ratio)
    shuffled = np.random.permutation(total)
    return shuffled[:cut], shuffled[cut:]


# Start with 10% of the training samples as the labeled pool; the rest is
# treated as unlabeled for the self-training loop below.
labeled_indices, unlabeled_indices = split_labeled_unlabeled(train_generator, labeled_ratio=0.1)


# Build a tf.data pipeline that serves only the labeled subset of samples.
def create_labeled_dataset(generator, labeled_indices, batch_size):
    """Wrap `generator` in a tf.data.Dataset restricted to `labeled_indices`.

    Args:
        generator: a keras directory iterator (indexable by batch number).
        labeled_indices: global sample indices to serve.
        batch_size: output batch size of the returned dataset.

    Returns:
        A batched, prefetched tf.data.Dataset of (image, one-hot label) pairs.

    NOTE(review): `train_generator` is created with shuffle=True, so the
    batch-index -> sample mapping is reshuffled every epoch; these indices
    therefore do not pin down fixed images — confirm this is intended.
    """
    # Index arithmetic must use the generator's OWN batch size, which is not
    # necessarily the same as the requested output batch size.
    src_batch_size = generator.batch_size
    # Derive shape and class count from the generator instead of hard-coding
    # (224, 224, 3) and 7, keeping this consistent with the model head,
    # which already sizes itself from `class_indices`.
    sample_shape = generator.image_shape
    num_classes = len(generator.class_indices)

    def generator_fn():
        for i in labeled_indices:
            batch_idx = i // src_batch_size   # which source batch holds sample i
            sample_idx = i % src_batch_size   # position of sample i inside it
            x, y = generator[batch_idx]       # load that batch
            yield x[sample_idx], y[sample_idx]  # emit the single sample

    dataset = tf.data.Dataset.from_generator(
        generator_fn,
        output_signature=(
            tf.TensorSpec(shape=sample_shape, dtype=tf.float32),
            tf.TensorSpec(shape=(num_classes,), dtype=tf.float32),
        )
    )
    return dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)


labeled_dataset = create_labeled_dataset(train_generator, labeled_indices, batch_size)

# Backbone: EfficientNetB0 with pretrained weights loaded from a local file
# (include_top=False drops the original ImageNet classifier head).
base_model = EfficientNetB0(weights='/mnt/workspace/efficientnetb0_notop.h5', include_top=False,
                            input_shape=(224, 224, 3))

# Fine-tune only the last 20 layers. The original code skipped the freeze
# step, so every layer stayed trainable (Keras layers default to
# trainable=True) and "unfreeze the last 20" was a no-op. Freeze per-layer
# rather than via `base_model.trainable = False`, because a nested model's
# own False flag would override any child layer set back to True.
for layer in base_model.layers[:-20]:
    layer.trainable = False
for layer in base_model.layers[-20:]:
    layer.trainable = True

# Custom classification head on top of the pooled backbone features.
x = base_model.output
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
x = layers.Dropout(0.3)(x)  # dropout rate 0.3
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.3)(x)  # dropout rate 0.3
# Output size follows the discovered class folders.
predictions = layers.Dense(len(train_generator.class_indices), activation='softmax')(x)

# Final end-to-end model.
model = models.Model(inputs=base_model.input, outputs=predictions)

# Compile with a small initial learning rate suited to fine-tuning.
optimizer = Adam(learning_rate=0.0001)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# 'balanced' class weights to counter label imbalance in the training split.
_unique_classes = np.unique(train_generator.classes)
_balanced_weights = compute_class_weight(
    class_weight='balanced',
    classes=_unique_classes,
    y=train_generator.classes,
)
# Keras expects a {class_index: weight} mapping.
class_weights = {idx: weight for idx, weight in enumerate(_balanced_weights)}


# Self-training (semi-supervised) loop: alternately fit on the labeled pool
# and promote high-confidence samples from the unlabeled pool into it.
def self_training(model, labeled_dataset, labeled_indices, unlabeled_indices, train_generator, val_generator, epochs=30,
                  confidence_threshold=0.9):
    """Run `epochs` rounds of train-then-promote and return the final pools.

    Reads module-level globals: `batch_size`, `class_weights`,
    `early_stopping`, `reduce_lr`, and `create_labeled_dataset`.

    Returns:
        (labeled_indices, unlabeled_indices) after the final round.

    NOTE(review): several issues to confirm before trusting results:
      * `model.predict(train_generator, ...)` draws batches from the FULL
        shuffled training generator, so prediction rows are not aligned with
        `unlabeled_indices` — the confidence filter below effectively selects
        arbitrary sample indices.
      * `unlabeled_pred_classes` is computed but never used; promoted samples
        keep their ground-truth labels from the generator, so this is not
        true pseudo-labeling.
      * the assert assumes predict yields exactly steps * batch_size rows; a
        partial final batch from the generator would violate that — verify.
    """
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}/{epochs}")

        # One fit pass over the current labeled subset.
        history = model.fit(
            labeled_dataset,
            steps_per_epoch=len(labeled_indices) // batch_size,
            validation_data=val_generator,
            validation_steps=val_generator.samples // batch_size,
            epochs=1,
            callbacks=[early_stopping, reduce_lr],
            class_weight=class_weights
        )

        # Predict on (nominally) the unlabeled data.
        steps = len(unlabeled_indices) // batch_size
        unlabeled_predictions = model.predict(train_generator, steps=steps)
        unlabeled_pred_classes = np.argmax(unlabeled_predictions, axis=1)
        unlabeled_confidences = np.max(unlabeled_predictions, axis=1)

        # Sanity check: predictions must line up 1:1 with the truncated
        # unlabeled index slice.
        assert len(unlabeled_confidences) == len(
            unlabeled_indices[:steps * batch_size]), "Length mismatch between predictions and unlabeled indices"

        # Promote samples whose top-class confidence clears the threshold.
        high_confidence_indices = unlabeled_indices[:steps * batch_size][unlabeled_confidences > confidence_threshold]
        labeled_indices = np.concatenate([labeled_indices, high_confidence_indices])
        unlabeled_indices = np.setdiff1d(unlabeled_indices, high_confidence_indices)

        # Rebuild the labeled-data pipeline with the enlarged pool.
        labeled_dataset = create_labeled_dataset(train_generator, labeled_indices, batch_size)

        print(f"Added {len(high_confidence_indices)} new labeled samples.")

    return labeled_indices, unlabeled_indices


# Train the model. The callbacks are deliberately module-level:
# `self_training` reads them as globals inside its fit call.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6)

labeled_indices, unlabeled_indices = self_training(
    model,
    labeled_dataset,
    labeled_indices,
    unlabeled_indices,
    train_generator,
    val_generator,
    epochs=30,
)

# Final evaluation on the ordered (shuffle=False) validation generator, so
# prediction rows align with `val_generator.classes`.
y_pred = model.predict(val_generator)
y_pred_classes = y_pred.argmax(axis=1)
y_true = val_generator.classes

accuracy = accuracy_score(y_true, y_pred_classes)
f1 = f1_score(y_true, y_pred_classes, average='weighted')

print(f"平均准确率: {accuracy}")
print(f"F1分数: {f1}")