import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.efficientnet import EfficientNetB0
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from sklearn.metrics import accuracy_score, f1_score
from sklearn.utils.class_weight import compute_class_weight
import kagglehub

# Fix random seeds so runs are reproducible across python, numpy and tensorflow.
seed = 42
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)

# Download the RAF-DB facial-expression dataset via kagglehub and resolve the
# train/test directories under its 'DATASET' folder.
try:
    path = kagglehub.dataset_download("shuvoalok/raf-db-dataset")
    print("Path to dataset files:", path)
    dataset_path = os.path.join(path, 'DATASET')
    train_dir = os.path.join(dataset_path, 'train')
    test_dir = os.path.join(dataset_path, 'test')
except Exception as e:
    # Broad catch is deliberate: any download/path failure aborts the script.
    # NOTE(review): prefer sys.exit(1); bare exit() is provided by the site
    # module and is not guaranteed in all run modes.
    print(f"数据集下载或路径设置出错: {e}")
    exit(1)

# Input size expected by EfficientNetB0, and the batch size used throughout.
img_size = (224, 224)
batch_size = 64  # increased batch size

# Augmentation + 1/255 rescaling for training; rescale-only for validation.
# NOTE(review): tf.keras EfficientNet applications normalize raw 0-255 inputs
# internally, so rescaling to [0, 1] here likely double-normalizes the inputs
# fed to EfficientNetB0 below — confirm the intended preprocessing.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.3,
    height_shift_range=0.3,
    shear_range=0.3,
    zoom_range=0.3,
    horizontal_flip=True,
    fill_mode='nearest'
)

val_datagen = ImageDataGenerator(rescale=1./255)

# Training iterator: yields (image_batch, one-hot label_batch) tuples from the
# class-named subdirectories of train_dir; shuffled with a fixed seed.
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=img_size,
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=True,
    seed=seed  # fix the shuffling seed for reproducibility
)

# Validation iterator over the test split. shuffle=False keeps sample order
# aligned with `val_generator.classes`, which the evaluation below relies on.
val_generator = val_datagen.flow_from_directory(
    test_dir,
    target_size=img_size,
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False
)

# Randomly hold out most of the data as "unlabeled" for semi-supervision.
def split_labeled_unlabeled_data(generator, labeled_ratio=0.1):
    """Randomly partition a generator's sample indices into two index sets.

    Args:
        generator: Keras DirectoryIterator-like object exposing ``samples``.
        labeled_ratio: fraction of samples kept as labeled (default 0.1).

    Returns:
        Tuple ``(labeled_indices, unlabeled_indices)`` of integer arrays;
        together they cover every sample exactly once. Uses the global
        NumPy RNG, so results depend on ``np.random.seed`` state.
    """
    total = generator.samples
    cutoff = int(total * labeled_ratio)
    perm = np.random.permutation(total)
    return perm[:cutoff], perm[cutoff:]

# Split the training set once: 10% labeled, 90% treated as unlabeled.
# NOTE(review): neither index set is used anywhere below — the fit() call
# trains on the full train_generator. Confirm whether this is intentional.
labeled_indices, unlabeled_indices = split_labeled_unlabeled_data(train_generator)

# Variational autoencoder (VAE) intended for pseudo-label generation.
class VAE(tf.keras.Model):
    """Convolutional VAE over 224x224 RGB images.

    The encoder maps an image to the mean and log-variance of a diagonal
    Gaussian posterior; the decoder maps a latent sample back to image space.

    Fixes vs. the previous revision:
    - Encoder convolutions now use padding='same' so spatial sizes halve
      cleanly (224 -> 112 -> 56 -> 28) instead of the irregular valid-padding
      sizes (224 -> 111 -> 55 -> 27).
    - The decoder now upsamples 28 -> 56 -> 112 -> 224, so reconstructions
      have the same (224, 224, 3) shape as the inputs. Previously it emitted
      (56, 56, 3), which made the elementwise reconstruction loss in
      ``vae_loss`` fail on shape mismatch.
    - ``reparameterize`` uses ``tf.shape`` so it works with a dynamic
      (unknown) batch dimension inside ``tf.function``.
    """

    def __init__(self, latent_dim):
        super(VAE, self).__init__()
        self.latent_dim = latent_dim
        # Encoder: image -> concatenated [mean, logvar], length 2 * latent_dim.
        self.encoder = tf.keras.Sequential([
            layers.InputLayer(input_shape=(224, 224, 3)),
            layers.Conv2D(32, 3, strides=2, padding='same', activation='relu'),   # 112x112
            layers.Conv2D(64, 3, strides=2, padding='same', activation='relu'),   # 56x56
            layers.Conv2D(128, 3, strides=2, padding='same', activation='relu'),  # 28x28
            layers.Flatten(),
            layers.Dense(latent_dim + latent_dim),
        ])
        # Decoder: latent vector -> (224, 224, 3) reconstruction. No output
        # activation: raw values go straight into the MSE reconstruction loss.
        self.decoder = tf.keras.Sequential([
            layers.InputLayer(input_shape=(latent_dim,)),
            layers.Dense(28 * 28 * 128, activation='relu'),
            layers.Reshape((28, 28, 128)),
            layers.Conv2DTranspose(128, 3, strides=2, padding='same', activation='relu'),  # 56x56
            layers.Conv2DTranspose(64, 3, strides=2, padding='same', activation='relu'),   # 112x112
            layers.Conv2DTranspose(32, 3, strides=2, padding='same', activation='relu'),   # 224x224
            layers.Conv2DTranspose(3, 3, strides=1, padding='same'),
        ])

    def encode(self, x):
        """Return (mean, logvar) of the approximate posterior q(z|x)."""
        mean, logvar = tf.split(self.encoder(x), num_or_size_splits=2, axis=1)
        return mean, logvar

    def reparameterize(self, mean, logvar):
        """Sample z = mean + sigma * eps, eps ~ N(0, I) (reparameterization trick)."""
        eps = tf.random.normal(shape=tf.shape(mean))
        return eps * tf.exp(logvar * .5) + mean

    def decode(self, z):
        """Map a latent batch z to image-space reconstructions."""
        return self.decoder(z)

    def call(self, x):
        """Full forward pass: returns (reconstruction, mean, logvar)."""
        mean, logvar = self.encode(x)
        z = self.reparameterize(mean, logvar)
        return self.decode(z), mean, logvar

# Loss for the VAE: reconstruction term plus KL regularizer.
def vae_loss(x, x_recon, mean, logvar):
    """Return the scalar VAE loss for a batch.

    Sum of the mean-squared reconstruction error and the KL divergence of
    the diagonal-Gaussian posterior N(mean, exp(logvar)) from N(0, I),
    each averaged over every element in the batch.
    """
    reconstruction_term = tf.reduce_mean(tf.math.squared_difference(x, x_recon))
    kl_term = -0.5 * tf.reduce_mean(1 + logvar - tf.square(mean) - tf.exp(logvar))
    return reconstruction_term + kl_term

# Instantiate the VAE and its optimizer.
# NOTE(review): train_step_vae below is defined but never invoked in this
# file, so the VAE is never actually trained — confirm the missing loop.
latent_dim = 128
vae = VAE(latent_dim)
vae_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)

@tf.function
def train_step_vae(x):
    """Run one VAE optimization step on image batch ``x``; return the scalar loss.

    Closes over the module-level ``vae`` and ``vae_optimizer``.
    """
    with tf.GradientTape() as tape:
        x_recon, mean, logvar = vae(x)
        loss = vae_loss(x, x_recon, mean, logvar)
    gradients = tape.gradient(loss, vae.trainable_variables)
    vae_optimizer.apply_gradients(zip(gradients, vae.trainable_variables))
    return loss

# Use the VAE to generate pseudo labels for unlabeled data.
# NOTE(review): this function is never called in this file, and its logic looks
# broken as written: ``vae(x)`` returns a reconstructed IMAGE tensor, so
# ``np.argmax(x_recon, axis=1)`` yields per-pixel row indices, not class ids.
# Also, iterating a Keras directory iterator loops forever — the loop needs an
# explicit stopping condition. Flagging rather than rewriting, since a correct
# pseudo-labeling scheme is a design decision beyond this block.
def generate_pseudo_labels(unlabeled_data):
    pseudo_labels = []
    for x in unlabeled_data:
        x_recon, _, _ = vae(x)
        pseudo_labels.append(np.argmax(x_recon, axis=1))
    return np.array(pseudo_labels)

# Load the EfficientNetB0 convolutional base (no classification top) from a
# local weights file.
# NOTE(review): the hard-coded '/mnt/workspace/...' path ties this script to
# one machine; also Keras EfficientNet normalizes raw 0-255 inputs internally,
# while the generators above rescale to [0, 1] — confirm the preprocessing.
base_model = EfficientNetB0(weights='/mnt/workspace/efficientnetb0_notop.h5', include_top=False, input_shape=(224, 224, 3))

# Mark the last 20 layers of the pretrained base as trainable.
# NOTE(review): layers not touched here keep their default trainable=True, so
# the earlier layers are NOT actually frozen — if partial freezing is intended,
# the base should be frozen first. Confirm intent.
for layer in base_model.layers[-20:]:  # unfreeze the last 20 layers
    layer.trainable = True

# Custom classification head on top of the pooled base features.
x = base_model.output
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
x = layers.Dropout(0.3)(x)  # dropout rate 0.3
x = layers.Dense(128, activation='relu')(x)
x = layers.Dropout(0.3)(x)  # dropout rate 0.3
predictions = layers.Dense(len(train_generator.class_indices), activation='softmax')(x)

# Assemble the end-to-end classifier.
model = models.Model(inputs=base_model.input, outputs=predictions)

# Compile with a low learning rate suitable for fine-tuning.
optimizer = Adam(learning_rate=0.0001)  # initial learning rate
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Balanced per-class weights to counter class imbalance in the training set.
class_weights = compute_class_weight(
    class_weight='balanced',
    classes=np.unique(train_generator.classes),
    y=train_generator.classes
)
# Keras expects a {class_index: weight} dict.
class_weights = dict(enumerate(class_weights))

# Train with early stopping (restores the best weights) and learning-rate
# reduction on validation-loss plateau.
early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3, min_lr=1e-6)
history = model.fit(
    train_generator,
    # Integer division drops the final partial batch from each epoch/validation.
    steps_per_epoch=train_generator.samples // batch_size,
    validation_data=val_generator,
    validation_steps=val_generator.samples // batch_size,
    epochs=30,
    callbacks=[early_stopping, reduce_lr],
    class_weight=class_weights
)

# Evaluate on the test split. predict() consumes the whole generator, and
# because the generator was built with shuffle=False, prediction order stays
# aligned with `val_generator.classes`.
y_pred = model.predict(val_generator)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = val_generator.classes

accuracy = accuracy_score(y_true, y_pred_classes)
# Weighted F1 accounts for the class imbalance in the test set.
f1 = f1_score(y_true, y_pred_classes, average='weighted')

print(f"平均准确率: {accuracy}")
print(f"F1分数: {f1}")